Dataset schema (one row per commit; ranges are the min–max value lengths reported by the viewer):

    commit          stringlengths    40 – 40
    old_file        stringlengths    5 – 117
    new_file        stringlengths    5 – 117
    old_contents    stringlengths    0 – 1.93k
    new_contents    stringlengths    19 – 3.3k
    subject         stringlengths    17 – 320
    message         stringlengths    18 – 3.28k
    lang            stringclasses    1 value
    license         stringclasses    13 values
    repos           stringlengths    7 – 42.4k
    completion      stringlengths    19 – 3.3k
    prompt          stringlengths    21 – 3.65k
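A schema in this shape is what the Hugging Face `datasets` viewer reports. As a minimal sketch of how such a dump could be consumed — the dataset identifier below is a placeholder, since this preview does not name one — the rows could be loaded and iterated like this:

    from datasets import load_dataset

    # "user/commit-dataset" is hypothetical; substitute the dataset's real identifier.
    ds = load_dataset("user/commit-dataset", split="train")

    # Each row pairs a commit message with the file contents before and after the change.
    for row in ds.select(range(3)):
        print(row["commit"][:8], row["new_file"], "-", row["subject"])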
commit: b32b047656abd28dd794ee16dfab682337a753b1
old_file: accounts/tests.py
new_file: accounts/tests.py
old_contents:

    from django.test import TestCase

    # Create your tests here.

new_contents:

    """accounts app unittests

    """
    from django.test import TestCase


    class WelcomePageTest(TestCase):

        def test_uses_welcome_template(self):
            response = self.client.get('/')
            self.assertTemplateUsed(response, 'accounts/welcome.html')

subject: Add first unit test for welcome page
message: Add first unit test for welcome page
lang: Python
license: mit
repos: randomic/aniauth-tdd,randomic/aniauth-tdd
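The test above only pins the template used at '/'. A minimal view and URL wiring that would satisfy it might look like the following sketch — everything here beyond the 'accounts/welcome.html' template path is an assumption, not taken from the row:

    # accounts/views.py (hypothetical)
    from django.shortcuts import render

    def welcome(request):
        # Render the template the test asserts on.
        return render(request, 'accounts/welcome.html')

    # urls.py (hypothetical; URL pattern syntax varies by Django version)
    from django.conf.urls import url
    from accounts import views

    urlpatterns = [
        url(r'^$', views.welcome),
    ]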
commit: 5eced1c1cb9253d73e3246dccb4c33e5ba154fd3
old_file: rcbi/rcbi/spiders/FlyduinoSpider.py
new_file: rcbi/rcbi/spiders/FlyduinoSpider.py
old_contents:

    import scrapy
    from scrapy import log
    from scrapy.contrib.spiders import SitemapSpider, Rule
    from scrapy.contrib.linkextractors import LinkExtractor
    from rcbi.items import Part

    MANUFACTURERS = ["Rctimer", "RCTimer", "BaseCam", "Elgae", "ELGAE", "ArduFlyer", "Boscam", "T-Motor", "HQProp", "Suppo", "Flyduino", "SLS", "Frsky"]
    CORRECT = {"Rctimer": "RCTimer", "ELGAE": "Elgae", "Frsky": "FrSky"}

    class FlyduinoSpider(SitemapSpider):
        name = "flyduino"
        allowed_domains = ["flyduino.net"]
        sitemap_urls = ["http://flyduino.net/sitemap.xml"]

        def parse(self, response):
            item = Part()
            item["site"] = "flyduino"
            item["url"] = response.url
            product_name = response.css("div.hproduct")
            if not product_name:
                return
            item["name"] = product_name[0].xpath("//h1/text()").extract()[0]
            for m in MANUFACTURERS:
                if item["name"].startswith(m):
                    if m in CORRECT:
                        m = CORRECT[m]
                    item["manufacturer"] = m
                    item["name"] = item["name"][len(m):].strip()
                    break
            return item

new_contents:

    import scrapy
    from scrapy import log
    from scrapy.spiders import CrawlSpider, Rule
    from scrapy.linkextractors import LinkExtractor
    from rcbi.items import Part

    MANUFACTURERS = ["Rctimer", "RCTimer", "BaseCam", "Elgae", "ELGAE", "ArduFlyer", "Boscam", "T-Motor", "HQProp", "Suppo", "Flyduino", "SLS", "Frsky"]
    CORRECT = {"Rctimer": "RCTimer", "ELGAE": "Elgae", "Frsky": "FrSky"}

    class FlyduinoSpider(CrawlSpider):
        name = "flyduino"
        allowed_domains = ["flyduino.net"]
        start_urls = ["http://flyduino.net/"]

        rules = (
            # Extract links matching 'category.php' (but not matching 'subsection.php')
            # and follow links from them (since no callback means follow=True by default).
            Rule(LinkExtractor(restrict_css=".categories")),

            # Extract links matching 'item.php' and parse them with the spider's method parse_item
            Rule(LinkExtractor(restrict_css=".article_wrapper h3"), callback='parse_item'),
        )

        def parse_item(self, response):
            item = Part()
            item["site"] = "flyduino"
            item["url"] = response.url
            product_name = response.css("div.hproduct")
            if not product_name:
                return
            item["name"] = product_name[0].xpath("//h1/text()").extract()[0]
            for m in MANUFACTURERS:
                if item["name"].startswith(m):
                    if m in CORRECT:
                        m = CORRECT[m]
                    item["manufacturer"] = m
                    item["name"] = item["name"][len(m):].strip()
                    break
            return item

subject: Stop using the Flyduino sitemap.
message: Stop using the Flyduino sitemap.
lang: Python
license: apache-2.0
repos:
rcbuild-info/scrape,rcbuild-info/scrape
commit: 26a66b90f3e1a63ae91eb2eac08a580b4be6a3c4
old_file: counting/mapper.py
new_file: counting/mapper.py
old_contents: (empty)
new_contents:

    from contextlib import closing
    import subprocess
    import logging

    import do

    logger = logging.getLogger("Main")

    def g37_map(out_fn):
        url_map = "http://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/ARCHIVE/ANNOTATION_RELEASE.105/Assembled_chromosomes/chr_accessions_GRCh37.p13"
        url_ann = "http://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/ARCHIVE/ANNOTATION_RELEASE.105/GFF/ref_GRCh37.p13_top_level.gff3.gz"
        g_map(url_map, url_ann, out_fn)

    def g38_map(out_fn):
        url_map = "http://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/Assembled_chromosomes/chr_accessions_GRCh38.p2"
        url_ann = "http://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/GFF/ref_GRCh38.p2_top_level.gff3.gz"
        g_map(url_map, url_ann, out_fn)

    def g_map(url_map, url_ann, out_fn):
        cl = ("wget -q -O - {url_map}").format(**locals())
        cl = cl.split(" ")
        proc = subprocess.Popen(cl, stdout=subprocess.PIPE)
        d_map = {}
        with closing(proc.stdout) as stdout:
            for line in iter(stdout.readline, ''):
                cols = line.split("\t")
                d_map[cols[1]] = cols[0]
        cl = ("wget -q -O tmp.gz {url_ann}").format(**locals()).split(" ")
        do.run(cl)
        cl = ["zcat", "tmp.gz"]
        proc = subprocess.Popen(cl, stdout=subprocess.PIPE)
        logger.info("Creating GTF file %s" % out_fn)
        with closing(proc.stdout) as stdout:
            with open(out_fn, "w") as out_h:
                for line in iter(stdout.readline, ''):
                    cols = line.strip().split("\t")
                    if line.startswith("#") or cols[2] == "region":
                        continue
                    if cols[0] in d_map:
                        cols[0] = d_map[cols[0]]
                    # cols[8] = cols[8].replace("=", " ")
                    print >>out_h, "\t".join(cols)

subject: Add smart functions to get the correct gene annotation
message: Add smart functions to get the correct gene annotation
lang: Python
license: cc0-1.0
repos:
NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview
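Note the `print >>out_h, ...` statement, which pins this script to Python 2. A Python 3 equivalent of that final write — a sketch, not part of the commit — would be:

    # Python 3 form of the last line of g_map above.
    print("\t".join(cols), file=out_h)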
commit: 15437c33fd25a1f10c3203037be3bfef17716fbb
old_file: setup.py
new_file: setup.py
old_contents:

    import os
    from setuptools import setup, find_packages

    LONG_DESCRIPTION = """Django-Prometheus

    This library contains code to expose some monitoring metrics relevant
    to Django internals so they can be monitored by Prometheus.io.

    See https://github.com/korfuri/django-prometheus for usage instructions.
    """

    setup(
        name="django-prometheus",
        version="1.0.8",
        author="Uriel Corfa",
        author_email="[email protected]",
        description=(
            "Django middlewares to monitor your application with Prometheus.io."),
        license="Apache",
        keywords="django monitoring prometheus",
        url="http://github.com/korfuri/django-prometheus",
        packages=find_packages(),
        test_suite="django_prometheus.tests",
        long_description=LONG_DESCRIPTION,
        install_requires=[
            "prometheus_client>=0.0.13",
        ],
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Intended Audience :: Developers",
            "Intended Audience :: Information Technology",
            "Intended Audience :: System Administrators",
            "Framework :: Django",
            "Topic :: System :: Monitoring",
            "License :: OSI Approved :: Apache Software License",
        ],
    )

new_contents:

    import os
    from setuptools import setup, find_packages

    LONG_DESCRIPTION = """Django-Prometheus

    This library contains code to expose some monitoring metrics relevant
    to Django internals so they can be monitored by Prometheus.io.

    See https://github.com/korfuri/django-prometheus for usage instructions.
    """

    setup(
        name="django-prometheus",
        version="1.0.8",
        author="Uriel Corfa",
        author_email="[email protected]",
        description=(
            "Django middlewares to monitor your application with Prometheus.io."),
        license="Apache",
        keywords="django monitoring prometheus",
        url="http://github.com/korfuri/django-prometheus",
        packages=find_packages(),
        test_suite="django_prometheus.tests",
        long_description=LONG_DESCRIPTION,
        install_requires=[
            "prometheus_client>=0.0.13",
        ],
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Intended Audience :: Developers",
            "Intended Audience :: Information Technology",
            "Intended Audience :: System Administrators",
            "Programming Language :: Python :: 2",
            "Programming Language :: Python :: 2.7",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.3",
            "Programming Language :: Python :: 3.4",
            "Programming Language :: Python :: 3.5",
            "Framework :: Django",
            "Topic :: System :: Monitoring",
            "License :: OSI Approved :: Apache Software License",
        ],
    )

subject: Add trove classifiers for Python versions
message:

    Add trove classifiers for Python versions

    These are set to the versions tested by Travis. This fixes #39.

lang: Python
license: apache-2.0
repos:
korfuri/django-prometheus,obytes/django-prometheus,obytes/django-prometheus,korfuri/django-prometheus
commit: 3e89c102e9a47de1288b268b04a11ff73a22cd2e
old_file: main.py
new_file: main.py
old_contents:

    # coding: utf-8

    from web import app
    import db, config
    import os.path

    if __name__ == '__main__':
        if not config.check():
            print >>sys.stderr, "Couldn't find configuration file"
            sys.exit(1)

        if not os.path.exists(config.get('CACHE_DIR')):
            os.makedirs(config.get('CACHE_DIR'))

        db.init_db()
        app.run(debug = True)

new_contents:

    # coding: utf-8

    import config
    import os.path, sys

    if __name__ == '__main__':
        if not config.check():
            print >>sys.stderr, "Couldn't find configuration file"
            sys.exit(1)

        if not os.path.exists(config.get('CACHE_DIR')):
            os.makedirs(config.get('CACHE_DIR'))

        import db
        from web import app

        db.init_db()
        app.run(debug = True)

subject: Fix handling of missing config file
message: Fix handling of missing config file
lang: Python
license: agpl-3.0
repos:
hhm0/supysonic,nwokeo/supysonic,ezpuzz/supysonic,spl0k/supysonic,hhm0/supysonic,spl0k/supysonic,nwokeo/supysonic,nwokeo/supysonic,hhm0/supysonic,nwokeo/supysonic,spl0k/supysonic,nwokeo/supysonic,ezpuzz/supysonic
commit: 315e6da0dc3d7424a14c65ac243af1faef36b710
old_file: test/parse_dive.py
new_file: test/parse_dive.py
old_contents:

    #! /bin/python
    import argparse
    from xml.dom import minidom

    parser = argparse.ArgumentParser(description='Parse a dive in xml formt.')
    parser.add_argument('-f', '--file', required=True, dest='path', help='path to xml file')
    args = parser.parse_args()
    path = args.path

    doc = minidom.parse(path)
    nodes = doc.getElementsByTagName('Dive.Sample')

    for node in nodes:
        depth = (float(node.childNodes[2].childNodes[0].nodeValue) / 10 )+ 1
        time = float(node.childNodes[8].childNodes[0].nodeValue) / 60
        print ("%.2f %.2f" % (time , depth))

new_contents:

    #! /bin/python
    import argparse
    from xml.dom import minidom

    parser = argparse.ArgumentParser(description='Parse a dive in xml formt.')
    parser.add_argument('-f', '--file', required=True, dest='path', help='path to xml file')
    args = parser.parse_args()
    path = args.path

    doc = minidom.parse(path)
    nodes = doc.getElementsByTagName('Dive.Sample')

    for node in nodes:
        if node.hasChildNodes() and len(node.childNodes) > 8:
            for subNode in node.childNodes:
                if (subNode.nodeName == "Depth" and subNode.hasChildNodes()):
                    depth = (float(subNode.childNodes[0].nodeValue) / 10) + 1
                if (subNode.nodeName == "Time" and subNode.hasChildNodes()):
                    time = float(subNode.childNodes[0].nodeValue) / 60
            print ("%.2f %.2f" % (time , depth))

subject: Add a correct parsing of the file
message: Add a correct parsing of the file
lang: Python
license: isc
repos:
AquaBSD/libbuhlmann,AquaBSD/libbuhlmann,AquaBSD/libbuhlmann
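The node names the new loop matches suggest an input shape like the following; this sample document is inferred from the code, not taken from the repository (real files presumably also carry whitespace text nodes, which is why the parser checks len(childNodes) > 8):

    from xml.dom import minidom

    # Inferred sample input: Depth in decimetres, Time in seconds.
    doc = minidom.parseString(
        "<Dive><Dive.Sample><Depth>105</Depth><Time>600</Time></Dive.Sample></Dive>"
    )
    sample = doc.getElementsByTagName('Dive.Sample')[0]
    depth = sample.getElementsByTagName('Depth')[0].childNodes[0].nodeValue
    print(depth)  # -> 105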
commit: ac02378dcc611fb2c3b8a98e7480e02f64ee716d
old_file: polling_stations/apps/data_collection/management/commands/import_shepway.py
new_file: polling_stations/apps/data_collection/management/commands/import_shepway.py
old_contents:

    from data_collection.morph_importer import BaseMorphApiImporter

    class Command(BaseMorphApiImporter):

        srid = 4326
        districts_srid = 4326
        council_id = 'E07000112'
        elections = ['parl.2017-06-08']
        scraper_name = 'wdiv-scrapers/DC-PollingStations-Shepway'
        geom_type = 'geojson'

        def district_record_to_dict(self, record):
            poly = self.extract_geometry(record, self.geom_type, self.get_srid('districts'))
            code = record['dist_code'].strip()
            return {
                'internal_council_id': code,
                'name': record['district_n'].strip() + ' - ' + code,
                'area': poly,
                'polling_station_id': code,
            }

        def station_record_to_dict(self, record):
            location = self.extract_geometry(record, self.geom_type, self.get_srid('stations'))
            codes = record['polling_di'].split('\\')
            codes = [code.strip() for code in codes]

            stations = []
            for code in codes:
                stations.append({
                    'internal_council_id': code,
                    'postcode': '',
                    'address': record['address'].strip(),
                    'location': location,
                })

            return stations

new_contents:

    from data_collection.morph_importer import BaseMorphApiImporter

    class Command(BaseMorphApiImporter):

        srid = 4326
        districts_srid = 4326
        council_id = 'E07000112'
        #elections = ['parl.2017-06-08']
        scraper_name = 'wdiv-scrapers/DC-PollingStations-Shepway'
        geom_type = 'geojson'

        def district_record_to_dict(self, record):
            poly = self.extract_geometry(record, self.geom_type, self.get_srid('districts'))
            code = record['dist_code'].strip()
            return {
                'internal_council_id': code,
                'name': record['district_n'].strip() + ' - ' + code,
                'area': poly,
                'polling_station_id': code,
            }

        def station_record_to_dict(self, record):
            location = self.extract_geometry(record, self.geom_type, self.get_srid('stations'))
            codes = record['polling_di'].split('\\')
            codes = [code.strip() for code in codes]

            stations = []
            for code in codes:
                stations.append({
                    'internal_council_id': code,
                    'postcode': '',
                    'address': record['address'].strip(),
                    'location': location,
                })

            return stations

subject: Remove Shepway election id (waiting on feedback)
message: Remove Shepway election id (waiting on feedback)
lang: Python
license: bsd-3-clause
repos:
chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
commit: e6ce8e25ac819a874eb4e42087157f9cf780e0e4
old_file: lib/rapidsms/contrib/messagelog/tests.py
new_file: lib/rapidsms/contrib/messagelog/tests.py
old_contents: (empty)
new_contents:

    #!/usr/bin/env python
    # vim: ai ts=4 sts=4 et sw=4

    from rapidsms.conf import settings
    import rapidsms.contrib.messagelog.app

    def test_messagelog():
        app = rapidsms.contrib.messagelog.app.App()
        # Invoke _log, make sure it doesn't blow up regardless of Django version
        app._log('I', {}, "text")

subject: Add a test for contrib.messagelog's _log() method
message: Add a test for contrib.messagelog's _log() method
lang: Python
license: bsd-3-clause
repos:
ehealthafrica-ci/rapidsms,catalpainternational/rapidsms,eHealthAfrica/rapidsms,peterayeni/rapidsms,eHealthAfrica/rapidsms,ehealthafrica-ci/rapidsms,ehealthafrica-ci/rapidsms,lsgunth/rapidsms,lsgunth/rapidsms,peterayeni/rapidsms,caktus/rapidsms,lsgunth/rapidsms,lsgunth/rapidsms,eHealthAfrica/rapidsms,catalpainternational/rapidsms,caktus/rapidsms,caktus/rapidsms,catalpainternational/rapidsms,peterayeni/rapidsms,peterayeni/rapidsms,catalpainternational/rapidsms
commit: a0c0499c3da95e53e99d6386f7970079a2669141
old_file: app/twitter/views.py
new_file: app/twitter/views.py
old_contents:

    from flask import Blueprint, request, render_template

    from ..load import processing_results, api

    import string
    import tweepy

    twitter_mod = Blueprint('twitter', __name__, template_folder='templates', static_folder='static')

    ascii_chars = set(string.printable)
    ascii_chars.remove(' ')
    ascii_chars.add('...')


    def takeout_non_ascii(s):
        return list(filter(lambda x: x not in ascii_chars, s))


    @twitter_mod.route('/twitter', methods=['GET', 'POST'])
    def twitter():
        if request.method == 'POST':
            text = []
            for tweet in tweepy.Cursor(api.search, request.form['topic'], lang='hi').items(50):
                temp = ''.join(takeout_non_ascii(tweet.text))
                if not len(temp) in range(3):
                    text.append(temp)

            data, emotion_sents, score, line_sentiment, text, length = processing_results(text)

            return render_template('projects/twitter.html', data=[data, emotion_sents, score, zip(text, line_sentiment), length])
        else:
            return render_template('projects/twitter.html')

new_contents:

    from flask import Blueprint, request, render_template

    from ..load import processing_results, api

    import string
    import tweepy

    twitter_mod = Blueprint('twitter', __name__, template_folder='templates', static_folder='static')

    ascii_chars = set(string.printable)
    ascii_chars.remove(' ')
    ascii_chars.add('...')


    def takeout_non_ascii(s):
        return list(filter(lambda x: x not in ascii_chars, s))


    @twitter_mod.route('/twitter', methods=['GET', 'POST'])
    def twitter():
        if request.method == 'POST':
            try:
                topic = request.form['topic'].strip()
                if topic == '':
                    return render_template('projects/twitter.html', message='Please enter a valid topic')

                text = []
                for tweet in tweepy.Cursor(api.search, topic, lang='hi').items(50):
                    temp = ''.join(takeout_non_ascii(tweet.text))
                    if not len(temp) in range(3):
                        text.append(temp)

                data, emotion_sents, score, line_sentiment, text, length = processing_results(text)

                return render_template('projects/twitter.html', data=[data, emotion_sents, score, zip(text, line_sentiment), length])
            except Exception:
                return render_template('projects/twitter.html', message='Something went wrong. Please try again.')
        else:
            return render_template('projects/twitter.html')

subject: Add exception handling in twitter view
message: Add exception handling in twitter view
lang: Python
license: mit
repos:
griimick/feature-mlsite,griimick/feature-mlsite,griimick/feature-mlsite
commit: 50d9601ea0fa35a9d5d831353f5a17b33dc7d8bf
old_file: panoptes_client/subject_set.py
new_file: panoptes_client/subject_set.py
old_contents:

    from panoptes_client.panoptes import PanoptesObject, LinkResolver
    from panoptes_client.subject import Subject


    class SubjectSet(PanoptesObject):
        _api_slug = 'subject_sets'
        _link_slug = 'subject_sets'
        _edit_attributes = (
            'display_name',
            {
                'links': (
                    'project',
                ),
                'metadata': (
                    'category',
                )
            },
        )

        def __init__(self, raw={}, etag=None):
            r = super(SubjectSet, self).__init__(raw, etag)
            if self.id:
                self._edit_attributes[1]['links'] = (
                    'subjects',
                    'workflows',
                )
            return r

        def subjects(self):
            return Subject.where(subject_set_id=self.id)

        def add_subjects(self, subjects):
            if not type(subjects) in (tuple, list):
                subjects = [subjects]

            _subjects = []
            for subject in subjects:
                if not isinstance(subject, Subject):
                    raise TypeError
                _subjects.append(subject.id)

            self.post(
                '{}/links/subjects'.format(self.id),
                json={'subjects': _subjects}
            )

    LinkResolver.register(SubjectSet)

new_contents:

    from panoptes_client.panoptes import PanoptesObject, LinkResolver
    from panoptes_client.subject import Subject


    class SubjectSet(PanoptesObject):
        _api_slug = 'subject_sets'
        _link_slug = 'subject_sets'
        _edit_attributes = (
            'display_name',
            {
                'links': (
                    'project',
                ),
                'metadata': (
                    'category',
                )
            },
        )

        def subjects(self):
            return Subject.where(subject_set_id=self.id)

        def add_subjects(self, subjects):
            if not type(subjects) in (tuple, list):
                subjects = [subjects]

            _subjects = []
            for subject in subjects:
                if not isinstance(subject, Subject):
                    raise TypeError
                _subjects.append(subject.id)

            self.post(
                '{}/links/subjects'.format(self.id),
                json={'subjects': _subjects}
            )

    LinkResolver.register(SubjectSet)

subject: Revert "Don't try to save SubjectSet.links.project on exiting objects"
message:

    Revert "Don't try to save SubjectSet.links.project on exiting objects"

    This reverts commit b9a107b45cf2569f9effa1c8836a65255f2f3e64.

    Superseded by 7d2fecab46f0ede85c00fba8335a8dd74fe16489

lang: Python
license: apache-2.0
repos:
zooniverse/panoptes-python-client
commit: d37c1dca5ffe0508b0944b811a2a65daf8717bea
old_file: tests/test_garner_dates.py
new_file: tests/test_garner_dates.py
old_contents:

    """Test garner.dates."""

    from __future__ import absolute_import

    from .check import Check

    from proselint.checks.garner import dates


    class TestCheck(Check):
        """Test class for garner.dates."""

        __test__ = True

        def test_50s_hyphenation(self):
            """Find uneeded hyphen in 50's."""
            text = """The 50's were swell."""
            errors = dates.check_decade_apostrophes_short(text)
            assert len(errors) == 1

        def test_50_Cent_hyphenation(self):
            """Don't flag 50's when it refers to 50 Cent's manager."""
            text = """
                Dr. Dre suggested to 50's manager that he look into signing
                Eminem to the G-Unit record label.
            """
            errors = dates.check_decade_apostrophes_short(text)
            assert len(errors) == 0

        def test_dash_and_from(self):
            """Test garner.check_dash_and_from."""
            text = """From 1999-2002, Sally served as chair of the committee."""
            errors = dates.check_dash_and_from(text)
            print errors
            assert len(errors) == 1

new_contents:

    """Test garner.dates."""

    from __future__ import absolute_import

    from .check import Check

    from proselint.checks.garner import dates


    class TestCheck(Check):
        """Test class for garner.dates."""

        __test__ = True

        def test_50s_hyphenation(self):
            """Find uneeded hyphen in 50's."""
            text = """The 50's were swell."""
            errors = dates.check_decade_apostrophes_short(text)
            assert len(errors) == 1

        def test_50_Cent_hyphenation(self):
            """Don't flag 50's when it refers to 50 Cent's manager."""
            text = """
                Dr. Dre suggested to 50's manager that he look into signing
                Eminem to the G-Unit record label.
            """
            errors = dates.check_decade_apostrophes_short(text)
            assert len(errors) == 0

        def test_dash_and_from(self):
            """Test garner.check_dash_and_from."""
            text = """From 1999-2002, Sally served as chair of the committee."""
            errors = dates.check_dash_and_from(text)
            print(errors)
            assert len(errors) == 1

subject: Fix bug in print statement
message: Fix bug in print statement
lang: Python
license: bsd-3-clause
repos: jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint
commit: e17d8f9b8bd09b1b96cad3e61961f3833d2e486c
old_file: dataverse/file.py
new_file: dataverse/file.py
old_contents:

    from __future__ import absolute_import

    from dataverse.utils import sanitize


    class DataverseFile(object):
        def __init__(self, dataset, name, file_id=None):
            self.dataset = dataset
            self.name = sanitize(name)
            self.id = file_id

            self.download_url = '{0}/access/datafile/{1}'.format(
                dataset.connection.native_base_url, self.id
            )
            edit_media_base = '{0}/edit-media/file/{1}'
            self.edit_media_uri = edit_media_base.format(
                dataset.connection.sword_base_url, self.id
            )

        @classmethod
        def from_json(cls, dataset, json):
            name = json['datafile']['name']
            file_id = json['datafile']['id']
            return cls(dataset, name, file_id)

new_contents:

    from __future__ import absolute_import

    from dataverse.utils import sanitize


    class DataverseFile(object):
        def __init__(self, dataset, name, file_id=None):
            self.dataset = dataset
            self.name = sanitize(name)
            self.id = file_id

            self.download_url = '{0}/access/datafile/{1}'.format(
                dataset.connection.native_base_url, self.id
            )
            edit_media_base = '{0}/edit-media/file/{1}'
            self.edit_media_uri = edit_media_base.format(
                dataset.connection.sword_base_url, self.id
            )

        @classmethod
        def from_json(cls, dataset, json):
            try:
                name = json['dataFile']['filename']
                file_id = json['dataFile']['id']
            except KeyError:
                name = json['datafile']['name']
                file_id = json['datafile']['id']
            return cls(dataset, name, file_id)

subject: Fix 'class DataverseFile' to handle old and new response format Tests were failing after swith to new server/version
message:

    Fix 'class DataverseFile' to handle old and new response format

    Tests were failing after swith to new server/version

lang: Python
license: apache-2.0
repos:
CenterForOpenScience/dataverse-client-python,IQSS/dataverse-client-python
commit: d7d1df44e39ad7af91046a61f40b357a9aa9943a
old_file: pox.py
new_file: pox.py
old_contents:

    #!/usr/bin/python

    # Set default log level
    import logging
    logging.basicConfig(level=logging.DEBUG)

    from pox.core import core
    import pox.openflow.openflow
    import pox.topology.topology
    import pox.openflow.of_01
    import pox.dumb_l3_switch.dumb_l3_switch
    import pox.messenger.messenger

    # Turn on extra info for event exceptions
    import pox.lib.revent.revent as revent
    revent.showEventExceptions = True

    def startup ():
        core.register("topology", pox.topology.topology.Topology())
        core.register("openflow", pox.openflow.openflow.OpenFlowHub())
        core.register("switch", pox.dumb_l3_switch.dumb_l3_switch.dumb_l3_switch())
        pox.openflow.of_01.start()
        pox.messenger.messenger.start()

    if __name__ == '__main__':
        try:
            startup()
            core.goUp()
        except:
            import traceback
            traceback.print_exc()

        import code
        code.interact('Ready.', local=locals())
        pox.core.core.quit()

new_contents:

    #!/usr/bin/python

    # Set default log level
    import logging
    logging.basicConfig(level=logging.DEBUG)

    from pox.core import core
    import pox.openflow.openflow
    import pox.topology.topology
    import pox.openflow.of_01
    import pox.dumb_l3_switch.dumb_l3_switch
    import pox.messenger.messenger

    # Turn on extra info for event exceptions
    import pox.lib.revent.revent as revent
    revent.showEventExceptions = True

    def startup ():
        core.register("topology", pox.topology.topology.Topology())
        core.register("openflow", pox.openflow.openflow.OpenFlowHub())
        core.register("switch", pox.dumb_l3_switch.dumb_l3_switch.dumb_l3_switch())
        pox.openflow.of_01.start()
        pox.messenger.messenger.start()

    if __name__ == '__main__':
        try:
            startup()
            core.goUp()
        except:
            import traceback
            traceback.print_exc()

        import time
        time.sleep(1)
        import code
        import sys
        sys.ps1 = "POX> "
        sys.ps2 = " ... "
        code.interact('Ready.', local=locals())
        pox.core.core.quit()

subject: Add startup delay and change interpreter prompts
message:

    Add startup delay and change interpreter prompts

    The delay is so that hopefully switch connections don't IMMEDIATELY print all over the prompt. We'll do something better eventually.

lang: Python
license: apache-2.0
repos:
adusia/pox,jacobq/csci5221-viro-project,jacobq/csci5221-viro-project,chenyuntc/pox,VamsikrishnaNallabothu/pox,andiwundsam/_of_normalize,andiwundsam/_of_normalize,jacobq/csci5221-viro-project,pthien92/sdn,MurphyMc/pox,denovogroup/pox,chenyuntc/pox,carlye566/IoT-POX,kulawczukmarcin/mypox,MurphyMc/pox,PrincetonUniversity/pox,waltznetworks/pox,waltznetworks/pox,kavitshah8/SDNDeveloper,PrincetonUniversity/pox,MurphyMc/pox,diogommartins/pox,denovogroup/pox,noxrepo/pox,kpengboy/pox-exercise,adusia/pox,xAKLx/pox,chenyuntc/pox,MurphyMc/pox,pthien92/sdn,denovogroup/pox,diogommartins/pox,carlye566/IoT-POX,carlye566/IoT-POX,diogommartins/pox,waltznetworks/pox,waltznetworks/pox,jacobq/csci5221-viro-project,adusia/pox,chenyuntc/pox,kpengboy/pox-exercise,kulawczukmarcin/mypox,denovogroup/pox,kavitshah8/SDNDeveloper,VamsikrishnaNallabothu/pox,noxrepo/pox,PrincetonUniversity/pox,PrincetonUniversity/pox,andiwundsam/_of_normalize,pthien92/sdn,denovogroup/pox,noxrepo/pox,kpengboy/pox-exercise,waltznetworks/pox,kpengboy/pox-exercise,diogommartins/pox,carlye566/IoT-POX,xAKLx/pox,VamsikrishnaNallabothu/pox,pthien92/sdn,xAKLx/pox,adusia/pox,kulawczukmarcin/mypox,pthien92/sdn,xAKLx/pox,kavitshah8/SDNDeveloper,xAKLx/pox,kpengboy/pox-exercise,noxrepo/pox,adusia/pox,MurphyMc/pox,VamsikrishnaNallabothu/pox,kulawczukmarcin/mypox,PrincetonUniversity/pox,andiwundsam/_of_normalize,kavitshah8/SDNDeveloper,carlye566/IoT-POX,jacobq/csci5221-viro-project,diogommartins/pox,chenyuntc/pox,VamsikrishnaNallabothu/pox,kulawczukmarcin/mypox
commit: 43ad3b2d2e25b816d6d7b339d62e674541d76712
old_file: setup.py
new_file: setup.py
old_contents:

    from setuptools import setup, find_packages
    from gdc_client.version import __version__

    setup(
        name="gdc_client",
        version=__version__,
        packages=find_packages(),
        package_data={},
        install_requires=[
            'parcel',
            'lxml==3.5.0b1',
            'PyYAML==3.11',
            'jsonschema==2.5.1',
            'pyOpenSSL==17.1.0',
            'ndg-httpsclient==0.4.2',
            'pyasn1==0.2.3',
        ],
        dependency_links=[
            'git+https://github.com/LabAdvComp/parcel.git@50d6124a3e3fcd2a234b3373831075390b886a15#egg=parcel',
        ],
        scripts=[
            'bin/gdc-client',
        ],
    )

new_contents:

    from setuptools import setup, find_packages
    from gdc_client.version import __version__

    setup(
        name="gdc_client",
        version=__version__,
        packages=find_packages(),
        package_data={},
        install_requires=[
            'parcel',
            'lxml==3.5.0b1',
            'PyYAML==3.11',
            'jsonschema==2.5.1',
            'pyOpenSSL==17.1.0',
            'ndg-httpsclient==0.4.2',
            'pyasn1==0.2.3',
        ],
        dependency_links=[
            'git+https://github.com/LabAdvComp/parcel.git@c421063aeff60c316693756da3477634b8551f18#egg=parcel',
        ],
        scripts=[
            'bin/gdc-client',
        ],
    )

subject: Update dependency link for parcel and recent DTT-99 fix
message: Update dependency link for parcel and recent DTT-99 fix
lang: Python
license: apache-2.0
repos:
NCI-GDC/gdc-client,NCI-GDC/gdc-client
commit: 6dfa189bdab536ecfa2c14e4893017363923ee6a
old_file: bayes.py
new_file: bayes.py
old_contents: (empty)
new_contents:

    import numpy as np
    import cv2


    # pos and neg are positive and negative instances
    # each is a list of files of nparray dumps,
    # nparray of BoW histograms; shape = (n, 101)
    # of the class to be trained for
    def build_trained_classifier(pos_files, neg_files):
        total = len(pos_files) + len(neg_files)
        samples = np.empty((total, 101), np.float32)
        i = 0
        for pos_file in pos_files:
            samples[i] = np.load(pos_file)
            i = i + 1
        for neg_file in neg_files:
            samples[i] = np.load(neg_file)
            i = i + 1
        labels = np.empty((total, 1), np.float32)
        labels[0:len(pos_files), 0] = 1.0
        labels[len(pos_files):, 0] = 0.0
        return cv2.NormalBayesClassifier(samples, labels)

subject: Implement Naive Bayes Classifier builder method
message: Implement Naive Bayes Classifier builder method
lang: Python
license: mit
repos:
ah450/ObjectRecognizer
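A hedged usage sketch for the builder above — `cv2.NormalBayesClassifier` is the OpenCV 2.x API (OpenCV 3+ moved it to `cv2.ml.NormalBayesClassifier_create()`), and the file names here are hypothetical:

    # Train from saved BoW-histogram dumps, then classify new samples.
    model = build_trained_classifier(['pos_hist.npy'], ['neg_hist.npy'])

    # new_samples must be float32 with shape (k, 101), matching the training layout.
    retval, results = model.predict(new_samples)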
commit: 7645d98247df22dbd4a5af19d89174d347d827e6
old_file: python/challenges/plusMinus.py
new_file: python/challenges/plusMinus.py
old_contents: (empty)
new_contents:

    """
    Problem Statement:

    Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.

    Input Format:

    The first line, N, is the size of the array. The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).

    Output Format:

    Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
    """

subject: Create main challenge file with proble statement and i/o expectations
message: Create main challenge file with proble statement and i/o expectations
lang: Python
license: mit
repos: markthethomas/algorithms,markthethomas/algorithms,markthethomas/algorithms,markthethomas/algorithms
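The commit adds only the problem statement; a sketch of one possible solution (hypothetical, not part of the commit) could look like this:

    def plus_minus(arr):
        # Count each sign class, then print each fraction on its own line.
        n = float(len(arr))
        pos = sum(1 for x in arr if x > 0)
        neg = sum(1 for x in arr if x < 0)
        zero = len(arr) - pos - neg
        for count in (pos, neg, zero):
            print("%.6f" % (count / n))

    plus_minus([-4, 3, -9, 0, 4, 1])  # -> 0.500000, 0.333333, 0.166667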
commit: b5672d55beb837f21d761f50740b93c5b1e0dc5d
old_file: napalm/exceptions.py
new_file: napalm/exceptions.py
old_contents:

    # Copyright 2015 Spotify AB. All rights reserved.
    #
    # The contents of this file are licensed under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with the
    # License. You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations under
    # the License.


    class ReplaceConfigException(Exception):
        pass


    class MergeConfigException(Exception):
        pass


    class SessionLockedException(Exception):
        pass


    class CommandTimeoutException(Exception):
        pass


    class CommandErrorException(Exception):
        pass

new_contents:

    # Copyright 2015 Spotify AB. All rights reserved.
    #
    # The contents of this file are licensed under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with the
    # License. You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations under
    # the License.


    class ConnectionException(Exception):
        pass


    class ReplaceConfigException(Exception):
        pass


    class MergeConfigException(Exception):
        pass


    class SessionLockedException(Exception):
        pass


    class CommandTimeoutException(Exception):
        pass


    class CommandErrorException(Exception):
        pass

subject: Raise ConnectionException when device unusable
message: Raise ConnectionException when device unusable
lang: Python
license: apache-2.0
repos:
napalm-automation/napalm-base,napalm-automation/napalm-base,Netflix-Skunkworks/napalm-base,napalm-automation/napalm,Netflix-Skunkworks/napalm-base,spotify/napalm,bewing/napalm-base,spotify/napalm,bewing/napalm-base
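A sketch of what the new exception enables for callers; the driver object and its open() call are assumptions here, not part of this commit:

    from napalm.exceptions import ConnectionException

    try:
        device.open()  # `device` would be a napalm driver instance (hypothetical)
    except ConnectionException:
        # The device is unusable; back off or alert instead of crashing.
        handle_unreachable_device()  # hypothetical handler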
commit: 92e840aef7ac0d9aee629db58791a43a71cd578c
old_file: myhdl/test/conversion/numeric/test_numass.py
new_file: myhdl/test/conversion/numeric/test_numass.py
old_contents: (empty)
new_contents:

    from __future__ import absolute_import, print_function

    from random import randrange
    from myhdl import Signal, uintba, sintba, instance, delay, conversion


    def NumassBench():
        p = Signal(uintba(1, 8))
        q = Signal(uintba(1, 40))
        r = Signal(sintba(1, 9))
        s = Signal(sintba(1, 41))

        PBIGINT = randrange(2**34, 2**40)
        NBIGINT = -randrange(2**34, 2**40)

        @instance
        def check():
            p.next = 0
            q.next = 0
            r.next = 0
            s.next = 0
            yield delay(10)
            print("%d %d %d %d" % (p, q, r, s))
            p.next = 1
            q.next = 1
            r.next = 1
            s.next = 1
            yield delay(10)
            print("%d %d %d %d" % (p, q, r, s))
            p.next = 2
            q.next = 2
            r.next = -2
            s.next = -2
            yield delay(10)
            print("%d %d %d %d" % (p, q, r, s))
            p.next = 255
            q.next = 246836311517
            r.next = 255
            s.next = -246836311517
            yield delay(10)
            print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r, s[41:20], s[20:0]))
            p.next = 254
            q.next = PBIGINT
            r.next = -256
            s.next = NBIGINT
            yield delay(10)
            print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r, s[41:20], s[20:0]))

        return check


    def test_numass():
        assert conversion.verify(NumassBench) == 0

subject: Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
message:

    Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""

    This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.

lang: Python
license: lgpl-2.1
repos:
jmgc/myhdl-numeric,jmgc/myhdl-numeric,jmgc/myhdl-numeric
a15701a49c1fffedc30f939c231be4936d3ab790
setup.py
setup.py
import setuptools

from valohai_yaml import __version__

dev_dependencies = [
    'flake8',
    'isort',
    'pydocstyle',
    'pytest-cov',
]

if __name__ == '__main__':
    setuptools.setup(
        name='valohai-yaml',
        description='Valohai.yaml validation and parsing',
        version=__version__,
        url='https://github.com/valohai/valohai-yaml',
        author='Valohai',
        author_email='[email protected]',
        maintainer='Aarni Koskela',
        maintainer_email='[email protected]',
        license='MIT',
        install_requires=['jsonschema', 'PyYAML', 'six'],
        tests_require=dev_dependencies,
        extras_require={'dev': dev_dependencies},
        packages=setuptools.find_packages('.', exclude=('*tests*',)),
        include_package_data=True,
        entry_points={
            'console_scripts': [
                'valohai-yaml = valohai_yaml.__main__:main',
            ],
        },
    )
import ast
import os
import re

import setuptools

with open(os.path.join(os.path.dirname(__file__), 'valohai_yaml', '__init__.py')) as infp:
    version = ast.literal_eval(re.search('__version__ = (.+?)$', infp.read(), re.M).group(1))

dev_dependencies = [
    'flake8',
    'isort',
    'pydocstyle',
    'pytest-cov',
]

if __name__ == '__main__':
    setuptools.setup(
        name='valohai-yaml',
        description='Valohai.yaml validation and parsing',
        version=version,
        url='https://github.com/valohai/valohai-yaml',
        author='Valohai',
        author_email='[email protected]',
        maintainer='Aarni Koskela',
        maintainer_email='[email protected]',
        license='MIT',
        install_requires=['jsonschema', 'PyYAML', 'six'],
        tests_require=dev_dependencies,
        extras_require={'dev': dev_dependencies},
        packages=setuptools.find_packages('.', exclude=('*tests*',)),
        include_package_data=True,
        entry_points={
            'console_scripts': [
                'valohai-yaml = valohai_yaml.__main__:main',
            ],
        },
    )
Read version without importing package
Read version without importing package
Python
mit
valohai/valohai-yaml
import ast
import os
import re

import setuptools

with open(os.path.join(os.path.dirname(__file__), 'valohai_yaml', '__init__.py')) as infp:
    version = ast.literal_eval(re.search('__version__ = (.+?)$', infp.read(), re.M).group(1))

dev_dependencies = [
    'flake8',
    'isort',
    'pydocstyle',
    'pytest-cov',
]

if __name__ == '__main__':
    setuptools.setup(
        name='valohai-yaml',
        description='Valohai.yaml validation and parsing',
        version=version,
        url='https://github.com/valohai/valohai-yaml',
        author='Valohai',
        author_email='[email protected]',
        maintainer='Aarni Koskela',
        maintainer_email='[email protected]',
        license='MIT',
        install_requires=['jsonschema', 'PyYAML', 'six'],
        tests_require=dev_dependencies,
        extras_require={'dev': dev_dependencies},
        packages=setuptools.find_packages('.', exclude=('*tests*',)),
        include_package_data=True,
        entry_points={
            'console_scripts': [
                'valohai-yaml = valohai_yaml.__main__:main',
            ],
        },
    )
Read version without importing package

import setuptools

from valohai_yaml import __version__

dev_dependencies = [
    'flake8',
    'isort',
    'pydocstyle',
    'pytest-cov',
]

if __name__ == '__main__':
    setuptools.setup(
        name='valohai-yaml',
        description='Valohai.yaml validation and parsing',
        version=__version__,
        url='https://github.com/valohai/valohai-yaml',
        author='Valohai',
        author_email='[email protected]',
        maintainer='Aarni Koskela',
        maintainer_email='[email protected]',
        license='MIT',
        install_requires=['jsonschema', 'PyYAML', 'six'],
        tests_require=dev_dependencies,
        extras_require={'dev': dev_dependencies},
        packages=setuptools.find_packages('.', exclude=('*tests*',)),
        include_package_data=True,
        entry_points={
            'console_scripts': [
                'valohai-yaml = valohai_yaml.__main__:main',
            ],
        },
    )
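The trick in this commit generalizes to any package that single-sources its version string. A minimal standalone sketch, assuming a package directory "mypkg" whose __init__.py contains a line like __version__ = '1.2.3'; read_version is an illustrative name, not part of the commit:

# Minimal sketch of the same single-sourcing trick. Assumes mypkg/__init__.py
# contains a line such as:  __version__ = '1.2.3'
import ast
import re


def read_version(init_path):
    # Find the assignment and evaluate only the literal on its right-hand
    # side, so the package itself is never imported.
    with open(init_path) as infp:
        match = re.search(r'__version__ = (.+?)$', infp.read(), re.M)
    return ast.literal_eval(match.group(1))


print(read_version('mypkg/__init__.py'))  # -> '1.2.3' under the assumption above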
da8b184267d04ae8c95772b4cbfaef7603d4ed67
scripts/jenkins_console_log_search.py
scripts/jenkins_console_log_search.py
#!/usr/bin/env python3

"""
This short script uses curl requests to search the last 100 builds of a
jenkins job to find recurring errors, written in Python3. It results in
printing a list of links to builds that match the search

As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""

import argparse
import json
import requests
import sys
import time

serverURL = 'http://cv.jenkins.couchbase.com/'

# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str, help='The cv job to query. '
                       "Common jobs are: 'kv_engine-ASan-UBSan-master', "
                       "'kv_engine-clang_analyzer-master', "
                       "'kv_engine-linux-master', "
                       "'kv_engine-threadsanitizer-master', "
                       "'kv_engine-windows-master', "
                       "'kv_engine-clang_format', "
                       "'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
                       help='The string to search the logs for',
                       required=True)
argParser.add_argument('--build-no', '-b', type=int,
                       help='The build number of cv job to check backwards '
                            'from. 0 (default) fetches latest build number',
                       default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
                       help='The number of builds to check back',
                       default=100)
args = argParser.parse_args()

job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []

if args.build_no == 0:
    # need to fetch the latest build number
    r = requests.get(serverURL + job + 'lastBuild/api/json')
    j = r.json()
    args.build_no = j['number']

print("Searching for:", ('"' + args.search + '"'),
      "in console logs of job:", args.job,
      "between build", args.build_no - (args.no_of_builds - 1),
      "and", args.build_no, file=sys.stderr)

start_time = time.time()

for i in range(0, args.no_of_builds):
    print('\r >>> Current progress: {} '.format(str(i)), end='', flush=True,
          file=sys.stderr)
    r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
    result = r.text.find(args.search)
    if result != -1:
        resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')

print('\r Completed search in', (time.time() - start_time), 's',
      file=sys.stderr)

for url in resultURLs:
    print(url)
Add utility script for searching Jenkins console logs
Add utility script for searching Jenkins console logs

This small python script can be used to quickly check the last 100 (or more
if you're willing to edit and wait) to see if a string is present within
the console log. This can help find instances of errors to help determine
intermittent failures from one off problems.

The script requires requests which is not included within kv itself (in
couchbase-cli) so you cannot run directly inside kv.

Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <[email protected]>
Reviewed-by: Dave Rigby <[email protected]>
Python
bsd-3-clause
daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine
#!/usr/bin/env python3

"""
This short script uses curl requests to search the last 100 builds of a
jenkins job to find recurring errors, written in Python3. It results in
printing a list of links to builds that match the search

As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""

import argparse
import json
import requests
import sys
import time

serverURL = 'http://cv.jenkins.couchbase.com/'

# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str, help='The cv job to query. '
                       "Common jobs are: 'kv_engine-ASan-UBSan-master', "
                       "'kv_engine-clang_analyzer-master', "
                       "'kv_engine-linux-master', "
                       "'kv_engine-threadsanitizer-master', "
                       "'kv_engine-windows-master', "
                       "'kv_engine-clang_format', "
                       "'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
                       help='The string to search the logs for',
                       required=True)
argParser.add_argument('--build-no', '-b', type=int,
                       help='The build number of cv job to check backwards '
                            'from. 0 (default) fetches latest build number',
                       default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
                       help='The number of builds to check back',
                       default=100)
args = argParser.parse_args()

job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []

if args.build_no == 0:
    # need to fetch the latest build number
    r = requests.get(serverURL + job + 'lastBuild/api/json')
    j = r.json()
    args.build_no = j['number']

print("Searching for:", ('"' + args.search + '"'),
      "in console logs of job:", args.job,
      "between build", args.build_no - (args.no_of_builds - 1),
      "and", args.build_no, file=sys.stderr)

start_time = time.time()

for i in range(0, args.no_of_builds):
    print('\r >>> Current progress: {} '.format(str(i)), end='', flush=True,
          file=sys.stderr)
    r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
    result = r.text.find(args.search)
    if result != -1:
        resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')

print('\r Completed search in', (time.time() - start_time), 's',
      file=sys.stderr)

for url in resultURLs:
    print(url)
Add utility script for searching Jenkins console logs

This small python script can be used to quickly check the last 100 (or more
if you're willing to edit and wait) to see if a string is present within
the console log. This can help find instances of errors to help determine
intermittent failures from one off problems.

The script requires requests which is not included within kv itself (in
couchbase-cli) so you cannot run directly inside kv.

Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <[email protected]>
Reviewed-by: Dave Rigby <[email protected]>
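One practical refinement worth noting, not part of the commit: each requests.get() in the loop opens a fresh connection, while a requests.Session reuses one connection across all the fetches, which is usually noticeably faster. A hedged sketch with an illustrative job name, search string, and build range:

# Hedged sketch only: reuse one Session across the repeated console fetches.
# "example-job", the search string and the build range are illustrative.
import requests

session = requests.Session()
base = 'http://cv.jenkins.couchbase.com/job/example-job/'
matches = []
for build in range(1000, 990, -1):
    # Same URL shape as the script above, but over a persistent connection.
    text = session.get(base + str(build) + '/consoleText/').text
    if 'TIMEOUT' in text:
        matches.append(base + str(build) + '/console/')
print('\n'.join(matches))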
6941d9048a8c630244bb48100864872b35a1a307
tests/functional/test_layout_and_styling.py
tests/functional/test_layout_and_styling.py
import os

from .base import FunctionalTest


class LayoutStylingTest(FunctionalTest):

    def test_bootstrap_links_loaded_successfully(self):
        self.browser.get(self.live_server_url)
        self.assertIn(
            "//netdna.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css",
            self.browser.page_source.strip())
        self.assertIn(
            "//netdna.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js",
            self.browser.page_source.strip())
        self.assertIn(
            '//code.jquery.com/jquery.min.js',
            self.browser.page_source.strip())
from .base import FunctionalTest


class LayoutStylingTest(FunctionalTest):

    def test_bootstrap_links_loaded_successfully(self):
        self.browser.get(self.live_server_url)
        links = [link.get_attribute("href") for link in
                 self.browser.find_elements_by_tag_name('link')]
        scripts = [script.get_attribute("src") for script in
                   self.browser.find_elements_by_tag_name('script')]
        self.assertTrue(
            ["//netdna.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css"
             in link for link in links])
        self.assertTrue(
            ["//netdna.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"
             in link for link in links])
        self.assertTrue(
            ["//code.jquery.com/jquery.min.js" in link for link in scripts])
Fix bootstrap and jQuery link checking in homepage
Fix bootstrap and jQuery link checking in homepage
Python
bsd-3-clause
andela-kndungu/compshop,andela-kndungu/compshop,kevgathuku/compshop,kevgathuku/compshop,kevgathuku/compshop,kevgathuku/compshop,andela-kndungu/compshop,andela-kndungu/compshop
from .base import FunctionalTest


class LayoutStylingTest(FunctionalTest):

    def test_bootstrap_links_loaded_successfully(self):
        self.browser.get(self.live_server_url)
        links = [link.get_attribute("href") for link in
                 self.browser.find_elements_by_tag_name('link')]
        scripts = [script.get_attribute("src") for script in
                   self.browser.find_elements_by_tag_name('script')]
        self.assertTrue(
            ["//netdna.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css"
             in link for link in links])
        self.assertTrue(
            ["//netdna.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"
             in link for link in links])
        self.assertTrue(
            ["//code.jquery.com/jquery.min.js" in link for link in scripts])
Fix bootstrap and jQuery link checking in homepage

import os

from .base import FunctionalTest


class LayoutStylingTest(FunctionalTest):

    def test_bootstrap_links_loaded_successfully(self):
        self.browser.get(self.live_server_url)
        self.assertIn(
            "//netdna.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css",
            self.browser.page_source.strip())
        self.assertIn(
            "//netdna.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js",
            self.browser.page_source.strip())
        self.assertIn(
            '//code.jquery.com/jquery.min.js',
            self.browser.page_source.strip())
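A caveat on the new assertions: assertTrue applied to a list comprehension passes for any non-empty list, so these checks cannot fail once the page has at least one link or script tag; any() expresses the intended test. A standalone demonstration (editorial, not part of the commit):

# assertTrue(<list comprehension>) is vacuous: a non-empty list is truthy
# even when every element is False. any() checks what was meant.
links = ["http://example.com/other.css"]  # no bootstrap link at all
print(bool(["bootstrap.min.css" in link for link in links]))  # True (!)
print(any("bootstrap.min.css" in link for link in links))     # False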
33e23230315f7a922e50264948c42b2c68116cc2
numba/tests/issues/test_potential_gcc_error.py
numba/tests/issues/test_potential_gcc_error.py
# This tests a potential GCC 4.1.2 miscompile of LLVM.
# The problem is observed as an error in the greedy register allocation pass,
# which resulted in a segfault.
# No such problem in GCC 4.4.6.

from numba import *
import numpy as np


@jit(uint8[:,:](f8, f8, f8, f8, uint8[:,:], int32))
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
    return image
Add tests for the possible gcc miscompile error of LLVM in the mandel example.
Add tests for the possible gcc miscompile error of LLVM in the mandel example.
Python
bsd-2-clause
numba/numba,gmarkall/numba,pombredanne/numba,pitrou/numba,pitrou/numba,IntelLabs/numba,numba/numba,ssarangi/numba,gmarkall/numba,stuartarchibald/numba,jriehl/numba,stefanseefeld/numba,numba/numba,pitrou/numba,GaZ3ll3/numba,stonebig/numba,pitrou/numba,shiquanwang/numba,stefanseefeld/numba,stuartarchibald/numba,stonebig/numba,GaZ3ll3/numba,GaZ3ll3/numba,ssarangi/numba,stefanseefeld/numba,stonebig/numba,shiquanwang/numba,pitrou/numba,jriehl/numba,numba/numba,ssarangi/numba,gdementen/numba,gdementen/numba,numba/numba,stonebig/numba,sklam/numba,sklam/numba,stefanseefeld/numba,ssarangi/numba,IntelLabs/numba,GaZ3ll3/numba,pombredanne/numba,stuartarchibald/numba,jriehl/numba,stonebig/numba,gmarkall/numba,seibert/numba,cpcloud/numba,sklam/numba,stefanseefeld/numba,gdementen/numba,pombredanne/numba,stuartarchibald/numba,seibert/numba,jriehl/numba,seibert/numba,cpcloud/numba,IntelLabs/numba,ssarangi/numba,stuartarchibald/numba,cpcloud/numba,cpcloud/numba,gmarkall/numba,cpcloud/numba,sklam/numba,IntelLabs/numba,pombredanne/numba,gdementen/numba,seibert/numba,pombredanne/numba,IntelLabs/numba,GaZ3ll3/numba,seibert/numba,sklam/numba,jriehl/numba,gmarkall/numba,shiquanwang/numba,gdementen/numba
# This tests a potential GCC 4.1.2 miscompile of LLVM.
# The problem is observed as an error in the greedy register allocation pass,
# which resulted in a segfault.
# No such problem in GCC 4.4.6.

from numba import *
import numpy as np


@jit(uint8[:,:](f8, f8, f8, f8, uint8[:,:], int32))
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
    return image
Add tests for the possible gcc miscompile error of LLVM in the mandel example.
ff44e924a4f01bd39d4b26a39519bf55dd5e7560
ann.py
ann.py
class ANN:

    def __init__(self):
        pass

    def train(self):
        pass

    def predict(self):
        pass

    def update_weights(self):
        pass


class Layer:

    def __init__(self):
        pass
Add top down design of ANN and Layer
Add top down design of ANN and Layer
Python
apache-2.0
Razvy000/ANN_Course
class ANN:

    def __init__(self):
        pass

    def train(self):
        pass

    def predict(self):
        pass

    def update_weights(self):
        pass


class Layer:

    def __init__(self):
        pass
Add top down design of ANN and Layer
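One illustrative way the Layer skeleton could grow a forward pass; the weight shapes and the sigmoid activation are assumptions for the sketch, not taken from the commit:

# Editorial sketch: a Layer with a simple sigmoid forward pass. Shapes and
# activation are assumptions, not part of the committed design.
import math
import random


class Layer:
    def __init__(self, n_in, n_out):
        # one row of weights per output unit
        self.weights = [[random.uniform(-1, 1) for _ in range(n_in)]
                        for _ in range(n_out)]

    def forward(self, inputs):
        # weighted sum per unit, squashed through a sigmoid
        return [1.0 / (1.0 + math.exp(-sum(w * x for w, x in zip(row, inputs))))
                for row in self.weights]


print(Layer(3, 2).forward([0.5, -0.2, 0.1]))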
af599f0168096fa594773d3fac049869c31f8ecc
setup.py
setup.py
from setuptools import setup
import jasinja, sys

requires = ['Jinja2']
if sys.version_info < (2, 6):
    requirements += ['simplejson']

setup(
    name='jasinja',
    version=jasinja.__version__,
    url='http://bitbucket.org/djc/jasinja',
    license='BSD',
    author='Dirkjan Ochtman',
    author_email='[email protected]',
    description='A JavaScript code generator for Jinja templates',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=['jasinja', 'jasinja.tests'],
    package_data={'jasinja': ['*.js']},
    install_requires=requires,
    test_suite='jasinja.tests.run.suite',
    entry_points={
        'console_scripts': ['jasinja-compile = jasinja.compile:main'],
    },
)
from setuptools import setup
import jasinja, sys

requires = ['Jinja2']
if sys.version_info < (2, 6):
    requires += ['simplejson']

setup(
    name='jasinja',
    version=jasinja.__version__,
    url='http://bitbucket.org/djc/jasinja',
    license='BSD',
    author='Dirkjan Ochtman',
    author_email='[email protected]',
    description='A JavaScript code generator for Jinja templates',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=['jasinja', 'jasinja.tests'],
    package_data={'jasinja': ['*.js']},
    install_requires=requires,
    test_suite='jasinja.tests.run.suite',
    entry_points={
        'console_scripts': ['jasinja-compile = jasinja.compile:main'],
    },
)
Fix stupid typo in requirements specification.
Fix stupid typo in requirements specification.
Python
bsd-3-clause
djc/jasinja,djc/jasinja
from setuptools import setup
import jasinja, sys

requires = ['Jinja2']
if sys.version_info < (2, 6):
    requires += ['simplejson']

setup(
    name='jasinja',
    version=jasinja.__version__,
    url='http://bitbucket.org/djc/jasinja',
    license='BSD',
    author='Dirkjan Ochtman',
    author_email='[email protected]',
    description='A JavaScript code generator for Jinja templates',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=['jasinja', 'jasinja.tests'],
    package_data={'jasinja': ['*.js']},
    install_requires=requires,
    test_suite='jasinja.tests.run.suite',
    entry_points={
        'console_scripts': ['jasinja-compile = jasinja.compile:main'],
    },
)
Fix stupid typo in requirements specification.

from setuptools import setup
import jasinja, sys

requires = ['Jinja2']
if sys.version_info < (2, 6):
    requirements += ['simplejson']

setup(
    name='jasinja',
    version=jasinja.__version__,
    url='http://bitbucket.org/djc/jasinja',
    license='BSD',
    author='Dirkjan Ochtman',
    author_email='[email protected]',
    description='A JavaScript code generator for Jinja templates',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=['jasinja', 'jasinja.tests'],
    package_data={'jasinja': ['*.js']},
    install_requires=requires,
    test_suite='jasinja.tests.run.suite',
    entry_points={
        'console_scripts': ['jasinja-compile = jasinja.compile:main'],
    },
)
5e03af4b0f920e97507b3ada6b4b925136ddbf07
froide/upload/serializers.py
froide/upload/serializers.py
from rest_framework import serializers

from .models import Upload


class UploadSerializer(serializers.ModelSerializer):
    class Meta:
        model = Upload
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['guid'].required = True
from rest_framework import serializers

from .models import Upload


class UploadSerializer(serializers.ModelSerializer):
    class Meta:
        model = Upload
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        '''
        Add required marker, so OpenAPI schema generator
        can remove it again -.-
        '''
        super().__init__(*args, **kwargs)
        self.fields['guid'].required = True
Add some documentation for weird init
Add some documentation for weird init
Python
mit
fin/froide,stefanw/froide,stefanw/froide,fin/froide,fin/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide
from rest_framework import serializers

from .models import Upload


class UploadSerializer(serializers.ModelSerializer):
    class Meta:
        model = Upload
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        '''
        Add required marker, so OpenAPI schema generator
        can remove it again -.-
        '''
        super().__init__(*args, **kwargs)
        self.fields['guid'].required = True
Add some documentation for weird init

from rest_framework import serializers

from .models import Upload


class UploadSerializer(serializers.ModelSerializer):
    class Meta:
        model = Upload
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['guid'].required = True
f0e11b0743c2779f61970917da6eef859149f600
taar/recommenders/utils.py
taar/recommenders/utils.py
import json
import os
from tempfile import gettempdir

import boto3
from botocore.exceptions import ClientError
import requests


def fetch_json(uri):
    """ Perform an HTTP GET on the given uri, return the results as json.

    Args:
        uri: the string URI to fetch.

    Returns:
        A JSON object with the response or None if the status code of the
        response is an error code.
    """
    r = requests.get(uri)
    if r.status_code != requests.codes.ok:
        return None

    return r.json()


def get_s3_json_content(s3_bucket, s3_key):
    """Download and parse a json file stored on AWS S3.

    The file is downloaded and then cached for future use.
    """
    local_filename = '_'.join([s3_bucket, s3_key]).replace('/', '_')
    local_path = os.path.join(gettempdir(), local_filename)

    if not os.path.exists(local_path):
        with open(local_path, 'wb') as data:
            try:
                s3 = boto3.client('s3')
                s3.download_fileobj(s3_bucket, s3_key, data)
            except ClientError:
                return None

        with open(local_path, 'r') as data:
            return json.loads(data.read())
import json
import os
from tempfile import gettempdir

import boto3
from botocore.exceptions import ClientError
import requests


def fetch_json(uri):
    """ Perform an HTTP GET on the given uri, return the results as json.

    Args:
        uri: the string URI to fetch.

    Returns:
        A JSON object with the response or None if the status code of the
        response is an error code.
    """
    r = requests.get(uri)
    if r.status_code != requests.codes.ok:
        return None

    return r.json()


def get_s3_json_content(s3_bucket, s3_key):
    """Download and parse a json file stored on AWS S3.

    The file is downloaded and then cached for future use.
    """
    local_filename = '_'.join([s3_bucket, s3_key]).replace('/', '_')
    local_path = os.path.join(gettempdir(), local_filename)

    if not os.path.exists(local_path):
        with open(local_path, 'wb') as data:
            try:
                s3 = boto3.client('s3')
                s3.download_fileobj(s3_bucket, s3_key, data)
            except ClientError:
                return None

    with open(local_path, 'r') as data:
        return json.loads(data.read())
Make sure to load the S3 cache file when available
Make sure to load the S3 cache file when available
Python
mpl-2.0
maurodoglio/taar
import json
import os
from tempfile import gettempdir

import boto3
from botocore.exceptions import ClientError
import requests


def fetch_json(uri):
    """ Perform an HTTP GET on the given uri, return the results as json.

    Args:
        uri: the string URI to fetch.

    Returns:
        A JSON object with the response or None if the status code of the
        response is an error code.
    """
    r = requests.get(uri)
    if r.status_code != requests.codes.ok:
        return None

    return r.json()


def get_s3_json_content(s3_bucket, s3_key):
    """Download and parse a json file stored on AWS S3.

    The file is downloaded and then cached for future use.
    """
    local_filename = '_'.join([s3_bucket, s3_key]).replace('/', '_')
    local_path = os.path.join(gettempdir(), local_filename)

    if not os.path.exists(local_path):
        with open(local_path, 'wb') as data:
            try:
                s3 = boto3.client('s3')
                s3.download_fileobj(s3_bucket, s3_key, data)
            except ClientError:
                return None

    with open(local_path, 'r') as data:
        return json.loads(data.read())
Make sure to load the S3 cache file when available

import json
import os
from tempfile import gettempdir

import boto3
from botocore.exceptions import ClientError
import requests


def fetch_json(uri):
    """ Perform an HTTP GET on the given uri, return the results as json.

    Args:
        uri: the string URI to fetch.

    Returns:
        A JSON object with the response or None if the status code of the
        response is an error code.
    """
    r = requests.get(uri)
    if r.status_code != requests.codes.ok:
        return None

    return r.json()


def get_s3_json_content(s3_bucket, s3_key):
    """Download and parse a json file stored on AWS S3.

    The file is downloaded and then cached for future use.
    """
    local_filename = '_'.join([s3_bucket, s3_key]).replace('/', '_')
    local_path = os.path.join(gettempdir(), local_filename)

    if not os.path.exists(local_path):
        with open(local_path, 'wb') as data:
            try:
                s3 = boto3.client('s3')
                s3.download_fileobj(s3_bucket, s3_key, data)
            except ClientError:
                return None

        with open(local_path, 'r') as data:
            return json.loads(data.read())
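A further hardening idea, not part of this commit: opening the cache with open(local_path, 'wb') before a download that can fail leaves a zero-byte file behind, which later calls would treat as a valid cache. A common remedy is to download into a temporary file and rename it into place on success; a hedged sketch (error handling elided):

# Hedged sketch of an atomic cache write; boto3 usage mirrors the snippet
# above, cache_s3_file is an illustrative name.
import os
import tempfile

import boto3


def cache_s3_file(s3_bucket, s3_key, local_path):
    if not os.path.exists(local_path):
        fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(local_path))
        with os.fdopen(fd, 'wb') as tmp:
            boto3.client('s3').download_fileobj(s3_bucket, s3_key, tmp)
        os.rename(tmp_path, local_path)  # atomic on POSIX: no partial cache
    return local_path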
b9b5af6bc8da56caadf74e75b833338330305779
setup.py
setup.py
from setuptools import setup, find_packages
import sys, os

here = os.path.abspath(os.path.dirname(__file__))
try:
    README = open(os.path.join(here, 'README.rst')).read()
except IOError:
    README = ''

version = "0.1.0"

test_requirements = [
    'nose',
    'webtest',
]

setup(name='tgext.mailer',
      version=version,
      description="TurboGears extension for sending emails with transaction manager integration",
      long_description=README,
      classifiers=[
          "Environment :: Web Environment",
          "Framework :: TurboGears"
      ],
      keywords='turbogears2.extension',
      author='Alessandro Molina',
      author_email='[email protected]',
      url='https://github.com/amol-/tgext.mailer',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tgext.mailer.tests']),
      namespace_packages = ['tgext'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "TurboGears2 >= 2.3.2",
          "repoze.sendmail == 4.1",
      ],
      extras_require={
          # Used by Travis and Coverage due to setup.py nosetests
          # causing a coredump when used with coverage
          'testing': test_requirements,
      },
      test_suite='nose.collector',
      tests_require=test_requirements,
      entry_points="""
      # -*- Entry points: -*-
      """,
      )
from setuptools import setup, find_packages
import sys, os

here = os.path.abspath(os.path.dirname(__file__))
try:
    README = open(os.path.join(here, 'README.rst')).read()
except IOError:
    README = ''

version = "0.1.0"

test_requirements = [
    'nose',
    'webtest',
]

setup(name='tgext.mailer',
      version=version,
      description="TurboGears extension for sending emails with transaction manager integration",
      long_description=README,
      classifiers=[
          "Environment :: Web Environment",
          "Framework :: TurboGears"
      ],
      keywords='turbogears2.extension',
      author='Alessandro Molina',
      author_email='[email protected]',
      url='https://github.com/amol-/tgext.mailer',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tgext.mailer.tests']),
      namespace_packages = ['tgext'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "TurboGears2 >= 2.3.2",
          "repoze.sendmail == 4.3",
      ],
      extras_require={
          # Used by Travis and Coverage due to setup.py nosetests
          # causing a coredump when used with coverage
          'testing': test_requirements,
      },
      test_suite='nose.collector',
      tests_require=test_requirements,
      entry_points="""
      # -*- Entry points: -*-
      """,
      )
Upgrade to sendmail 4.3, fixed old bug with transaction
Upgrade to sendmail 4.3, fixed old bug with transaction
Python
mit
amol-/tgext.mailer
from setuptools import setup, find_packages
import sys, os

here = os.path.abspath(os.path.dirname(__file__))
try:
    README = open(os.path.join(here, 'README.rst')).read()
except IOError:
    README = ''

version = "0.1.0"

test_requirements = [
    'nose',
    'webtest',
]

setup(name='tgext.mailer',
      version=version,
      description="TurboGears extension for sending emails with transaction manager integration",
      long_description=README,
      classifiers=[
          "Environment :: Web Environment",
          "Framework :: TurboGears"
      ],
      keywords='turbogears2.extension',
      author='Alessandro Molina',
      author_email='[email protected]',
      url='https://github.com/amol-/tgext.mailer',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tgext.mailer.tests']),
      namespace_packages = ['tgext'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "TurboGears2 >= 2.3.2",
          "repoze.sendmail == 4.3",
      ],
      extras_require={
          # Used by Travis and Coverage due to setup.py nosetests
          # causing a coredump when used with coverage
          'testing': test_requirements,
      },
      test_suite='nose.collector',
      tests_require=test_requirements,
      entry_points="""
      # -*- Entry points: -*-
      """,
      )
Upgrade to sendmail 4.3, fixed old bug with transaction

from setuptools import setup, find_packages
import sys, os

here = os.path.abspath(os.path.dirname(__file__))
try:
    README = open(os.path.join(here, 'README.rst')).read()
except IOError:
    README = ''

version = "0.1.0"

test_requirements = [
    'nose',
    'webtest',
]

setup(name='tgext.mailer',
      version=version,
      description="TurboGears extension for sending emails with transaction manager integration",
      long_description=README,
      classifiers=[
          "Environment :: Web Environment",
          "Framework :: TurboGears"
      ],
      keywords='turbogears2.extension',
      author='Alessandro Molina',
      author_email='[email protected]',
      url='https://github.com/amol-/tgext.mailer',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tgext.mailer.tests']),
      namespace_packages = ['tgext'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "TurboGears2 >= 2.3.2",
          "repoze.sendmail == 4.1",
      ],
      extras_require={
          # Used by Travis and Coverage due to setup.py nosetests
          # causing a coredump when used with coverage
          'testing': test_requirements,
      },
      test_suite='nose.collector',
      tests_require=test_requirements,
      entry_points="""
      # -*- Entry points: -*-
      """,
      )
ab42c5e8c3ac51c65ed7229dafb751c7baa667aa
examples/mnist-rica.py
examples/mnist-rica.py
#!/usr/bin/env python

import matplotlib.pyplot as plt
import numpy as np
import theanets

from utils import load_mnist, plot_layers, plot_images


class RICA(theanets.Autoencoder):
    def J(self, weight_inverse=0, **kwargs):
        cost = super(RICA, self).J(**kwargs)
        if weight_inverse > 0:
            cost += sum((weight_inverse / (w * w).sum(axis=0)).sum()
                        for w in self.weights)
        return cost


train, valid, _ = load_mnist()

# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197  # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]


def whiten(x):
    return np.dot(x, np.dot(vecs, np.diag(1. / vals)))


def color(z):
    return np.dot(z, np.dot(np.diag(vals), vecs.T))


# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
    RICA,
    layers=(K, N * N, K),
    activation='linear',
    hidden_l1=0.2,
    no_learn_biases=True,
    tied_weights=True,
    train_batches=100,
    weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))

# color the network weights so they are viewable as digits.
plot_layers(
    [color(e.network.weights[0].get_value().T).T],
    tied_weights=True)
plt.tight_layout()
plt.show()

plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
    color(e.network.predict(whiten(valid[:N*N]))),
    122, 'Reconstructed data')
plt.tight_layout()
plt.show()
Add an example for computing sparse codes using RICA.
Add an example for computing sparse codes using RICA.
Python
mit
lmjohns3/theanets,chrinide/theanets,devdoer/theanets
#!/usr/bin/env python

import matplotlib.pyplot as plt
import numpy as np
import theanets

from utils import load_mnist, plot_layers, plot_images


class RICA(theanets.Autoencoder):
    def J(self, weight_inverse=0, **kwargs):
        cost = super(RICA, self).J(**kwargs)
        if weight_inverse > 0:
            cost += sum((weight_inverse / (w * w).sum(axis=0)).sum()
                        for w in self.weights)
        return cost


train, valid, _ = load_mnist()

# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197  # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]


def whiten(x):
    return np.dot(x, np.dot(vecs, np.diag(1. / vals)))


def color(z):
    return np.dot(z, np.dot(np.diag(vals), vecs.T))


# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
    RICA,
    layers=(K, N * N, K),
    activation='linear',
    hidden_l1=0.2,
    no_learn_biases=True,
    tied_weights=True,
    train_batches=100,
    weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))

# color the network weights so they are viewable as digits.
plot_layers(
    [color(e.network.weights[0].get_value().T).T],
    tied_weights=True)
plt.tight_layout()
plt.show()

plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
    color(e.network.predict(whiten(valid[:N*N]))),
    122, 'Reconstructed data')
plt.tight_layout()
plt.show()
Add an example for computing sparse codes using RICA.
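The pca whitening step above can be sanity-checked in isolation: after projecting onto the eigenvectors and dividing by the square roots of the eigenvalues, the empirical covariance becomes the identity. A small self-contained check, with random data standing in for MNIST:

# Self-contained check of pca whitening: the whitened covariance is ~I.
import numpy as np

x = np.random.randn(5000, 10) @ np.random.randn(10, 10)  # correlated data
x -= x.mean(axis=0)
vals, vecs = np.linalg.eigh(x.T @ x / len(x))            # cov = V diag(vals) V^T
z = x @ vecs @ np.diag(1.0 / np.sqrt(vals))              # whiten
print(np.allclose(z.T @ z / len(z), np.eye(10), atol=1e-6))  # True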
315ad5f2f31f82f8d42d2a65fe4f056b4e3fcfd7
tests/test_quickstart.py
tests/test_quickstart.py
import pytest

from lektor.quickstart import get_default_author
from lektor.quickstart import get_default_author_email
from lektor.utils import locate_executable


def test_default_author(os_user):
    assert get_default_author() == "Lektor Test"


@pytest.mark.skipif(locate_executable("git") is None, reason="git not installed")
def test_default_author_email():
    assert isinstance(get_default_author_email(), str)
import os

import pytest

from lektor.quickstart import get_default_author
from lektor.quickstart import get_default_author_email
from lektor.utils import locate_executable


def test_default_author(os_user):
    assert get_default_author() == "Lektor Test"


@pytest.mark.skipif(locate_executable("git") is None, reason="git not installed")
def test_default_author_email():
    assert isinstance(get_default_author_email(), str)


def test_default_author_email_git_unavailable(monkeypatch):
    monkeypatch.setitem(os.environ, "PATH", "/dev/null")
    locate_executable.cache_clear()
    assert get_default_author_email() is None
Add test case for when git is not available
Add test case for when git is not available
Python
bsd-3-clause
lektor/lektor,lektor/lektor,lektor/lektor,lektor/lektor
import os

import pytest

from lektor.quickstart import get_default_author
from lektor.quickstart import get_default_author_email
from lektor.utils import locate_executable


def test_default_author(os_user):
    assert get_default_author() == "Lektor Test"


@pytest.mark.skipif(locate_executable("git") is None, reason="git not installed")
def test_default_author_email():
    assert isinstance(get_default_author_email(), str)


def test_default_author_email_git_unavailable(monkeypatch):
    monkeypatch.setitem(os.environ, "PATH", "/dev/null")
    locate_executable.cache_clear()
    assert get_default_author_email() is None
Add test case for when git is not available

import pytest

from lektor.quickstart import get_default_author
from lektor.quickstart import get_default_author_email
from lektor.utils import locate_executable


def test_default_author(os_user):
    assert get_default_author() == "Lektor Test"


@pytest.mark.skipif(locate_executable("git") is None, reason="git not installed")
def test_default_author_email():
    assert isinstance(get_default_author_email(), str)
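The new test leans on a general pattern: a function memoized with functools.lru_cache keeps returning its first answer, so a test that changes the environment must clear the cache first. A generic sketch with illustrative names (not Lektor's):

# Generic lru_cache-plus-monkeypatch pattern; find_tool is illustrative.
import functools
import os


@functools.lru_cache(maxsize=None)
def find_tool():
    return os.environ.get('PATH')


os.environ['PATH'] = '/usr/bin'
first = find_tool()               # cached now
os.environ['PATH'] = '/dev/null'
find_tool.cache_clear()           # without this, the stale value survives
assert find_tool() == '/dev/null'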
8b847215c2ae071a4a2e402167e20fdd641b222d
xenserver/destroy_cached_images.py
xenserver/destroy_cached_images.py
""" destroy_cached_images.py This script is used to clean up Glance images that are cached in the SR. By default, this script will only cleanup unused cached images. Options: --dry_run - Don't actually destroy the VDIs --all_cached - Destroy all cached images instead of just unused cached images. """ import eventlet eventlet.monkey_patch() import os import sys # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova import utils from nova.virt.xenapi import driver as xenapi_driver from nova.virt.xenapi import vm_utils FLAGS = flags.FLAGS destroy_opts = [ cfg.BoolOpt('all_cached', default=False, help='Destroy all cached images instead of just unused cached' ' images.'), cfg.BoolOpt('dry_run', default=False, help='Don\'t actually delete the VDIs.') ] FLAGS.register_cli_opts(destroy_opts) def main(): flags.parse_args(sys.argv) utils.monkey_patch() xenapi = xenapi_driver.XenAPIDriver() session = xenapi._session sr_ref = vm_utils.safe_find_sr(session) destroyed = vm_utils.destroy_cached_images( session, sr_ref, all_cached=FLAGS.all_cached, dry_run=FLAGS.dry_run) if '--verbose' in sys.argv: print '\n'.join(destroyed) print "Destroyed %d cached VDIs" % len(destroyed) if __name__ == "__main__": main()
Add script to destroy cached images.
XenAPI: Add script to destroy cached images.

Operations will want the ability to clear out cached images when
disk-space becomes an issue. This script allows ops to clear out all
cached images or just cached images that aren't in current use.

Change-Id: If87bd10ef3f893c416d2f0615358ba65aef17a2d
Python
apache-2.0
emonty/oslo-hacking,zancas/hacking,zancas/hacking,emonty/oslo-hacking,hyakuhei/cleantox,hyakuhei/cleantox,openstack-dev/hacking,openstack-dev/hacking
""" destroy_cached_images.py This script is used to clean up Glance images that are cached in the SR. By default, this script will only cleanup unused cached images. Options: --dry_run - Don't actually destroy the VDIs --all_cached - Destroy all cached images instead of just unused cached images. """ import eventlet eventlet.monkey_patch() import os import sys # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova import utils from nova.virt.xenapi import driver as xenapi_driver from nova.virt.xenapi import vm_utils FLAGS = flags.FLAGS destroy_opts = [ cfg.BoolOpt('all_cached', default=False, help='Destroy all cached images instead of just unused cached' ' images.'), cfg.BoolOpt('dry_run', default=False, help='Don\'t actually delete the VDIs.') ] FLAGS.register_cli_opts(destroy_opts) def main(): flags.parse_args(sys.argv) utils.monkey_patch() xenapi = xenapi_driver.XenAPIDriver() session = xenapi._session sr_ref = vm_utils.safe_find_sr(session) destroyed = vm_utils.destroy_cached_images( session, sr_ref, all_cached=FLAGS.all_cached, dry_run=FLAGS.dry_run) if '--verbose' in sys.argv: print '\n'.join(destroyed) print "Destroyed %d cached VDIs" % len(destroyed) if __name__ == "__main__": main()
XenAPI: Add script to destroy cached images.

Operations will want the ability to clear out cached images when
disk-space becomes an issue. This script allows ops to clear out all
cached images or just cached images that aren't in current use.

Change-Id: If87bd10ef3f893c416d2f0615358ba65aef17a2d
fc89664fd75f787b03953d8eac3ec99b6fdf19de
lesson5/exceptions_except.py
lesson5/exceptions_except.py
def take_beer(fridge, number=1):
    if "beer" not in fridge:
        raise Exception("No beer at all:(")
    if number > fridge["beer"]:
        raise Exception("Not enough beer:(")
    fridge["beer"] -= number


if __name__ == "__main__":
    fridge = {
        "beer": 2,
        "milk": 1,
        "meat": 3,
    }
    print("I wanna drink 1 bottle of beer...")
    take_beer(fridge)
    print("Oooh, great!")

    print("I wanna drink 2 bottle of beer...")
    try:
        take_beer(fridge, 2)
    except Exception as e:
        print("Error: {}. Let's continue".format(e))

    print("Fallback. Try to take 1 bottle of beer...")
    take_beer(fridge, 1)
    print("Oooh, awesome!")
Add yet another script for showing how except works
Add yet another script for showing how except works
Python
bsd-2-clause
drednout/letspython,drednout/letspython
def take_beer(fridge, number=1):
    if "beer" not in fridge:
        raise Exception("No beer at all:(")
    if number > fridge["beer"]:
        raise Exception("Not enough beer:(")
    fridge["beer"] -= number


if __name__ == "__main__":
    fridge = {
        "beer": 2,
        "milk": 1,
        "meat": 3,
    }
    print("I wanna drink 1 bottle of beer...")
    take_beer(fridge)
    print("Oooh, great!")

    print("I wanna drink 2 bottle of beer...")
    try:
        take_beer(fridge, 2)
    except Exception as e:
        print("Error: {}. Let's continue".format(e))

    print("Fallback. Try to take 1 bottle of beer...")
    take_beer(fridge, 1)
    print("Oooh, awesome!")
Add yet another script for showing how except works
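A natural follow-on for the lesson, not part of the original file: try/except also supports else (runs only when nothing was raised) and finally (always runs). A sketch reusing the fridge idea:

# Editorial sketch extending the lesson with else/finally clauses.
fridge = {"beer": 1}

try:
    fridge["beer"] -= 1
except KeyError as e:
    print("No such shelf: {}".format(e))
else:
    print("Took a beer, no exception raised")   # runs on success only
finally:
    print("Closing the fridge either way")      # runs in both cases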
d7299fd931ae62cc661b48dbc84aa161a395f1fa
fermipy/__init__.py
fermipy/__init__.py
import os

__version__ = "unknown"

try:
    from version import get_git_version
    __version__ = get_git_version()
except Exception as message:
    print(message)

__author__ = "Matthew Wood"

PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DATA = os.path.join(PACKAGE_ROOT,'data')
os.environ['FERMIPY_ROOT'] = PACKAGE_ROOT
os.environ['FERMIPY_DATA_DIR'] = PACKAGE_DATA
from __future__ import absolute_import, division, print_function

import os

__version__ = "unknown"

try:
    from .version import get_git_version
    __version__ = get_git_version()
except Exception as message:
    print(message)

__author__ = "Matthew Wood"

PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DATA = os.path.join(PACKAGE_ROOT,'data')
os.environ['FERMIPY_ROOT'] = PACKAGE_ROOT
os.environ['FERMIPY_DATA_DIR'] = PACKAGE_DATA
Fix version module import for Python 3
Fix version module import for Python 3
Python
bsd-3-clause
jefemagril/fermipy,jefemagril/fermipy,jefemagril/fermipy,fermiPy/fermipy
from __future__ import absolute_import, division, print_function

import os

__version__ = "unknown"

try:
    from .version import get_git_version
    __version__ = get_git_version()
except Exception as message:
    print(message)

__author__ = "Matthew Wood"

PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DATA = os.path.join(PACKAGE_ROOT,'data')
os.environ['FERMIPY_ROOT'] = PACKAGE_ROOT
os.environ['FERMIPY_DATA_DIR'] = PACKAGE_DATA
Fix version module import for Python 3

import os

__version__ = "unknown"

try:
    from version import get_git_version
    __version__ = get_git_version()
except Exception as message:
    print(message)

__author__ = "Matthew Wood"

PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DATA = os.path.join(PACKAGE_ROOT,'data')
os.environ['FERMIPY_ROOT'] = PACKAGE_ROOT
os.environ['FERMIPY_DATA_DIR'] = PACKAGE_DATA
7d45fd15f9d2fa4e0d830e7f404fb77d531adc29
examples/test-combo-box.py
examples/test-combo-box.py
""" This test is adopted form nbtk, but since it's summer it uses Munich's most famous Beergarden instead of places in London ;) """ import clutter import nbtk def title_changed_cb(box, pspec): print 'title now:', box.get_title() def index_changed_cb(box, pspec): print 'index now:', box.get_index() def stage_key_press_cb(actor, event, box): from clutter import keysyms if event.keyval == keysyms.r: box.set_title('Munich') elif event.keyval >= ord('0') and event.keyval <= ord('9'): box.set_index(event.keyval - 48) if __name__ == '__main__': stage = clutter.Stage() stage.connect('destroy', clutter.main_quit) combo = nbtk.ComboBox() stage.add(combo) combo.set_title('Munich') combo.append_text('Augustinerkeller') combo.append_text('Hirschgarten') combo.append_text('Nockherberg') combo.append_text('Seehaus') combo.append_text('Chinesischer Turm') combo.append_text('Zum Flaucher') combo.connect('notify::title', title_changed_cb) combo.connect('notify::index', index_changed_cb) stage.connect('key-press-event', stage_key_press_cb, combo) stage.show() clutter.main()
Add a simple test for ComboBox
Add a simple test for ComboBox
Python
lgpl-2.1
buztard/mxpy,buztard/mxpy,buztard/mxpy
""" This test is adopted form nbtk, but since it's summer it uses Munich's most famous Beergarden instead of places in London ;) """ import clutter import nbtk def title_changed_cb(box, pspec): print 'title now:', box.get_title() def index_changed_cb(box, pspec): print 'index now:', box.get_index() def stage_key_press_cb(actor, event, box): from clutter import keysyms if event.keyval == keysyms.r: box.set_title('Munich') elif event.keyval >= ord('0') and event.keyval <= ord('9'): box.set_index(event.keyval - 48) if __name__ == '__main__': stage = clutter.Stage() stage.connect('destroy', clutter.main_quit) combo = nbtk.ComboBox() stage.add(combo) combo.set_title('Munich') combo.append_text('Augustinerkeller') combo.append_text('Hirschgarten') combo.append_text('Nockherberg') combo.append_text('Seehaus') combo.append_text('Chinesischer Turm') combo.append_text('Zum Flaucher') combo.connect('notify::title', title_changed_cb) combo.connect('notify::index', index_changed_cb) stage.connect('key-press-event', stage_key_press_cb, combo) stage.show() clutter.main()
Add a simple test for ComboBox
f3eee368e13ee37048d52bde0d067efea057fef8
monkeylearn/extraction.py
monkeylearn/extraction.py
# -*- coding: utf-8 -*-
from __future__ import (
    print_function, unicode_literals, division, absolute_import)

from six.moves import range

from monkeylearn.utils import SleepRequestsMixin, MonkeyLearnResponse, HandleErrorsMixin
from monkeylearn.settings import DEFAULT_BASE_ENDPOINT, DEFAULT_BATCH_SIZE


class Extraction(SleepRequestsMixin, HandleErrorsMixin):

    def __init__(self, token, base_endpoint=DEFAULT_BASE_ENDPOINT):
        self.token = token
        self.endpoint = base_endpoint + 'extractors/'

    def extract(self, module_id, text_list, batch_size=DEFAULT_BATCH_SIZE,
                sleep_if_throttled=True):
        text_list = list(text_list)
        self.check_batch_limits(text_list, batch_size)
        url = self.endpoint + module_id + '/extract/'

        res = []
        responses = []
        for i in range(0, len(text_list), batch_size):
            data = {
                'text_list': text_list[i:i+batch_size]
            }
            response = self.make_request(url, 'POST', data, sleep_if_throttled)
            self.handle_errors(response)
            responses.append(response)
            res.extend(response.json()['result'])

        return MonkeyLearnResponse(res, responses)
# -*- coding: utf-8 -*-
from __future__ import (
    print_function, unicode_literals, division, absolute_import)

from six.moves import range

from monkeylearn.utils import SleepRequestsMixin, MonkeyLearnResponse, HandleErrorsMixin
from monkeylearn.settings import DEFAULT_BASE_ENDPOINT, DEFAULT_BATCH_SIZE


class Extraction(SleepRequestsMixin, HandleErrorsMixin):

    def __init__(self, token, base_endpoint=DEFAULT_BASE_ENDPOINT):
        self.token = token
        self.endpoint = base_endpoint + 'extractors/'

    # **kwargs added here: the loop below references kwargs, which would
    # otherwise raise a NameError.
    def extract(self, module_id, text_list, batch_size=DEFAULT_BATCH_SIZE,
                sleep_if_throttled=True, **kwargs):
        text_list = list(text_list)
        self.check_batch_limits(text_list, batch_size)
        url = self.endpoint + module_id + '/extract/'

        res = []
        responses = []
        for i in range(0, len(text_list), batch_size):
            data = {
                'text_list': text_list[i:i+batch_size]
            }
            if kwargs is not None:
                for key, value in kwargs.iteritems():
                    data[key] = value
            response = self.make_request(url, 'POST', data, sleep_if_throttled)
            self.handle_errors(response)
            responses.append(response)
            res.extend(response.json()['result'])

        return MonkeyLearnResponse(res, responses)
Support for extra parameters in extractors
Support for extra parameters in extractors
Python
mit
monkeylearn/monkeylearn-python
# -*- coding: utf-8 -*-
from __future__ import (
    print_function, unicode_literals, division, absolute_import)

from six.moves import range

from monkeylearn.utils import SleepRequestsMixin, MonkeyLearnResponse, HandleErrorsMixin
from monkeylearn.settings import DEFAULT_BASE_ENDPOINT, DEFAULT_BATCH_SIZE


class Extraction(SleepRequestsMixin, HandleErrorsMixin):

    def __init__(self, token, base_endpoint=DEFAULT_BASE_ENDPOINT):
        self.token = token
        self.endpoint = base_endpoint + 'extractors/'

    # **kwargs added here: the loop below references kwargs, which would
    # otherwise raise a NameError.
    def extract(self, module_id, text_list, batch_size=DEFAULT_BATCH_SIZE,
                sleep_if_throttled=True, **kwargs):
        text_list = list(text_list)
        self.check_batch_limits(text_list, batch_size)
        url = self.endpoint + module_id + '/extract/'

        res = []
        responses = []
        for i in range(0, len(text_list), batch_size):
            data = {
                'text_list': text_list[i:i+batch_size]
            }
            if kwargs is not None:
                for key, value in kwargs.iteritems():
                    data[key] = value
            response = self.make_request(url, 'POST', data, sleep_if_throttled)
            self.handle_errors(response)
            responses.append(response)
            res.extend(response.json()['result'])

        return MonkeyLearnResponse(res, responses)
Support for extra parameters in extractors

# -*- coding: utf-8 -*-
from __future__ import (
    print_function, unicode_literals, division, absolute_import)

from six.moves import range

from monkeylearn.utils import SleepRequestsMixin, MonkeyLearnResponse, HandleErrorsMixin
from monkeylearn.settings import DEFAULT_BASE_ENDPOINT, DEFAULT_BATCH_SIZE


class Extraction(SleepRequestsMixin, HandleErrorsMixin):

    def __init__(self, token, base_endpoint=DEFAULT_BASE_ENDPOINT):
        self.token = token
        self.endpoint = base_endpoint + 'extractors/'

    def extract(self, module_id, text_list, batch_size=DEFAULT_BATCH_SIZE,
                sleep_if_throttled=True):
        text_list = list(text_list)
        self.check_batch_limits(text_list, batch_size)
        url = self.endpoint + module_id + '/extract/'

        res = []
        responses = []
        for i in range(0, len(text_list), batch_size):
            data = {
                'text_list': text_list[i:i+batch_size]
            }
            response = self.make_request(url, 'POST', data, sleep_if_throttled)
            self.handle_errors(response)
            responses.append(response)
            res.extend(response.json()['result'])

        return MonkeyLearnResponse(res, responses)
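Note that the added loop spells dict iteration as iteritems(), which exists only on Python 2 even though the module already depends on six. A version-agnostic sketch of the same merge, with an illustrative extractor parameter:

# Editorial sketch: the same key/value merge, portable across Python 2 and
# 3 via six (already a dependency here). The parameter name is illustrative.
import six

data = {'text_list': ['some text']}
extra = {'use_global_category': True}   # hypothetical extra parameter
for key, value in six.iteritems(extra):
    data[key] = value
print(data)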
fc94d60066692e6e8dc496bb854039bb66af3311
scout.py
scout.py
# Python does not require explicit interfaces,
# but I believe that code which does is more
# maintainable. Thus I include this explicit
# interface for Problems.
class Problem:
    def getStartState(self):
        return None

    def getEndState(self):
        return None

    def isValidState(self, state):
        return False

    def getSuccessors(self, state):
        return []

    def getStringRepr(self, state):
        return "BadProblem"


def search(problem):
    print "Searching..."


if (__name__ == '__main__'):
    problem = Problem();
    search(problem)
# Python does not require explicit interfaces,
# but I believe that code which does is more
# maintainable. Thus I include this explicit
# interface for Problems.
class Problem:
    def getStartState(self):
        return None

    def getEndState(self):
        return None

    def isValidState(self, state):
        return False

    def getSuccessors(self, state):
        return []

    def getStringRepr(self, state):
        return "BadProblem"


class SquareProblem(Problem):
    def __init__(self, size):
        self.size = size

    def getStartState(self):
        return (0, 0)

    def getEndState(self):
        return (self.size, self.size)

    def isValidState(self, state):
        return 0 <= state[0] <= self.size and 0 <= state[1] <= self.size

    def getSuccessors(self, state):
        return [(state[0]+dx, state[1]+dy)
                for (dx, dy) in [(1, 0), (0, 1), (-1, 0), (0, -1)]]

    def getStringRepr(self, state):
        return "(%d, %d)" % state


def search(problem):
    print "Searching..."


if (__name__ == '__main__'):
    problem = SquareProblem(2);
    search(problem)
Add a simple problem for testing
Add a simple problem for testing
Python
mit
SpexGuy/Scout
# Python does not require explicit interfaces,
# but I believe that code which does is more
# maintainable. Thus I include this explicit
# interface for Problems.
class Problem:
    def getStartState(self):
        return None

    def getEndState(self):
        return None

    def isValidState(self, state):
        return False

    def getSuccessors(self, state):
        return []

    def getStringRepr(self, state):
        return "BadProblem"


class SquareProblem(Problem):
    def __init__(self, size):
        self.size = size

    def getStartState(self):
        return (0, 0)

    def getEndState(self):
        return (self.size, self.size)

    def isValidState(self, state):
        return 0 <= state[0] <= self.size and 0 <= state[1] <= self.size

    def getSuccessors(self, state):
        return [(state[0]+dx, state[1]+dy)
                for (dx, dy) in [(1, 0), (0, 1), (-1, 0), (0, -1)]]

    def getStringRepr(self, state):
        return "(%d, %d)" % state


def search(problem):
    print "Searching..."


if (__name__ == '__main__'):
    problem = SquareProblem(2);
    search(problem)
Add a simple problem for testing

# Python does not require explicit interfaces,
# but I believe that code which does is more
# maintainable. Thus I include this explicit
# interface for Problems.
class Problem:
    def getStartState(self):
        return None

    def getEndState(self):
        return None

    def isValidState(self, state):
        return False

    def getSuccessors(self, state):
        return []

    def getStringRepr(self, state):
        return "BadProblem"


def search(problem):
    print "Searching..."


if (__name__ == '__main__'):
    problem = Problem();
    search(problem)
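One way the search() stub could eventually be filled in is breadth-first search over the Problem interface; this is an editorial sketch under that assumption, not the repository's implementation:

# Editorial BFS sketch against the Problem interface defined above.
from collections import deque


def bfs(problem):
    start, goal = problem.getStartState(), problem.getEndState()
    frontier, seen = deque([start]), {start}
    while frontier:
        state = frontier.popleft()
        if state == goal:
            return state
        for nxt in problem.getSuccessors(state):
            if problem.isValidState(nxt) and nxt not in seen:
                seen.add(nxt)
                frontier.append(nxt)
    return None

# With the classes above in scope, bfs(SquareProblem(2)) reaches (2, 2).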
21eb7e06f175a08b4d90146d1bfb48670577e59b
bin/analysis/create_static_model.py
bin/analysis/create_static_model.py
# The old seed pipeline
import logging

import emission.analysis.classification.inference.mode.seed.pipeline as pipeline

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
    seed_pipeline.runPipeline()
Check in a simple script to create and save a model based on old-style data
Check in a simple script to create and save a model based on old-style data

Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.

Testing done: Ran it, there was a json file created.
Python
bsd-3-clause
shankari/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server
# The old seed pipeline
import logging

import emission.analysis.classification.inference.mode.seed.pipeline as pipeline

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
    seed_pipeline.runPipeline()
Check in a simple script to create and save a model based on old-style data

Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.

Testing done: Ran it, there was a json file created.
56ab983036bcb5c78eee91483c1e610da69216d1
kubernetes/client/apis/__init__.py
kubernetes/client/apis/__init__.py
from __future__ import absolute_import

import warnings

# flake8: noqa

# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *

warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
    "The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
    DeprecationWarning
)
Add kubernetes.client.apis as an alias to kubernetes.client.api
Add kubernetes.client.apis as an alias to kubernetes.client.api

Reference: https://github.com/kubernetes-client/python/issues/974

Signed-off-by: Nabarun Pal <[email protected]>
Python
apache-2.0
kubernetes-client/python,kubernetes-client/python
from __future__ import absolute_import

import warnings

# flake8: noqa

# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *

warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
    "The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
    DeprecationWarning
)
Add kubernetes.client.apis as an alias to kubernetes.client.api

Reference: https://github.com/kubernetes-client/python/issues/974

Signed-off-by: Nabarun Pal <[email protected]>
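The module above is an instance of a reusable deprecation-alias pattern. A generic sketch with placeholder names (oldpkg/newpkg), using math as a stand-in for the real target so the file actually runs:

# oldpkg.py -- generic sketch: the old name keeps importing, but emits a
# DeprecationWarning pointing users to the new name.
import warnings

from math import *  # stand-in for "from newpkg import *"

warnings.filterwarnings('default', module='oldpkg')
warnings.warn(
    "oldpkg is renamed and deprecated, use newpkg instead.",
    DeprecationWarning
)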
f2a6897aaa20d2c5a312b1a87d5a7f515f3cdd4b
lightware_parse.py
lightware_parse.py
#!/usr/bin/env python

import serial

s = serial.Serial('/dev/ttyUSB0', baudrate=115200)

while True:
    line = s.readline()
    dist = line.lstrip(' ').split(' ')[0]
    print dist
Add lightware LRF parsing code
Add lightware LRF parsing code
Python
mit
UCSD-E4E/aerial_lidar,UCSD-E4E/aerial_lidar,UCSD-E4E/aerial_lidar,UCSD-E4E/aerial_lidar,UCSD-E4E/aerial_lidar,UCSD-E4E/aerial_lidar
#!/usr/bin/env python

import serial

s = serial.Serial('/dev/ttyUSB0', baudrate=115200)

while True:
    line = s.readline()
    dist = line.lstrip(' ').split(' ')[0]
    print dist
Add lightware LRF parsing code
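The parser assumes every line starts with a numeric field; a slightly hardened read loop, sketched with the same port and baud rate, decodes the bytes and skips malformed lines instead of crashing (editorial sketch, not the committed code):

# Hedged sketch: tolerant version of the LRF read loop above.
import serial

s = serial.Serial('/dev/ttyUSB0', baudrate=115200)
while True:
    line = s.readline().decode('ascii', errors='ignore')
    try:
        dist = float(line.split()[0])
    except (IndexError, ValueError):
        continue  # ignore partial or garbled lines
    print(dist)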
f80b9f42db599e0416bd4e28f69c81e0fda494d2
todoist/managers/generic.py
todoist/managers/generic.py
# -*- coding: utf-8 -*-


class Manager(object):

    # should be re-defined in a subclass
    state_name = None
    object_type = None

    def __init__(self, api):
        self.api = api

    # shortcuts
    @property
    def state(self):
        return self.api.state

    @property
    def queue(self):
        return self.api.queue

    @property
    def token(self):
        return self.api.token


class AllMixin(object):
    def all(self, filt=None):
        return list(filter(filt, self.state[self.state_name]))


class GetByIdMixin(object):

    def get_by_id(self, obj_id, only_local=False):
        """
        Finds and returns the object based on its id.
        """
        for obj in self.state[self.state_name]:
            if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                return obj

        if not only_local and self.object_type is not None:
            getter = getattr(eval('self.api.%ss' % self.object_type), 'get')
            return getter(obj_id)

        return None


class SyncMixin(object):
    """
    Syncs this specific type of objects.
    """
    def sync(self):
        return self.api.sync()
# -*- coding: utf-8 -*-


class Manager(object):
    # should be re-defined in a subclass
    state_name = None
    object_type = None

    def __init__(self, api):
        self.api = api

    # shortcuts
    @property
    def state(self):
        return self.api.state

    @property
    def queue(self):
        return self.api.queue

    @property
    def token(self):
        return self.api.token


class AllMixin(object):
    def all(self, filt=None):
        return list(filter(filt, self.state[self.state_name]))


class GetByIdMixin(object):
    def get_by_id(self, obj_id, only_local=False):
        """
        Finds and returns the object based on its id.
        """
        for obj in self.state[self.state_name]:
            if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                return obj
        if not only_local and self.object_type is not None:
            getter = getattr(eval('self.api.%ss' % self.object_type), 'get')
            data = getter(obj_id)
            # retrieves from state, otherwise we return the raw data
            for obj in self.state[self.state_name]:
                if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                    return obj
            return data
        return None


class SyncMixin(object):
    """
    Syncs this specific type of objects.
    """
    def sync(self):
        return self.api.sync()
Fix the case of using `get_by_id` when there's not state
Fix the case of using `get_by_id` when there's not state

Previous to this commit, if there's no state we would return raw data, which would break object chaining. Now we check the state one last time before giving up and returning the raw data because it may have been populated by methods like `get_by_id` (instead of sync())
Python
mit
Doist/todoist-python
# -*- coding: utf-8 -*-


class Manager(object):
    # should be re-defined in a subclass
    state_name = None
    object_type = None

    def __init__(self, api):
        self.api = api

    # shortcuts
    @property
    def state(self):
        return self.api.state

    @property
    def queue(self):
        return self.api.queue

    @property
    def token(self):
        return self.api.token


class AllMixin(object):
    def all(self, filt=None):
        return list(filter(filt, self.state[self.state_name]))


class GetByIdMixin(object):
    def get_by_id(self, obj_id, only_local=False):
        """
        Finds and returns the object based on its id.
        """
        for obj in self.state[self.state_name]:
            if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                return obj
        if not only_local and self.object_type is not None:
            getter = getattr(eval('self.api.%ss' % self.object_type), 'get')
            data = getter(obj_id)
            # retrieves from state, otherwise we return the raw data
            for obj in self.state[self.state_name]:
                if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                    return obj
            return data
        return None


class SyncMixin(object):
    """
    Syncs this specific type of objects.
    """
    def sync(self):
        return self.api.sync()
Fix the case of using `get_by_id` when there's not state

Previous to this commit, if there's no state we would return raw data, which would break object chaining. Now we check the state one last time before giving up and returning the raw data because it may have been populated by methods like `get_by_id` (instead of sync())

# -*- coding: utf-8 -*-


class Manager(object):
    # should be re-defined in a subclass
    state_name = None
    object_type = None

    def __init__(self, api):
        self.api = api

    # shortcuts
    @property
    def state(self):
        return self.api.state

    @property
    def queue(self):
        return self.api.queue

    @property
    def token(self):
        return self.api.token


class AllMixin(object):
    def all(self, filt=None):
        return list(filter(filt, self.state[self.state_name]))


class GetByIdMixin(object):
    def get_by_id(self, obj_id, only_local=False):
        """
        Finds and returns the object based on its id.
        """
        for obj in self.state[self.state_name]:
            if obj['id'] == obj_id or obj.temp_id == str(obj_id):
                return obj
        if not only_local and self.object_type is not None:
            getter = getattr(eval('self.api.%ss' % self.object_type), 'get')
            return getter(obj_id)
        return None


class SyncMixin(object):
    """
    Syncs this specific type of objects.
    """
    def sync(self):
        return self.api.sync()
346a6b5cc5426ce38195dd5ce4507894710ee8a7
fix-gpt-ubuntu.py
fix-gpt-ubuntu.py
#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#

import subprocess

"""
WARNING: This script will remove all partitions in resource disk
and create a new one using the entire disk space.
"""

if __name__ == '__main__':
    print 'Umnout resource disk...'
    subprocess.call(['umount', '/dev/sdb1'])
    print 'Remove old partitions...'
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
    print 'Create new partition using the entire resource disk...'
    subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
    subprocess.call(['mkfs.ext4', '/dev/sdb1'])
    subprocess.call(['mount', '/dev/sdb1', '/mnt'])
    print 'Resource disk(/dev/sdb1) is mounted at /mnt'
Add script to fix gpt mounting issue with ubuntu.
Add script to fix gpt mounting issue with ubuntu.
Python
apache-2.0
fieryorc/WALinuxAgent,lizzha/WALinuxAgent,jerickso/WALinuxAgent,SuperScottz/WALinuxAgent,yuezh/WALinuxAgent,thomas1206/WALinuxAgent,AbelHu/WALinuxAgent,thomas1206/WALinuxAgent,karataliu/WALinuxAgent,karataliu/WALinuxAgent,AbelHu/WALinuxAgent,ryanmiao/WALinuxAgent,ryanmiao/WALinuxAgent,SuperScottz/WALinuxAgent,yuezh/WALinuxAgent,jerickso/WALinuxAgent,imikushin/WALinuxAgent,lizzha/WALinuxAgent,imikushin/WALinuxAgent,fieryorc/WALinuxAgent
#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#

import subprocess

"""
WARNING: This script will remove all partitions in resource disk
and create a new one using the entire disk space.
"""

if __name__ == '__main__':
    print 'Umnout resource disk...'
    subprocess.call(['umount', '/dev/sdb1'])
    print 'Remove old partitions...'
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
    print 'Create new partition using the entire resource disk...'
    subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
    subprocess.call(['mkfs.ext4', '/dev/sdb1'])
    subprocess.call(['mount', '/dev/sdb1', '/mnt'])
    print 'Resource disk(/dev/sdb1) is mounted at /mnt'
Add script to fix gpt mounting issue with ubuntu.
6dc4cb5ec0f0e2373d364e93b7d342beaad6dc4b
setup.py
setup.py
# !/usr/bin/env python
from setuptools import setup, find_packages

setup(name='symbtrsynthesis',
      version='1.0.1-dev',
      description='An (adaptive) synthesizer for SymbTr-MusicXML scores',
      author='Hasan Sercan Atli',
      url='https://github.com/hsercanatli/symbtrsynthesis',
      packages=find_packages(),
      include_package_data=True,
      install_requires=['numpy']
      )
# !/usr/bin/env python
from setuptools import setup, find_packages

setup(name='symbtrsynthesis',
      version='1.0.1-dev',
      description='An (adaptive) synthesizer for SymbTr-MusicXML scores',
      author='Hasan Sercan Atli',
      url='https://github.com/hsercanatli/symbtrsynthesis',
      packages=find_packages(),
      package_data={'symbtrsynthesis': ['data/*.json']},
      include_package_data=True,
      install_requires=['numpy']
      )
Include data files in built package
Include data files in built package
Python
agpl-3.0
hsercanatli/adaptivetuning
# !/usr/bin/env python
from setuptools import setup, find_packages

setup(name='symbtrsynthesis',
      version='1.0.1-dev',
      description='An (adaptive) synthesizer for SymbTr-MusicXML scores',
      author='Hasan Sercan Atli',
      url='https://github.com/hsercanatli/symbtrsynthesis',
      packages=find_packages(),
      package_data={'symbtrsynthesis': ['data/*.json']},
      include_package_data=True,
      install_requires=['numpy']
      )
Include data files in built package

# !/usr/bin/env python
from setuptools import setup, find_packages

setup(name='symbtrsynthesis',
      version='1.0.1-dev',
      description='An (adaptive) synthesizer for SymbTr-MusicXML scores',
      author='Hasan Sercan Atli',
      url='https://github.com/hsercanatli/symbtrsynthesis',
      packages=find_packages(),
      include_package_data=True,
      install_requires=['numpy']
      )
2f308fbefad5f5cee8b6e160e9a89fda7f4e1ba9
tests/test_renderers.py
tests/test_renderers.py
from flask import Flask
from flask_webapi import WebAPI, APIView, renderer, route
from flask_webapi.renderers import PickleRenderer
from unittest import TestCase


class TestRenderer(TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.api = WebAPI(self.app)
        self.api.load_module('tests.test_renderers')
        self.client = self.app.test_client()

    def test_pickle_renderer(self):
        response = self.client.post('/add')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['content-type'], 'application/pickle')


class BasicView(APIView):
    @route('/add', methods=['POST'])
    @renderer(PickleRenderer)
    def add(self):
        return {}
Add unit tests for pickle renderer
Add unit tests for pickle renderer
Python
mit
viniciuschiele/flask-webapi
from flask import Flask
from flask_webapi import WebAPI, APIView, renderer, route
from flask_webapi.renderers import PickleRenderer
from unittest import TestCase


class TestRenderer(TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.api = WebAPI(self.app)
        self.api.load_module('tests.test_renderers')
        self.client = self.app.test_client()

    def test_pickle_renderer(self):
        response = self.client.post('/add')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['content-type'], 'application/pickle')


class BasicView(APIView):
    @route('/add', methods=['POST'])
    @renderer(PickleRenderer)
    def add(self):
        return {}
Add unit tests for pickle renderer
56dc9af410907780faba79699d274bef96a18675
functionaltests/common/base.py
functionaltests/common/base.py

"""
Copyright 2015 Rackspace

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import tempest_lib.base

from functionaltests.common.config import read_config


class BaseDesignateTest(tempest_lib.base.BaseTestCase):

    def __init__(self, *args, **kwargs):
        super(BaseDesignateTest, self).__init__(*args, **kwargs)

    @classmethod
    def setUpClass(cls):
        super(BaseDesignateTest, cls).setUpClass()
        read_config()

"""
Copyright 2015 Rackspace

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import tempest_lib.base

from functionaltests.common.config import read_config


class BaseDesignateTest(tempest_lib.base.BaseTestCase):

    @classmethod
    def setUpClass(cls):
        super(BaseDesignateTest, cls).setUpClass()
        read_config()
Remove unnecessary __init__ from functionaltests
Remove unnecessary __init__ from functionaltests

The __init__ just passes the same arguments, so it is not necessary to implement it. This patch removes it for the cleanup.

Change-Id: Ib465356c47d06bfc66bef69126b089be24d19474
Python
apache-2.0
openstack/designate,openstack/designate,openstack/designate

"""
Copyright 2015 Rackspace

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import tempest_lib.base

from functionaltests.common.config import read_config


class BaseDesignateTest(tempest_lib.base.BaseTestCase):

    @classmethod
    def setUpClass(cls):
        super(BaseDesignateTest, cls).setUpClass()
        read_config()
Remove unnecessary __init__ from functionaltests

The __init__ just passes the same arguments, so it is not necessary to implement it. This patch removes it for the cleanup.

Change-Id: Ib465356c47d06bfc66bef69126b089be24d19474

"""
Copyright 2015 Rackspace

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import tempest_lib.base

from functionaltests.common.config import read_config


class BaseDesignateTest(tempest_lib.base.BaseTestCase):

    def __init__(self, *args, **kwargs):
        super(BaseDesignateTest, self).__init__(*args, **kwargs)

    @classmethod
    def setUpClass(cls):
        super(BaseDesignateTest, cls).setUpClass()
        read_config()
c19c69926e54e8268b7587a91264a976724a8801
setup.py
setup.py
from distutils.core import setup

setup(
    name='scrAPI Utils',
    version='0.4.7',
    author='Chris Seto',
    author_email='[email protected]',
    packages=['scrapi.linter'],
    package_data={'scrapi.linter': ['../__init__.py']},
    url='http://www.github.com/chrisseto/scrapi',
    license='LICENSE.txt',
    description='Package to aid in consumer creation for scrAPI',
    long_description=open('README.md').read(),
)
from distutils.core import setup

setup(
    name='scrAPI Utils',
    version='0.4.8',
    author='Chris Seto',
    author_email='[email protected]',
    packages=['scrapi.linter'],
    package_data={'scrapi.linter': ['../__init__.py']},
    url='http://www.github.com/chrisseto/scrapi',
    license='LICENSE.txt',
    description='Package to aid in consumer creation for scrAPI',
    long_description=open('README.md').read(),
)
Increment version number for latest linter version
Increment version number for latest linter version
Python
apache-2.0
fabianvf/scrapi,mehanig/scrapi,fabianvf/scrapi,mehanig/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,ostwald/scrapi,jeffreyliu3230/scrapi,icereval/scrapi,felliott/scrapi,erinspace/scrapi,alexgarciac/scrapi,felliott/scrapi,CenterForOpenScience/scrapi
from distutils.core import setup

setup(
    name='scrAPI Utils',
    version='0.4.8',
    author='Chris Seto',
    author_email='[email protected]',
    packages=['scrapi.linter'],
    package_data={'scrapi.linter': ['../__init__.py']},
    url='http://www.github.com/chrisseto/scrapi',
    license='LICENSE.txt',
    description='Package to aid in consumer creation for scrAPI',
    long_description=open('README.md').read(),
)
Increment version number for latest linter version

from distutils.core import setup

setup(
    name='scrAPI Utils',
    version='0.4.7',
    author='Chris Seto',
    author_email='[email protected]',
    packages=['scrapi.linter'],
    package_data={'scrapi.linter': ['../__init__.py']},
    url='http://www.github.com/chrisseto/scrapi',
    license='LICENSE.txt',
    description='Package to aid in consumer creation for scrAPI',
    long_description=open('README.md').read(),
)
f61b81e968384859eb51a2ff14ca7709e8322ae8
yunity/walls/models.py
yunity/walls/models.py
from django.db.models import ForeignKey, TextField

from config import settings
from yunity.base.models import BaseModel


class Wall(BaseModel):
    pass


class WallPost(BaseModel):
    wall = ForeignKey(Wall)
    author = ForeignKey(settings.AUTH_USER_MODEL)


class WallPostContent(BaseModel):
    post = ForeignKey(WallPost)
    author = ForeignKey(settings.AUTH_USER_MODEL)
    body = TextField()
from django.db.models import ForeignKey, TextField

from config import settings
from yunity.base.models import BaseModel


class Wall(BaseModel):
    def resolve_permissions(self, collector):
        h = self.hub
        if h.target_content_type.model == 'group':
            g = h.target
            """:type : Group"""
            collector.add_hub(h, 'read')
            if g.is_content_included_in_parent:
                g = g.parent
                while g:
                    collector.add_hub(g.hub, 'read')
                    g = g.parent


class WallPost(BaseModel):
    wall = ForeignKey(Wall)
    author = ForeignKey(settings.AUTH_USER_MODEL)


class WallPostContent(BaseModel):
    post = ForeignKey(WallPost)
    author = ForeignKey(settings.AUTH_USER_MODEL)
    body = TextField()
Implement basic permissions resolver for walls
Implement basic permissions resolver for walls

To be seen as a poc, collect all hub permissions for a basic permission and settings/inheritance model for reading a wall.

with @nicksellen
Python
agpl-3.0
yunity/foodsaving-backend,yunity/yunity-core,yunity/foodsaving-backend,yunity/yunity-core,yunity/foodsaving-backend
from django.db.models import ForeignKey, TextField

from config import settings
from yunity.base.models import BaseModel


class Wall(BaseModel):
    def resolve_permissions(self, collector):
        h = self.hub
        if h.target_content_type.model == 'group':
            g = h.target
            """:type : Group"""
            collector.add_hub(h, 'read')
            if g.is_content_included_in_parent:
                g = g.parent
                while g:
                    collector.add_hub(g.hub, 'read')
                    g = g.parent


class WallPost(BaseModel):
    wall = ForeignKey(Wall)
    author = ForeignKey(settings.AUTH_USER_MODEL)


class WallPostContent(BaseModel):
    post = ForeignKey(WallPost)
    author = ForeignKey(settings.AUTH_USER_MODEL)
    body = TextField()
Implement basic permissions resolver for walls

To be seen as a poc, collect all hub permissions for a basic permission and settings/inheritance model for reading a wall.

with @nicksellen

from django.db.models import ForeignKey, TextField

from config import settings
from yunity.base.models import BaseModel


class Wall(BaseModel):
    pass


class WallPost(BaseModel):
    wall = ForeignKey(Wall)
    author = ForeignKey(settings.AUTH_USER_MODEL)


class WallPostContent(BaseModel):
    post = ForeignKey(WallPost)
    author = ForeignKey(settings.AUTH_USER_MODEL)
    body = TextField()
95e61ccdebc33c1c610d0672558cd00798c3105f
packages/grid/backend/grid/api/users/models.py
packages/grid/backend/grid/api/users/models.py
# stdlib
from typing import Optional
from typing import Union

# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from pydantic import BaseModel
from pydantic import EmailStr


class BaseUser(BaseModel):
    email: Optional[EmailStr]
    name: Optional[str]
    role: Union[Optional[int], Optional[str]]  # TODO: Should be int in SyftUser
    daa_pdf: Optional[bytes] = b""

    class Config:
        orm_mode = True


class UserCreate(BaseUser):
    email: EmailStr
    role: str = "Data Scientist"
    name: str
    password: str


class UserUpdate(BaseUser):
    password: Optional[str]
    budget: Optional[float]


class UserCandidate(BaseUser):
    email: EmailStr
    status: str = "pending"
    name: str


class User(BaseUser):
    id: int
    role: Union[int, str]  # TODO: This should be int. Perhaps add role_name instead?
    budget_spent: Optional[float]
    institution: Optional[str]
    website: Optional[str]
    added_by: Optional[str]


class UserPrivate(User):
    private_key: str

    def get_signing_key(self) -> SigningKey:
        return SigningKey(self.private_key.encode(), encoder=HexEncoder)


class UserSyft(User):
    hashed_password: str
    salt: str
    verify_key: str
# stdlib
from typing import Optional
from typing import Union

# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from pydantic import BaseModel
from pydantic import EmailStr


class BaseUser(BaseModel):
    email: Optional[EmailStr]
    name: Optional[str]
    role: Union[Optional[int], Optional[str]]  # TODO: Should be int in SyftUser
    daa_pdf: Optional[bytes] = b""

    class Config:
        orm_mode = True


class UserCreate(BaseUser):
    email: EmailStr
    role: str = "Data Scientist"
    name: str
    password: str
    institution: Optional[str]
    website: Optional[str]


class ApplicantStatus(BaseModel):
    status: str


class UserUpdate(BaseUser):
    password: Optional[str]
    budget: Optional[float]


class UserCandidate(BaseUser):
    email: EmailStr
    status: str = "pending"
    name: str


class User(BaseUser):
    id: int
    role: Union[int, str]  # TODO: This should be int. Perhaps add role_name instead?
    budget_spent: Optional[float]
    institution: Optional[str]
    website: Optional[str]
    added_by: Optional[str]


class UserPrivate(User):
    private_key: str

    def get_signing_key(self) -> SigningKey:
        return SigningKey(self.private_key.encode(), encoder=HexEncoder)


class UserSyft(User):
    hashed_password: str
    salt: str
    verify_key: str
ADD institution / website as optional fields during user creation
ADD institution / website as optional fields during user creation
Python
apache-2.0
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
# stdlib
from typing import Optional
from typing import Union

# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from pydantic import BaseModel
from pydantic import EmailStr


class BaseUser(BaseModel):
    email: Optional[EmailStr]
    name: Optional[str]
    role: Union[Optional[int], Optional[str]]  # TODO: Should be int in SyftUser
    daa_pdf: Optional[bytes] = b""

    class Config:
        orm_mode = True


class UserCreate(BaseUser):
    email: EmailStr
    role: str = "Data Scientist"
    name: str
    password: str
    institution: Optional[str]
    website: Optional[str]


class ApplicantStatus(BaseModel):
    status: str


class UserUpdate(BaseUser):
    password: Optional[str]
    budget: Optional[float]


class UserCandidate(BaseUser):
    email: EmailStr
    status: str = "pending"
    name: str


class User(BaseUser):
    id: int
    role: Union[int, str]  # TODO: This should be int. Perhaps add role_name instead?
    budget_spent: Optional[float]
    institution: Optional[str]
    website: Optional[str]
    added_by: Optional[str]


class UserPrivate(User):
    private_key: str

    def get_signing_key(self) -> SigningKey:
        return SigningKey(self.private_key.encode(), encoder=HexEncoder)


class UserSyft(User):
    hashed_password: str
    salt: str
    verify_key: str
ADD institution / website as optional fields during user creation

# stdlib
from typing import Optional
from typing import Union

# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from pydantic import BaseModel
from pydantic import EmailStr


class BaseUser(BaseModel):
    email: Optional[EmailStr]
    name: Optional[str]
    role: Union[Optional[int], Optional[str]]  # TODO: Should be int in SyftUser
    daa_pdf: Optional[bytes] = b""

    class Config:
        orm_mode = True


class UserCreate(BaseUser):
    email: EmailStr
    role: str = "Data Scientist"
    name: str
    password: str


class UserUpdate(BaseUser):
    password: Optional[str]
    budget: Optional[float]


class UserCandidate(BaseUser):
    email: EmailStr
    status: str = "pending"
    name: str


class User(BaseUser):
    id: int
    role: Union[int, str]  # TODO: This should be int. Perhaps add role_name instead?
    budget_spent: Optional[float]
    institution: Optional[str]
    website: Optional[str]
    added_by: Optional[str]


class UserPrivate(User):
    private_key: str

    def get_signing_key(self) -> SigningKey:
        return SigningKey(self.private_key.encode(), encoder=HexEncoder)


class UserSyft(User):
    hashed_password: str
    salt: str
    verify_key: str
19f09bb432a9e2232f0c23743d75315bb2ad2295
cfgov/sheerlike/external_links.py
cfgov/sheerlike/external_links.py
import warnings

from bs4 import BeautifulSoup

from v1 import parse_links


def process_external_links(doc):
    warnings.filterwarnings('ignore')
    for key, value in doc.iteritems():
        doc[key] = _process_data(value)
    warnings.resetwarnings()
    return doc


def _process_data(field):
    if isinstance(field, basestring):
        soup = BeautifulSoup(field, 'html.parser')
        field = parse_links(soup).encode(formatter="html")
    elif isinstance(field, list):
        for i, value in enumerate(field):
            field[i] = _process_data(value)
    elif isinstance(field, dict):
        for key, value in field.iteritems():
            field[key] = _process_data(value)
    return field
import warnings

from bs4 import BeautifulSoup

from v1 import parse_links


def process_external_links(doc):
    warnings.filterwarnings('ignore')
    for key, value in doc.iteritems():
        doc[key] = _process_data(value)
    warnings.resetwarnings()
    return doc


def _process_data(field):
    if isinstance(field, basestring):
        soup = BeautifulSoup(field, 'html.parser')
        field = parse_links(soup).encode(formatter=None)
    elif isinstance(field, list):
        for i, value in enumerate(field):
            field[i] = _process_data(value)
    elif isinstance(field, dict):
        for key, value in field.iteritems():
            field[key] = _process_data(value)
    return field
Remove output formatting to get back what was put in
Remove output formatting to get back what was put in
Python
cc0-1.0
kave/cfgov-refresh,kave/cfgov-refresh,kave/cfgov-refresh,kave/cfgov-refresh
import warnings

from bs4 import BeautifulSoup

from v1 import parse_links


def process_external_links(doc):
    warnings.filterwarnings('ignore')
    for key, value in doc.iteritems():
        doc[key] = _process_data(value)
    warnings.resetwarnings()
    return doc


def _process_data(field):
    if isinstance(field, basestring):
        soup = BeautifulSoup(field, 'html.parser')
        field = parse_links(soup).encode(formatter=None)
    elif isinstance(field, list):
        for i, value in enumerate(field):
            field[i] = _process_data(value)
    elif isinstance(field, dict):
        for key, value in field.iteritems():
            field[key] = _process_data(value)
    return field
Remove output formatting to get back what was put in

import warnings

from bs4 import BeautifulSoup

from v1 import parse_links


def process_external_links(doc):
    warnings.filterwarnings('ignore')
    for key, value in doc.iteritems():
        doc[key] = _process_data(value)
    warnings.resetwarnings()
    return doc


def _process_data(field):
    if isinstance(field, basestring):
        soup = BeautifulSoup(field, 'html.parser')
        field = parse_links(soup).encode(formatter="html")
    elif isinstance(field, list):
        for i, value in enumerate(field):
            field[i] = _process_data(value)
    elif isinstance(field, dict):
        for key, value in field.iteritems():
            field[key] = _process_data(value)
    return field
0137d5440f86a8f1424598beea4468ae8c68f985
demos/dlgr/demos/iterated_drawing/models.py
demos/dlgr/demos/iterated_drawing/models.py
from dallinger.nodes import Source

import random
import base64
import os
import json


class DrawingSource(Source):
    """A Source that reads in a random image from a file and transmits it."""

    __mapper_args__ = {
        "polymorphic_identity": "drawing_source"
    }

    def _contents(self):
        """Define the contents of new Infos.

        transmit() -> _what() -> create_information() -> _contents().
        """
        images = [
            "owl.png",
        ]

        image = random.choice(images)

        image_path = os.path.join("static", "stimuli", image)

        uri_encoded_image = (
            b"data:image/png;base64," +
            base64.b64encode(open(image_path, "rb").read())
        )

        return json.dumps({
            "image": uri_encoded_image.decode('utf-8'),
            "sketch": u""
        })
from dallinger.nodes import Source

import random
import base64
import os
import json


class DrawingSource(Source):
    """A Source that reads in a random image from a file and transmits it."""

    __mapper_args__ = {
        "polymorphic_identity": "drawing_source"
    }

    def _contents(self):
        """Define the contents of new Infos.

        transmit() -> _what() -> create_information() -> _contents().
        """
        images = [
            "owl.png",
        ]

        # We're selecting from a list of only one item here, but it's a useful
        # technique to demonstrate:
        image = random.choice(images)

        image_path = os.path.join("static", "stimuli", image)

        uri_encoded_image = (
            b"data:image/png;base64," +
            base64.b64encode(open(image_path, "rb").read())
        )

        return json.dumps({
            "image": uri_encoded_image.decode('utf-8'),
            "sketch": u""
        })
Comment explaining random.choice() on 1-item list
Comment explaining random.choice() on 1-item list
Python
mit
Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger
from dallinger.nodes import Source

import random
import base64
import os
import json


class DrawingSource(Source):
    """A Source that reads in a random image from a file and transmits it."""

    __mapper_args__ = {
        "polymorphic_identity": "drawing_source"
    }

    def _contents(self):
        """Define the contents of new Infos.

        transmit() -> _what() -> create_information() -> _contents().
        """
        images = [
            "owl.png",
        ]

        # We're selecting from a list of only one item here, but it's a useful
        # technique to demonstrate:
        image = random.choice(images)

        image_path = os.path.join("static", "stimuli", image)

        uri_encoded_image = (
            b"data:image/png;base64," +
            base64.b64encode(open(image_path, "rb").read())
        )

        return json.dumps({
            "image": uri_encoded_image.decode('utf-8'),
            "sketch": u""
        })
Comment explaining random.choice() on 1-item list

from dallinger.nodes import Source

import random
import base64
import os
import json


class DrawingSource(Source):
    """A Source that reads in a random image from a file and transmits it."""

    __mapper_args__ = {
        "polymorphic_identity": "drawing_source"
    }

    def _contents(self):
        """Define the contents of new Infos.

        transmit() -> _what() -> create_information() -> _contents().
        """
        images = [
            "owl.png",
        ]

        image = random.choice(images)

        image_path = os.path.join("static", "stimuli", image)

        uri_encoded_image = (
            b"data:image/png;base64," +
            base64.b64encode(open(image_path, "rb").read())
        )

        return json.dumps({
            "image": uri_encoded_image.decode('utf-8'),
            "sketch": u""
        })
3b6eaabe93a92782a1a5198ae4b03fa5e501a770
agir/activity/serializers.py
agir/activity/serializers.py
from rest_framework import serializers

from agir.events.serializers import EventSerializer
from agir.groups.serializers import SupportGroupSerializer
from agir.lib.serializers import FlexibleFieldsMixin
from agir.people.serializers import PersonSerializer


class ActivitySerializer(FlexibleFieldsMixin, serializers.Serializer):
    id = serializers.CharField()
    type = serializers.CharField()
    subtype = serializers.CharField(source="type")
    timestamp = serializers.DateTimeField()
    event = EventSerializer(
        fields=[
            "id",
            "name",
            "startTime",
            "endTime",
            "participantCount",
            "illustration",
            "schedule",
            "location",
            "rsvp",
            "routes",
        ]
    )
    supportGroup = SupportGroupSerializer(source="supportgroup", fields=["name", "url"])
    individual = PersonSerializer(fields=["fullName"])
    status = serializers.CharField()
from rest_framework import serializers

from agir.events.serializers import EventSerializer
from agir.groups.serializers import SupportGroupSerializer
from agir.lib.serializers import FlexibleFieldsMixin
from agir.people.serializers import PersonSerializer


class ActivitySerializer(FlexibleFieldsMixin, serializers.Serializer):
    id = serializers.CharField()
    type = serializers.CharField()
    subtype = serializers.CharField(source="type")
    timestamp = serializers.DateTimeField()
    event = EventSerializer(
        fields=[
            "id",
            "name",
            "startTime",
            "endTime",
            "participantCount",
            "illustration",
            "schedule",
            "location",
            "rsvp",
            "routes",
        ]
    )
    supportGroup = SupportGroupSerializer(source="supportgroup", fields=["name", "url"])
    individual = PersonSerializer(fields=["fullName", "email"])
    status = serializers.CharField()
Add individual email field to activity serializer
Add individual email field to activity serializer
Python
agpl-3.0
lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django
from rest_framework import serializers

from agir.events.serializers import EventSerializer
from agir.groups.serializers import SupportGroupSerializer
from agir.lib.serializers import FlexibleFieldsMixin
from agir.people.serializers import PersonSerializer


class ActivitySerializer(FlexibleFieldsMixin, serializers.Serializer):
    id = serializers.CharField()
    type = serializers.CharField()
    subtype = serializers.CharField(source="type")
    timestamp = serializers.DateTimeField()
    event = EventSerializer(
        fields=[
            "id",
            "name",
            "startTime",
            "endTime",
            "participantCount",
            "illustration",
            "schedule",
            "location",
            "rsvp",
            "routes",
        ]
    )
    supportGroup = SupportGroupSerializer(source="supportgroup", fields=["name", "url"])
    individual = PersonSerializer(fields=["fullName", "email"])
    status = serializers.CharField()
Add individual email field to activity serializer

from rest_framework import serializers

from agir.events.serializers import EventSerializer
from agir.groups.serializers import SupportGroupSerializer
from agir.lib.serializers import FlexibleFieldsMixin
from agir.people.serializers import PersonSerializer


class ActivitySerializer(FlexibleFieldsMixin, serializers.Serializer):
    id = serializers.CharField()
    type = serializers.CharField()
    subtype = serializers.CharField(source="type")
    timestamp = serializers.DateTimeField()
    event = EventSerializer(
        fields=[
            "id",
            "name",
            "startTime",
            "endTime",
            "participantCount",
            "illustration",
            "schedule",
            "location",
            "rsvp",
            "routes",
        ]
    )
    supportGroup = SupportGroupSerializer(source="supportgroup", fields=["name", "url"])
    individual = PersonSerializer(fields=["fullName"])
    status = serializers.CharField()
098044c80fff2ff639da088f87f7fc6952813fc1
txircd/channel.py
txircd/channel.py
from txircd.utils import CaseInsensitiveDictionary, now()

class IRCChannel(object):
    def __init__(self, ircd, name):
        self.ircd = ircd
        self.name = name
        self.created = now()
        self.topic = ""
        self.topicSetter = ""
        self.topicTime = now()
        self.mode = {}
        self.users = CaseInsensitiveDictionary()
        self.metadata = {}
        self.cache = {}

    def modeString(self, user):
        modes = "+"
        params = []
        for mode, param in self.mode.iteritems():
            modetype = self.ircd.channel_mode_type[mode]
            if modetype > 0:
                modes += mode
                if param:
                    params.append(self.ircd.channel_modes[modetype][mode].showParam(user, param))
        return ("{} {}".format(modes, " ".join(params)) if params else modes)

    def setTopic(self, topic, setter):
        self.topic = topic
        self.topicSetter = setter
        self.topicTime = now()

    def getMetadata(self, key):
        if key in self.metadata:
            return self.metadata[key]
        return ""
from txircd.utils import CaseInsensitiveDictionary, now()

class IRCChannel(object):
    def __init__(self, ircd, name):
        self.ircd = ircd
        self.name = name
        self.created = now()
        self.topic = ""
        self.topicSetter = ""
        self.topicTime = now()
        self.mode = {}
        self.users = CaseInsensitiveDictionary()
        self.metadata = {}
        self.cache = {}

    def modeString(self, user):
        modes = []  # Since we're appending characters to this string, it's more efficient to store the array of characters and join it rather than keep making new strings
        params = []
        for mode, param in self.mode.iteritems():
            modetype = self.ircd.channel_mode_type[mode]
            if modetype > 0:
                modes.append(mode)
                if param:
                    params.append(self.ircd.channel_modes[modetype][mode].showParam(user, param))
        return ("+{} {}".format("".join(modes), " ".join(params)) if params else "".join(modes))

    def setTopic(self, topic, setter):
        self.topic = topic
        self.topicSetter = setter
        self.topicTime = now()

    def getMetadata(self, key):
        if key in self.metadata:
            return self.metadata[key]
        return ""
Change how mode strings are constructed
Change how mode strings are constructed
Python
bsd-3-clause
DesertBus/txircd,Heufneutje/txircd,ElementalAlchemist/txircd
from txircd.utils import CaseInsensitiveDictionary, now()

class IRCChannel(object):
    def __init__(self, ircd, name):
        self.ircd = ircd
        self.name = name
        self.created = now()
        self.topic = ""
        self.topicSetter = ""
        self.topicTime = now()
        self.mode = {}
        self.users = CaseInsensitiveDictionary()
        self.metadata = {}
        self.cache = {}

    def modeString(self, user):
        modes = []  # Since we're appending characters to this string, it's more efficient to store the array of characters and join it rather than keep making new strings
        params = []
        for mode, param in self.mode.iteritems():
            modetype = self.ircd.channel_mode_type[mode]
            if modetype > 0:
                modes.append(mode)
                if param:
                    params.append(self.ircd.channel_modes[modetype][mode].showParam(user, param))
        return ("+{} {}".format("".join(modes), " ".join(params)) if params else "".join(modes))

    def setTopic(self, topic, setter):
        self.topic = topic
        self.topicSetter = setter
        self.topicTime = now()

    def getMetadata(self, key):
        if key in self.metadata:
            return self.metadata[key]
        return ""
Change how mode strings are constructed

from txircd.utils import CaseInsensitiveDictionary, now()

class IRCChannel(object):
    def __init__(self, ircd, name):
        self.ircd = ircd
        self.name = name
        self.created = now()
        self.topic = ""
        self.topicSetter = ""
        self.topicTime = now()
        self.mode = {}
        self.users = CaseInsensitiveDictionary()
        self.metadata = {}
        self.cache = {}

    def modeString(self, user):
        modes = "+"
        params = []
        for mode, param in self.mode.iteritems():
            modetype = self.ircd.channel_mode_type[mode]
            if modetype > 0:
                modes += mode
                if param:
                    params.append(self.ircd.channel_modes[modetype][mode].showParam(user, param))
        return ("{} {}".format(modes, " ".join(params)) if params else modes)

    def setTopic(self, topic, setter):
        self.topic = topic
        self.topicSetter = setter
        self.topicTime = now()

    def getMetadata(self, key):
        if key in self.metadata:
            return self.metadata[key]
        return ""
5e8218a2fb5b0c63df4394e299ad75fec2494b29
setup.py
setup.py
import os

from setuptools import setup

from withtool import __version__


def read(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()


setup(
    name='with',
    version=__version__,
    description='A shell context manager',
    long_description=read('README.rst'),
    author='Renan Ivo',
    author_email='[email protected]',
    url='https://github.com/renanivo/with',
    keywords='context manager shell command line repl',
    scripts=['bin/with'],
    install_requires=[
        'appdirs==1.4.3',
        'docopt==0.6.2',
        'prompt-toolkit==1.0',
        'python-slugify==1.2.1',
    ],
    packages=['withtool'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ]
)
import os

from setuptools import setup

from withtool import __version__


def read(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()


setup(
    name='with',
    version=__version__,
    description='A shell context manager',
    long_description=read('README.rst'),
    author='Renan Ivo',
    author_email='[email protected]',
    url='https://github.com/renanivo/with',
    keywords='context manager shell command line repl',
    scripts=['bin/with'],
    install_requires=[
        'appdirs==1.4.3',
        'docopt==0.6.2',
        'prompt-toolkit==1.0',
        'python-slugify==1.2.2',
    ],
    packages=['withtool'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ]
)
Upgrade dependency python-slugify to ==1.2.2
Upgrade dependency python-slugify to ==1.2.2
Python
mit
renanivo/with
import os

from setuptools import setup

from withtool import __version__


def read(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()


setup(
    name='with',
    version=__version__,
    description='A shell context manager',
    long_description=read('README.rst'),
    author='Renan Ivo',
    author_email='[email protected]',
    url='https://github.com/renanivo/with',
    keywords='context manager shell command line repl',
    scripts=['bin/with'],
    install_requires=[
        'appdirs==1.4.3',
        'docopt==0.6.2',
        'prompt-toolkit==1.0',
        'python-slugify==1.2.2',
    ],
    packages=['withtool'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ]
)
Upgrade dependency python-slugify to ==1.2.2

import os

from setuptools import setup

from withtool import __version__


def read(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()


setup(
    name='with',
    version=__version__,
    description='A shell context manager',
    long_description=read('README.rst'),
    author='Renan Ivo',
    author_email='[email protected]',
    url='https://github.com/renanivo/with',
    keywords='context manager shell command line repl',
    scripts=['bin/with'],
    install_requires=[
        'appdirs==1.4.3',
        'docopt==0.6.2',
        'prompt-toolkit==1.0',
        'python-slugify==1.2.1',
    ],
    packages=['withtool'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ]
)
99ab527550b91d17342ef3112e35f3cdb1be9867
src/binsearch.py
src/binsearch.py

"""
Binary search
"""


def binary_search0(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs) - 1

    while lft <= rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid
        elif x < xs[mid]:
            rgt = mid - 1
        elif x > xs[mid]:
            lft = mid + 1

    return None


def binary_search1(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs)

    while lft < rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid
        elif x < xs[mid]:
            rgt = mid
        elif x > xs[mid]:
            lft = mid + 1

    return None

"""
Binary search
"""


def binary_search0(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs) - 1

    while lft <= rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid

        if xs[mid] < x:
            lft = mid + 1
        else:
            rgt = mid - 1

    return None


def binary_search1(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs)

    while lft < rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid

        if xs[mid] < x:
            lft = mid + 1
        else:
            rgt = mid

    return None
Fix the "no else after return" lint
Fix the "no else after return" lint
Python
mit
all3fox/algos-py

"""
Binary search
"""


def binary_search0(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs) - 1

    while lft <= rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid

        if xs[mid] < x:
            lft = mid + 1
        else:
            rgt = mid - 1

    return None


def binary_search1(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs)

    while lft < rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid

        if xs[mid] < x:
            lft = mid + 1
        else:
            rgt = mid

    return None
Fix the "no else after return" lint

"""
Binary search
"""


def binary_search0(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs) - 1

    while lft <= rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid
        elif x < xs[mid]:
            rgt = mid - 1
        elif x > xs[mid]:
            lft = mid + 1

    return None


def binary_search1(xs, x):
    """
    Perform binary search for a specific value in the given sorted list

    :param xs: a sorted list
    :param x: the target value
    :return: an index if the value was found, or None if not
    """
    lft, rgt = 0, len(xs)

    while lft < rgt:
        mid = (lft + rgt) // 2

        if xs[mid] == x:
            return mid
        elif x < xs[mid]:
            rgt = mid
        elif x > xs[mid]:
            lft = mid + 1

    return None
effc09edd607d7975a01b3652b4932e40fb0f7f9
bin/combine-examples.py
bin/combine-examples.py
#!/usr/bin/python

import re
import sys


def main(argv):
    examples = {}
    requires = set()
    for filename in argv[1:]:
        lines = open(filename).readlines()
        if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
            continue
        requires.update(line for line in lines if line.startswith('goog.require'))
        examples[filename] = [line for line in lines if not line.startswith('goog.require')]
    for require in sorted(requires):
        print require,
    for filename in sorted(examples.keys()):
        print '// ', filename
        print '(function(){'
        for line in examples[filename]:
            print line,
        print '})();'


if __name__ == '__main__':
    sys.exit(main(sys.argv))
Add script to combine examples
Add script to combine examples
Python
bsd-2-clause
kkuunnddaannkk/ol3,landonb/ol3,pmlrsg/ol3,tamarmot/ol3,elemoine/ol3,alexbrault/ol3,Distem/ol3,t27/ol3,Andrey-Pavlov/ol3,bogdanvaduva/ol3,fblackburn/ol3,jacmendt/ol3,bogdanvaduva/ol3,ahocevar/ol3,Distem/ol3,thhomas/ol3,jmiller-boundless/ol3,klokantech/ol3raster,geekdenz/openlayers,stweil/openlayers,antonio83moura/ol3,alexbrault/ol3,jmiller-boundless/ol3,Morgul/ol3,ahocevar/openlayers,alvinlindstam/ol3,Antreasgr/ol3,fredj/ol3,gingerik/ol3,wlerner/ol3,xiaoqqchen/ol3,klokantech/ol3raster,adube/ol3,geekdenz/ol3,oterral/ol3,Morgul/ol3,geekdenz/openlayers,fblackburn/ol3,landonb/ol3,mzur/ol3,NOAA-ORR-ERD/ol3,stweil/openlayers,ahocevar/openlayers,geonux/ol3,mechdrew/ol3,fredj/ol3,ahocevar/openlayers,freylis/ol3,itayod/ol3,gingerik/ol3,fperucic/ol3,thomasmoelhave/ol3,pmlrsg/ol3,itayod/ol3,Andrey-Pavlov/ol3,CandoImage/ol3,tsauerwein/ol3,wlerner/ol3,Andrey-Pavlov/ol3,openlayers/openlayers,gingerik/ol3,elemoine/ol3,stweil/ol3,fredj/ol3,bartvde/ol3,NOAA-ORR-ERD/ol3,planetlabs/ol3,kkuunnddaannkk/ol3,bartvde/ol3,klokantech/ol3,geonux/ol3,jacmendt/ol3,thhomas/ol3,ahocevar/ol3,bogdanvaduva/ol3,bogdanvaduva/ol3,epointal/ol3,geekdenz/ol3,thomasmoelhave/ol3,thomasmoelhave/ol3,thomasmoelhave/ol3,freylis/ol3,richstoner/ol3,adube/ol3,tschaub/ol3,kjelderg/ol3,planetlabs/ol3,llambanna/ol3,kjelderg/ol3,das-peter/ol3,jacmendt/ol3,fblackburn/ol3,mechdrew/ol3,elemoine/ol3,hafenr/ol3,klokantech/ol3,epointal/ol3,tschaub/ol3,xiaoqqchen/ol3,bill-chadwick/ol3,tsauerwein/ol3,aisaacs/ol3,ahocevar/ol3,fperucic/ol3,stweil/ol3,tsauerwein/ol3,aisaacs/ol3,kjelderg/ol3,planetlabs/ol3,wlerner/ol3,fblackburn/ol3,richstoner/ol3,jmiller-boundless/ol3,freylis/ol3,jacmendt/ol3,klokantech/ol3raster,alexbrault/ol3,tamarmot/ol3,pmlrsg/ol3,mzur/ol3,adube/ol3,Antreasgr/ol3,ahocevar/ol3,mzur/ol3,bjornharrtell/ol3,llambanna/ol3,kjelderg/ol3,t27/ol3,klokantech/ol3,itayod/ol3,tamarmot/ol3,t27/ol3,hafenr/ol3,alvinlindstam/ol3,pmlrsg/ol3,jmiller-boundless/ol3,yinxiaomei/ol3,epointal/ol3,alvinlindstam/ol3,alvinlindstam/ol3,xiaoqqchen/ol3,das-peter/ol3,klokantech/ol3raster,richstoner/ol3,yinxiaomei/ol3,kkuunnddaannkk/ol3,alexbrault/ol3,stweil/ol3,antonio83moura/ol3,elemoine/ol3,richstoner/ol3,Andrey-Pavlov/ol3,thhomas/ol3,wlerner/ol3,aisaacs/ol3,oterral/ol3,geekdenz/ol3,t27/ol3,mechdrew/ol3,tschaub/ol3,bjornharrtell/ol3,epointal/ol3,fperucic/ol3,denilsonsa/ol3,kkuunnddaannkk/ol3,Morgul/ol3,Antreasgr/ol3,Distem/ol3,gingerik/ol3,geekdenz/ol3,fperucic/ol3,Distem/ol3,aisaacs/ol3,stweil/openlayers,bill-chadwick/ol3,landonb/ol3,denilsonsa/ol3,bjornharrtell/ol3,openlayers/openlayers,oterral/ol3,bartvde/ol3,mechdrew/ol3,CandoImage/ol3,CandoImage/ol3,hafenr/ol3,denilsonsa/ol3,das-peter/ol3,fredj/ol3,xiaoqqchen/ol3,planetlabs/ol3,llambanna/ol3,antonio83moura/ol3,tsauerwein/ol3,denilsonsa/ol3,tamarmot/ol3,Morgul/ol3,NOAA-ORR-ERD/ol3,klokantech/ol3,thhomas/ol3,freylis/ol3,bartvde/ol3,geonux/ol3,jmiller-boundless/ol3,stweil/ol3,tschaub/ol3,yinxiaomei/ol3,yinxiaomei/ol3,llambanna/ol3,geekdenz/openlayers,hafenr/ol3,antonio83moura/ol3,mzur/ol3,Antreasgr/ol3,NOAA-ORR-ERD/ol3,bill-chadwick/ol3,CandoImage/ol3,geonux/ol3,das-peter/ol3,bill-chadwick/ol3,openlayers/openlayers,landonb/ol3,itayod/ol3
#!/usr/bin/python

import re
import sys


def main(argv):
    examples = {}
    requires = set()
    for filename in argv[1:]:
        lines = open(filename).readlines()
        if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
            continue
        requires.update(line for line in lines if line.startswith('goog.require'))
        examples[filename] = [line for line in lines if not line.startswith('goog.require')]
    for require in sorted(requires):
        print require,
    for filename in sorted(examples.keys()):
        print '// ', filename
        print '(function(){'
        for line in examples[filename]:
            print line,
        print '})();'


if __name__ == '__main__':
    sys.exit(main(sys.argv))
Add script to combine examples
fa09d3b526bdf04dcabda603ef1e0adac8ae68bd
setup.py
setup.py
from setuptools import setup

setup(
    name='python-binary-memcached',
    version='0.24.6',
    author='Jayson Reis',
    author_email='[email protected]',
    description='A pure python module to access memcached via it\'s binary protocol with SASL auth support',
    url='https://github.com/jaysonsantos/python-binary-memcached',
    packages=['bmemcached'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=[
        'six'
    ]
)
from setuptools import setup

setup(
    name='python-binary-memcached',
    version='0.24.6',
    author='Jayson Reis',
    author_email='[email protected]',
    description='A pure python module to access memcached via its binary protocol with SASL auth support',
    url='https://github.com/jaysonsantos/python-binary-memcached',
    packages=['bmemcached'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=[
        'six'
    ]
)
Fix a typo in description: it's => its
Fix a typo in description: it's => its
Python
mit
jaysonsantos/python-binary-memcached,jaysonsantos/python-binary-memcached
from setuptools import setup

setup(
    name='python-binary-memcached',
    version='0.24.6',
    author='Jayson Reis',
    author_email='[email protected]',
    description='A pure python module to access memcached via its binary protocol with SASL auth support',
    url='https://github.com/jaysonsantos/python-binary-memcached',
    packages=['bmemcached'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=[
        'six'
    ]
)
Fix a typo in description: it's => its

from setuptools import setup

setup(
    name='python-binary-memcached',
    version='0.24.6',
    author='Jayson Reis',
    author_email='[email protected]',
    description='A pure python module to access memcached via it\'s binary protocol with SASL auth support',
    url='https://github.com/jaysonsantos/python-binary-memcached',
    packages=['bmemcached'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=[
        'six'
    ]
)
d0a2f82686158f6610ec5f57f586598be7569c6d
students/psbriant/final_project/clean_data.py
students/psbriant/final_project/clean_data.py

"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""

import pandas
from datetime import datetime

# Change source to smaller file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")

print(data["Date Text"].head())

first_date = data["Date Text"].values[0]
# print(first_date)

# datetime.strptime(first_date, "%Y-%m-%d")
# datetime(2012, 3, 10, 0, 0)

data.date = data.date.apply(lambda d: datetime.strptime(d, "%Y-%m-%d"))
# print(data.date.head())

data.index = data.date
# print(data)

# print(data.ix[datetime(2012, 8, 19)])

# Remove date column
data = data.drop(["date"], axis=1)
# print(data.columns)

# Determine what values are missing
empty = data.apply(lambda col: pandas.isnull(col))

"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""

import pandas
from datetime import datetime

# Change source to smaller file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")

print(data["Date Text"].head())

first_date = data["Date Text"].values[0]
# print(first_date)

# datetime.strptime(first_date, "%Y-%m-%d")
# datetime(2012, 3, 10, 0, 0)

# data.date = data.date.apply(lambda d: datetime.strptime(d, "%Y-%m-%d"))
# print(data.date.head())

# data.index = data.date
# print(data)

# print(data.ix[datetime(2012, 8, 19)])

# Remove date column
# data = data.drop(["date"], axis=1)
# print(data.columns)

# Determine what values are missing
# empty = data.apply(lambda col: pandas.isnull(col))
Comment out code to test first_date variable.
Comment out code to test first_date variable.
Python
unlicense
UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016,weidnem/IntroPython2016,UWPCE-PythonCert/IntroPython2016,UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016

"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""

import pandas
from datetime import datetime

# Change source to smaller file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")

print(data["Date Text"].head())

first_date = data["Date Text"].values[0]
# print(first_date)

# datetime.strptime(first_date, "%Y-%m-%d")
# datetime(2012, 3, 10, 0, 0)

# data.date = data.date.apply(lambda d: datetime.strptime(d, "%Y-%m-%d"))
# print(data.date.head())

# data.index = data.date
# print(data)

# print(data.ix[datetime(2012, 8, 19)])

# Remove date column
# data = data.drop(["date"], axis=1)
# print(data.columns)

# Determine what values are missing
# empty = data.apply(lambda col: pandas.isnull(col))
Comment out code to test first_date variable.

"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""

import pandas
from datetime import datetime

# Change source to smaller file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")

print(data["Date Text"].head())

first_date = data["Date Text"].values[0]
# print(first_date)

# datetime.strptime(first_date, "%Y-%m-%d")
# datetime(2012, 3, 10, 0, 0)

data.date = data.date.apply(lambda d: datetime.strptime(d, "%Y-%m-%d"))
# print(data.date.head())

data.index = data.date
# print(data)

# print(data.ix[datetime(2012, 8, 19)])

# Remove date column
data = data.drop(["date"], axis=1)
# print(data.columns)

# Determine what values are missing
empty = data.apply(lambda col: pandas.isnull(col))
ff3e0eb9d38d2cbed1fab7b67a374915bf65b8f5
engine/logger.py
engine/logger.py
#
# dp for Tornado
# YoungYong Park ([email protected])
# 2014.10.23
#

from .singleton import Singleton


class Logger(object, metaclass=Singleton):
    def exception(self, e=None):
        pass
#
# dp for Tornado
# YoungYong Park ([email protected])
# 2014.10.23
#

import logging

from .singleton import Singleton


class Logger(object, metaclass=Singleton):
    def exception(self, msg, *args, **kwargs):
        logging.exception(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        logging.error(msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        logging.info(msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        logging.warning(msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        logging.debug(msg, *args, **kwargs)
Add logging helper. (exception, error, warning, info, debug)
Add logging helper. (exception, error, warning, info, debug)
Python
mit
why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado
# # dp for Tornado # YoungYong Park ([email protected]) # 2014.10.23 # import logging from .singleton import Singleton class Logger(object, metaclass=Singleton): def exception(self, msg, *args, **kwargs): logging.exception(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): logging.error(msg, *args, **kwargs) def info(self, msg, *args, **kwargs): logging.info(msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): logging.warning(msg, *args, **kwargs) def debug(self, msg, *args, **kwargs): logging.debug(msg, *args, **kwargs)
Add logging helper. (exception, error, warning, info, debug) # # dp for Tornado # YoungYong Park ([email protected]) # 2014.10.23 # from .singleton import Singleton class Logger(object, metaclass=Singleton): def exception(self, e=None): pass
f623775309c75cd0742b03df4ff4759efee4470d
Code/Python/Kamaelia/Test/Internet/test_MulticastTransceiverSystem.py
Code/Python/Kamaelia/Test/Internet/test_MulticastTransceiverSystem.py
#!/usr/bin/python # # Basic acceptance test harness for the Multicast_sender and receiver # components. # import socket import Axon def tests(): from Axon.Scheduler import scheduler from Kamaelia.Util.ConsoleEcho import consoleEchoer from Kamaelia.Util.Chargen import Chargen from Kamaelia.Internet.Multicast_sender import Multicast_sender from Kamaelia.Internet.Multicast_receiver import Multicast_receiver from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver class testComponent(Axon.Component.component): def main(self): chargen= Chargen() sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600) receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0) display = consoleEchoer() self.link((chargen,"outbox"), (sender,"inbox")) self.link((receiver,"outbox"), (display,"inbox")) self.addChildren(chargen, sender, receiver, display) yield Axon.Ipc.newComponent(*(self.children)) while 1: self.pause() yield 1 harness = testComponent() harness.activate() scheduler.run.runThreads(slowmo=0.1) if __name__=="__main__": tests()
Test harness for the multicast transceiver.
Test harness for the multicast transceiver. Michael.
Python
apache-2.0
sparkslabs/kamaelia
#!/usr/bin/python # # Basic acceptance test harness for the Multicast_sender and receiver # components. # import socket import Axon def tests(): from Axon.Scheduler import scheduler from Kamaelia.Util.ConsoleEcho import consoleEchoer from Kamaelia.Util.Chargen import Chargen from Kamaelia.Internet.Multicast_sender import Multicast_sender from Kamaelia.Internet.Multicast_receiver import Multicast_receiver from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver class testComponent(Axon.Component.component): def main(self): chargen= Chargen() sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600) receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0) display = consoleEchoer() self.link((chargen,"outbox"), (sender,"inbox")) self.link((receiver,"outbox"), (display,"inbox")) self.addChildren(chargen, sender, receiver, display) yield Axon.Ipc.newComponent(*(self.children)) while 1: self.pause() yield 1 harness = testComponent() harness.activate() scheduler.run.runThreads(slowmo=0.1) if __name__=="__main__": tests()
Test harness for the multicast transceiver. Michael.
532b0809b040318abbb8e62848f18ad0cdf72547
src/workspace/workspace_managers.py
src/workspace/workspace_managers.py
from workspace.models import GroupPublishedWorkspace, PublishedWorkSpace, WorkSpace def ref_from_workspace(workspace): if isinstance(workspace, WorkSpace): return 'group/' + str(workspace.id) elif isinstance(workspace, PublishedWorkSpace): return 'group_published/' + str(workspace.id) class OrganizationWorkspaceManager: def get_id(self): return 'ezweb_organizations' def update_base_workspaces(self, user, current_workspace_refs): workspaces_to_remove = current_workspace_refs[:] workspaces_to_add = [] user_groups = user.groups.all() # workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces = [workspace for sublist in [WorkSpace.objects.filter(targetOrganizations=org) for org in user_groups] for workspace in sublist] # published workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces += [relation.workspace for sublist in [GroupPublishedWorkspace.objects.filter(group=group) for group in user_groups] for relation in sublist] workspaces = set(workspaces) for workspace in workspaces: ref = ref_from_workspace(workspace) if ref not in current_workspace_refs: workspaces_to_add.append((ref, workspace)) else: workspaces_to_remove.remove(ref) return (workspaces_to_remove, workspaces_to_add)
from workspace.models import GroupPublishedWorkspace, PublishedWorkSpace, WorkSpace def ref_from_workspace(workspace): if isinstance(workspace, WorkSpace): return 'group/' + str(workspace.id) elif isinstance(workspace, PublishedWorkSpace): return 'group_published/' + str(workspace.id) class OrganizationWorkspaceManager: def get_id(self): return 'ezweb_organizations' def update_base_workspaces(self, user, current_workspace_refs): workspaces_to_remove = current_workspace_refs[:] workspaces_to_add = [] user_groups = user.groups.all() # workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces = [workspace for sublist in [WorkSpace.objects.filter(targetOrganizations=org) for org in user_groups] for workspace in sublist] # published workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces += [relation.workspace for sublist in [GroupPublishedWorkspace.objects.filter(group=group) for group in user_groups] for relation in sublist] workspaces = set(workspaces) for workspace in workspaces: if workspace.creator == user: # Ignore workspaces created by the user continue ref = ref_from_workspace(workspace) if ref not in current_workspace_refs: workspaces_to_add.append((ref, workspace)) else: workspaces_to_remove.remove(ref) return (workspaces_to_remove, workspaces_to_add)
Make OrganizationWorkspaceManager ignore the original workspace when sharing workspaces with groups
Make OrganizationWorkspaceManager ignore the original workspace when sharing workspaces with groups
Python
agpl-3.0
rockneurotiko/wirecloud,jpajuelo/wirecloud
from workspace.models import GroupPublishedWorkspace, PublishedWorkSpace, WorkSpace def ref_from_workspace(workspace): if isinstance(workspace, WorkSpace): return 'group/' + str(workspace.id) elif isinstance(workspace, PublishedWorkSpace): return 'group_published/' + str(workspace.id) class OrganizationWorkspaceManager: def get_id(self): return 'ezweb_organizations' def update_base_workspaces(self, user, current_workspace_refs): workspaces_to_remove = current_workspace_refs[:] workspaces_to_add = [] user_groups = user.groups.all() # workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces = [workspace for sublist in [WorkSpace.objects.filter(targetOrganizations=org) for org in user_groups] for workspace in sublist] # published workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces += [relation.workspace for sublist in [GroupPublishedWorkspace.objects.filter(group=group) for group in user_groups] for relation in sublist] workspaces = set(workspaces) for workspace in workspaces: if workspace.creator == user: # Ignore workspaces created by the user continue ref = ref_from_workspace(workspace) if ref not in current_workspace_refs: workspaces_to_add.append((ref, workspace)) else: workspaces_to_remove.remove(ref) return (workspaces_to_remove, workspaces_to_add)
Make OrganizationWorkspaceManager ignore the original workspace when sharing workspaces with groups from workspace.models import GroupPublishedWorkspace, PublishedWorkSpace, WorkSpace def ref_from_workspace(workspace): if isinstance(workspace, WorkSpace): return 'group/' + str(workspace.id) elif isinstance(workspace, PublishedWorkSpace): return 'group_published/' + str(workspace.id) class OrganizationWorkspaceManager: def get_id(self): return 'ezweb_organizations' def update_base_workspaces(self, user, current_workspace_refs): workspaces_to_remove = current_workspace_refs[:] workspaces_to_add = [] user_groups = user.groups.all() # workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces = [workspace for sublist in [WorkSpace.objects.filter(targetOrganizations=org) for org in user_groups] for workspace in sublist] # published workspaces assigned to the user's groups # the compression list outside the inside compression list is for flattening # the inside list workspaces += [relation.workspace for sublist in [GroupPublishedWorkspace.objects.filter(group=group) for group in user_groups] for relation in sublist] workspaces = set(workspaces) for workspace in workspaces: ref = ref_from_workspace(workspace) if ref not in current_workspace_refs: workspaces_to_add.append((ref, workspace)) else: workspaces_to_remove.remove(ref) return (workspaces_to_remove, workspaces_to_add)
f3f363e8911d3a635d68c7dbe767ee2585ed4f36
checkDuplicates.py
checkDuplicates.py
import pandas as pd from astropy import coordinates as coord from astropy import units as u class Sweetcat: """Load SWEET-Cat database""" def __init__(self): self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb' # Loading the SweetCat database self.readSC() def readSC(self): # TODO: Use the ra and dec, and match with coordinates instead of name # stored in self.coordinates. # Read the current version of SWEET-Cat names_ = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr', 'pflag', 'Teff', 'Tefferr', 'logg', 'logger', 'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr', 'author', 'link', 'source', 'update', 'comment', 'database', 'n3'] # SC = pd.read_csv('WEBSITE_online.rdb', delimiter='\t', names=names_) SC = pd.read_csv(self.fname_sc, delimiter='\t', names=names_) # Clean star names self.sc_names = [x.lower().replace(' ', '').replace('-', '') for x in SC.name] self.sc_names = list(map(str.strip, self.sc_names)) # Original star names self.sc_names_orig = [x.strip() for x in SC.name] # Coordinates of the stars in SWEET-Cat self.coordinates = SC.loc[:, ['ra', 'dec']] # SWEET-Cat (used to automatically update the database label) self.SC = SC if __name__ == '__main__': # Loading SWEET Cat sc = Sweetcat() # Check for duplicates, subset of columns can be changed print(sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)]) # Indexes of the duplicates indexes = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)].index # Remove a row # new_sc = sc.SC.drop([2728]) # new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_minusHD21749.rdb', # sep='\t', index=False, header=False) # Select only the EU data sc_EU = new_sc[new_sc['database'].str.contains('EU')] # Drop the database column sc_like_old = sc_EU.drop(columns=['database']) #sc_like_old.to_csv('WEBSITE_online_EU-updated_04-03-2020.rdb', # sep='\t', index=False, header=False)
Check for duplicates based on coordinates and select only one database (EU/NASA)
Check for duplicates based on coordinates and select only one database (EU/NASA)
Python
mit
DanielAndreasen/SWEET-Cat
import pandas as pd from astropy import coordinates as coord from astropy import units as u class Sweetcat: """Load SWEET-Cat database""" def __init__(self): self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb' # Loading the SweetCat database self.readSC() def readSC(self): # TODO: Use the ra and dec, and match with coordinates instead of name # stored in self.coordinates. # Read the current version of SWEET-Cat names_ = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr', 'pflag', 'Teff', 'Tefferr', 'logg', 'logger', 'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr', 'author', 'link', 'source', 'update', 'comment', 'database', 'n3'] # SC = pd.read_csv('WEBSITE_online.rdb', delimiter='\t', names=names_) SC = pd.read_csv(self.fname_sc, delimiter='\t', names=names_) # Clean star names self.sc_names = [x.lower().replace(' ', '').replace('-', '') for x in SC.name] self.sc_names = list(map(str.strip, self.sc_names)) # Original star names self.sc_names_orig = [x.strip() for x in SC.name] # Coordinates of the stars in SWEET-Cat self.coordinates = SC.loc[:, ['ra', 'dec']] # SWEET-Cat (used to automatically update the database label) self.SC = SC if __name__ == '__main__': # Loading SWEET Cat sc = Sweetcat() # Check for duplicates, subset of columns can be changed print(sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)]) # Indexes of the duplicates indexes = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)].index # Remove a row # new_sc = sc.SC.drop([2728]) # new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_minusHD21749.rdb', # sep='\t', index=False, header=False) # Select only the EU data sc_EU = new_sc[new_sc['database'].str.contains('EU')] # Drop the database column sc_like_old = sc_EU.drop(columns=['database']) #sc_like_old.to_csv('WEBSITE_online_EU-updated_04-03-2020.rdb', # sep='\t', index=False, header=False)
Check for duplicates based on coordinates and select only one database (EU/NASA)
b2c51babee88a53704219cb4c2a639c8e71ad621
tests/functions_tests/test_copy.py
tests/functions_tests/test_copy.py
import unittest import numpy import chainer from chainer import functions from chainer import gradient_check class Copy(unittest.TestCase): def setUp(self): self.x_data = numpy.random.uniform( -1, 1, (10, 5)).astype(numpy.float32) self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32) def test_check_forward_cpu(self): x = chainer.Variable(self.x_data) y = functions.copy(x, -1) gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0) def test_check_backward_cpu(self): x = chainer.Variable(self.x_data) y = functions.copy(x, -1) y.grad = self.gy y.backward() gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
Add unittest for Copy function
Add unittest for Copy function
Python
mit
tscohen/chainer,okuta/chainer,tkerola/chainer,keisuke-umezawa/chainer,sou81821/chainer,ktnyt/chainer,aonotas/chainer,truongdq/chainer,jnishi/chainer,1986ks/chainer,wkentaro/chainer,kuwa32/chainer,kikusu/chainer,elviswf/chainer,yanweifu/chainer,chainer/chainer,wavelets/chainer,cemoody/chainer,cupy/cupy,rezoo/chainer,sinhrks/chainer,hvy/chainer,kiyukuta/chainer,minhpqn/chainer,pfnet/chainer,ikasumi/chainer,t-abe/chainer,bayerj/chainer,Kaisuke5/chainer,kashif/chainer,AlpacaDB/chainer,okuta/chainer,muupan/chainer,hidenori-t/chainer,benob/chainer,masia02/chainer,delta2323/chainer,ysekky/chainer,niboshi/chainer,tigerneil/chainer,laysakura/chainer,jfsantos/chainer,woodshop/complex-chainer,woodshop/chainer,umitanuki/chainer,anaruse/chainer,ytoyama/yans_chainer_hackathon,ronekko/chainer
import unittest import numpy import chainer from chainer import functions from chainer import gradient_check class Copy(unittest.TestCase): def setUp(self): self.x_data = numpy.random.uniform( -1, 1, (10, 5)).astype(numpy.float32) self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32) def test_check_forward_cpu(self): x = chainer.Variable(self.x_data) y = functions.copy(x, -1) gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0) def test_check_backward_cpu(self): x = chainer.Variable(self.x_data) y = functions.copy(x, -1) y.grad = self.gy y.backward() gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
Add unittest for Copy function
33c518d34b7657549e5231aa5e5cd1a1206da1a5
setup.py
setup.py
import os from setuptools import setup def get_version_from_git_most_recent_tag(): return os.popen("git tag -l v* | tail --lines=1").read().strip().lstrip("v") def get_readme_content(): current_file_dir = os.path.dirname(__file__) readme_file_path = os.path.join(current_file_dir, "README.md") return open(readme_file_path).read() setup( name='telegram-bot', version=get_version_from_git_most_recent_tag(), description='Python Telegram bot API framework', long_description=get_readme_content(), url='https://github.com/alvarogzp/telegram-bot', author='Alvaro Gutierrez Perez', author_email='[email protected]', license='GPL-3.0', packages=['bot'], install_requires=[ 'requests', 'pytz' ], python_requires='>=3', )
import os from setuptools import setup, find_packages def get_version_from_git_most_recent_tag(): return os.popen("git tag -l v* | tail --lines=1").read().strip().lstrip("v") def get_readme_content(): current_file_dir = os.path.dirname(__file__) readme_file_path = os.path.join(current_file_dir, "README.md") return open(readme_file_path).read() setup( name='telegram-bot', version=get_version_from_git_most_recent_tag(), description='Python Telegram bot API framework', long_description=get_readme_content(), url='https://github.com/alvarogzp/telegram-bot', author='Alvaro Gutierrez Perez', author_email='[email protected]', license='GPL-3.0', packages=find_packages(), install_requires=[ 'requests', 'pytz' ], python_requires='>=3', )
Use find_packages() to export all packages automatically on install
Use find_packages() to export all packages automatically on install
Python
agpl-3.0
alvarogzp/telegram-bot,alvarogzp/telegram-bot
import os from setuptools import setup, find_packages def get_version_from_git_most_recent_tag(): return os.popen("git tag -l v* | tail --lines=1").read().strip().lstrip("v") def get_readme_content(): current_file_dir = os.path.dirname(__file__) readme_file_path = os.path.join(current_file_dir, "README.md") return open(readme_file_path).read() setup( name='telegram-bot', version=get_version_from_git_most_recent_tag(), description='Python Telegram bot API framework', long_description=get_readme_content(), url='https://github.com/alvarogzp/telegram-bot', author='Alvaro Gutierrez Perez', author_email='[email protected]', license='GPL-3.0', packages=find_packages(), install_requires=[ 'requests', 'pytz' ], python_requires='>=3', )
Use find_packages() to export all packages automatically on install import os from setuptools import setup def get_version_from_git_most_recent_tag(): return os.popen("git tag -l v* | tail --lines=1").read().strip().lstrip("v") def get_readme_content(): current_file_dir = os.path.dirname(__file__) readme_file_path = os.path.join(current_file_dir, "README.md") return open(readme_file_path).read() setup( name='telegram-bot', version=get_version_from_git_most_recent_tag(), description='Python Telegram bot API framework', long_description=get_readme_content(), url='https://github.com/alvarogzp/telegram-bot', author='Alvaro Gutierrez Perez', author_email='[email protected]', license='GPL-3.0', packages=['bot'], install_requires=[ 'requests', 'pytz' ], python_requires='>=3', )
1be9c51d4029c0fa32f7071072c171db42d21c83
doc-src/index.py
doc-src/index.py
import countershape from countershape import Page, Directory, PythonModule import countershape.grok this.layout = countershape.Layout("_layout.html") this.markdown = "rst" ns.docTitle = "Countershape Manual" ns.docMaintainer = "Aldo Cortesi" ns.docMaintainerEmail = "[email protected]" ns.copyright = "Copyright Nullcube 2007" ns.head = countershape.template.File(None, "_banner.html") ns.sidebar = countershape.widgets.SiblingPageIndex( '/index.html', exclude=['countershape'] ) ns.parse = countershape.grok.parse("../countershape") pages = [ Page("index.html", "Introduction"), Page("structure.html", "Document Structure"), Page("doc.html", "Documenting Code"), Page("api/apiref.html", "API Reference"), Directory("api"), PythonModule("../countershape", "Source"), Page("admin.html", "Administrivia") ] ns.imgBanner = countershape.html.IMG( src=countershape.model.UrlTo("countershape.png"), width="280", height="77", align="right" )
import countershape from countershape import Page, Directory, PythonModule import countershape.grok this.layout = countershape.Layout("_layout.html") this.markdown = "rst" ns.docTitle = "Countershape Manual" ns.docMaintainer = "Aldo Cortesi" ns.docMaintainerEmail = "[email protected]" ns.copyright = "Copyright Nullcube 2007" ns.head = countershape.template.File(None, "_banner.html") ns.sidebar = countershape.widgets.SiblingPageIndex( '/index.html', exclude=['countershape'] ) ns.parse = countershape.grok.parse("../countershape") pages = [ Page("index.html", "Introduction"), Page("structure/structure.html", "Document Structure"), Directory("structure"), Page("doc.html", "Documenting Code"), Page("api/apiref.html", "API Reference"), Directory("api"), PythonModule("../countershape", "Source"), Page("admin.html", "Administrivia") ] ns.imgBanner = countershape.html.IMG( src=countershape.model.UrlTo("countershape.png"), width="280", height="77", align="right" )
Move structure to a separate directory
Move structure to a separate directory
Python
mit
mhils/countershape,cortesi/countershape,samtaufa/countershape
import countershape from countershape import Page, Directory, PythonModule import countershape.grok this.layout = countershape.Layout("_layout.html") this.markdown = "rst" ns.docTitle = "Countershape Manual" ns.docMaintainer = "Aldo Cortesi" ns.docMaintainerEmail = "[email protected]" ns.copyright = "Copyright Nullcube 2007" ns.head = countershape.template.File(None, "_banner.html") ns.sidebar = countershape.widgets.SiblingPageIndex( '/index.html', exclude=['countershape'] ) ns.parse = countershape.grok.parse("../countershape") pages = [ Page("index.html", "Introduction"), Page("structure/structure.html", "Document Structure"), Directory("structure"), Page("doc.html", "Documenting Code"), Page("api/apiref.html", "API Reference"), Directory("api"), PythonModule("../countershape", "Source"), Page("admin.html", "Administrivia") ] ns.imgBanner = countershape.html.IMG( src=countershape.model.UrlTo("countershape.png"), width="280", height="77", align="right" )
Move structure to a separate directory import countershape from countershape import Page, Directory, PythonModule import countershape.grok this.layout = countershape.Layout("_layout.html") this.markdown = "rst" ns.docTitle = "Countershape Manual" ns.docMaintainer = "Aldo Cortesi" ns.docMaintainerEmail = "[email protected]" ns.copyright = "Copyright Nullcube 2007" ns.head = countershape.template.File(None, "_banner.html") ns.sidebar = countershape.widgets.SiblingPageIndex( '/index.html', exclude=['countershape'] ) ns.parse = countershape.grok.parse("../countershape") pages = [ Page("index.html", "Introduction"), Page("structure.html", "Document Structure"), Page("doc.html", "Documenting Code"), Page("api/apiref.html", "API Reference"), Directory("api"), PythonModule("../countershape", "Source"), Page("admin.html", "Administrivia") ] ns.imgBanner = countershape.html.IMG( src=countershape.model.UrlTo("countershape.png"), width="280", height="77", align="right" )
b0edec6bc9a4d77a1f0ea0f803ea892f35cc2f4f
text_field.py
text_field.py
# Created On: 2012/01/23 # Copyright 2011 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license class TextField: def __init__(self, model, view): self.model = model self.view = view self.model.view = self self.view.editingFinished.connect(self.editingFinished) def editingFinished(self): self.model.text = self.view.text() # model --> view def refresh(self): self.view.setText(self.model.text)
# Created On: 2012/01/23 # Copyright 2011 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license class TextField: def __init__(self, model, view): self.model = model self.view = view self.model.view = self # Make TextField also work for QLabel, which doesn't allow editing if hasattr(self.view, 'editingFinished'): self.view.editingFinished.connect(self.editingFinished) def editingFinished(self): self.model.text = self.view.text() # model --> view def refresh(self): self.view.setText(self.model.text)
Make TextField also work with a QLabel view, which doesn't allow editing.
Make TextField also work with a QLabel view, which doesn't allow editing.
Python
bsd-3-clause
hsoft/qtlib
# Created On: 2012/01/23 # Copyright 2011 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license class TextField: def __init__(self, model, view): self.model = model self.view = view self.model.view = self # Make TextField also work for QLabel, which doesn't allow editing if hasattr(self.view, 'editingFinished'): self.view.editingFinished.connect(self.editingFinished) def editingFinished(self): self.model.text = self.view.text() # model --> view def refresh(self): self.view.setText(self.model.text)
Make TextField also work with a QLabel view, which doesn't allow editing. # Created On: 2012/01/23 # Copyright 2011 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license class TextField: def __init__(self, model, view): self.model = model self.view = view self.model.view = self self.view.editingFinished.connect(self.editingFinished) def editingFinished(self): self.model.text = self.view.text() # model --> view def refresh(self): self.view.setText(self.model.text)
f4510b9b6402ddbe2412eb5524c7a44eb6bc966d
setup.py
setup.py
#!/usr/bin/env python # coding: utf8 # Copyright 2014-2015 Vincent Jacques <[email protected]> import contextlib import os import setuptools import setuptools.command.test version = "0.2.1" setuptools.setup( name="LowVoltage", version=version, description="Standalone DynamoDB client not hiding any feature", author="Vincent Jacques", author_email="[email protected]", url="http://jacquev6.github.io/LowVoltage", packages=sorted(dirpath.replace("/", ".") for dirpath, dirnames, filenames in os.walk("LowVoltage") if "__init__.py" in filenames), classifiers=[ "Development Status :: 3 - Alpha", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Environment :: Web Environment", ], test_suite="LowVoltage.tests" if "AWS_ACCESS_KEY_ID" in os.environ else "LowVoltage.tests.local", test_loader="testresources:TestLoader", use_2to3=True, )
#!/usr/bin/env python # coding: utf8 # Copyright 2014-2015 Vincent Jacques <[email protected]> import contextlib import os import setuptools import setuptools.command.test version = "0.2.3" setuptools.setup( name="LowVoltage", version=version, description="Standalone DynamoDB client not hiding any feature", author="Vincent Jacques", author_email="[email protected]", url="http://jacquev6.github.io/LowVoltage", packages=sorted(dirpath.replace("/", ".") for dirpath, dirnames, filenames in os.walk("LowVoltage") if "__init__.py" in filenames), classifiers=[ "Development Status :: 3 - Alpha", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Environment :: Web Environment", ], test_suite="LowVoltage.tests" if "AWS_ACCESS_KEY_ID" in os.environ else "LowVoltage.tests.local", test_loader="testresources:TestLoader", use_2to3=True, )
Fix version (0.2.2 never made it to PyPI)
Fix version (0.2.2 never made it to PyPI)
Python
mit
jacquev6/LowVoltage
#!/usr/bin/env python # coding: utf8 # Copyright 2014-2015 Vincent Jacques <[email protected]> import contextlib import os import setuptools import setuptools.command.test version = "0.2.3" setuptools.setup( name="LowVoltage", version=version, description="Standalone DynamoDB client not hiding any feature", author="Vincent Jacques", author_email="[email protected]", url="http://jacquev6.github.io/LowVoltage", packages=sorted(dirpath.replace("/", ".") for dirpath, dirnames, filenames in os.walk("LowVoltage") if "__init__.py" in filenames), classifiers=[ "Development Status :: 3 - Alpha", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Environment :: Web Environment", ], test_suite="LowVoltage.tests" if "AWS_ACCESS_KEY_ID" in os.environ else "LowVoltage.tests.local", test_loader="testresources:TestLoader", use_2to3=True, )
Fix version (0.2.2 never made it to PyPI) #!/usr/bin/env python # coding: utf8 # Copyright 2014-2015 Vincent Jacques <[email protected]> import contextlib import os import setuptools import setuptools.command.test version = "0.2.1" setuptools.setup( name="LowVoltage", version=version, description="Standalone DynamoDB client not hiding any feature", author="Vincent Jacques", author_email="[email protected]", url="http://jacquev6.github.io/LowVoltage", packages=sorted(dirpath.replace("/", ".") for dirpath, dirnames, filenames in os.walk("LowVoltage") if "__init__.py" in filenames), classifiers=[ "Development Status :: 3 - Alpha", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Environment :: Web Environment", ], test_suite="LowVoltage.tests" if "AWS_ACCESS_KEY_ID" in os.environ else "LowVoltage.tests.local", test_loader="testresources:TestLoader", use_2to3=True, )
3aacfd7147836ef95133aa88d558a1d69bbcd0cd
mopidy/exceptions.py
mopidy/exceptions.py
from __future__ import absolute_import, unicode_literals class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter # noqa def message(self, message): self._message = message class BackendError(MopidyException): pass class CoreError(MopidyException): def __init(self, message, errno=None): super(CoreError, self).__init(message, errno) self.errno = errno class ExtensionError(MopidyException): pass class FindError(MopidyException): def __init__(self, message, errno=None): super(FindError, self).__init__(message, errno) self.errno = errno class FrontendError(MopidyException): pass class MixerError(MopidyException): pass class ScannerError(MopidyException): pass class TracklistFull(CoreError): def __init(self, message, errno=None): super(TracklistFull, self).__init(message, errno) self.errno = errno class AudioException(MopidyException): pass class ValidationError(ValueError): pass
from __future__ import absolute_import, unicode_literals class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter # noqa def message(self, message): self._message = message class BackendError(MopidyException): pass class CoreError(MopidyException): def __init__(self, message, errno=None): super(CoreError, self).__init__(message, errno) self.errno = errno class ExtensionError(MopidyException): pass class FindError(MopidyException): def __init__(self, message, errno=None): super(FindError, self).__init__(message, errno) self.errno = errno class FrontendError(MopidyException): pass class MixerError(MopidyException): pass class ScannerError(MopidyException): pass class TracklistFull(CoreError): def __init__(self, message, errno=None): super(TracklistFull, self).__init__(message, errno) self.errno = errno class AudioException(MopidyException): pass class ValidationError(ValueError): pass
Fix typo in new CoreErrors
exception: Fix typo in new CoreErrors
Python
apache-2.0
mopidy/mopidy,hkariti/mopidy,tkem/mopidy,bacontext/mopidy,swak/mopidy,mokieyue/mopidy,ZenithDK/mopidy,ali/mopidy,bencevans/mopidy,jcass77/mopidy,diandiankan/mopidy,kingosticks/mopidy,jodal/mopidy,jmarsik/mopidy,quartz55/mopidy,dbrgn/mopidy,vrs01/mopidy,SuperStarPL/mopidy,pacificIT/mopidy,adamcik/mopidy
from __future__ import absolute_import, unicode_literals class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter # noqa def message(self, message): self._message = message class BackendError(MopidyException): pass class CoreError(MopidyException): def __init__(self, message, errno=None): super(CoreError, self).__init__(message, errno) self.errno = errno class ExtensionError(MopidyException): pass class FindError(MopidyException): def __init__(self, message, errno=None): super(FindError, self).__init__(message, errno) self.errno = errno class FrontendError(MopidyException): pass class MixerError(MopidyException): pass class ScannerError(MopidyException): pass class TracklistFull(CoreError): def __init__(self, message, errno=None): super(TracklistFull, self).__init__(message, errno) self.errno = errno class AudioException(MopidyException): pass class ValidationError(ValueError): pass
exception: Fix typo in new CoreErrors from __future__ import absolute_import, unicode_literals class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter # noqa def message(self, message): self._message = message class BackendError(MopidyException): pass class CoreError(MopidyException): def __init(self, message, errno=None): super(CoreError, self).__init(message, errno) self.errno = errno class ExtensionError(MopidyException): pass class FindError(MopidyException): def __init__(self, message, errno=None): super(FindError, self).__init__(message, errno) self.errno = errno class FrontendError(MopidyException): pass class MixerError(MopidyException): pass class ScannerError(MopidyException): pass class TracklistFull(CoreError): def __init(self, message, errno=None): super(TracklistFull, self).__init(message, errno) self.errno = errno class AudioException(MopidyException): pass class ValidationError(ValueError): pass
4ebdd73bab19e83d52e03ac4afb7e1b3f78004f5
drftutorial/catalog/views.py
drftutorial/catalog/views.py
from django.http import HttpResponse from django.http import Http404 from rest_framework import generics from rest_framework.response import Response from rest_framework.views import APIView from rest_framework import status from .permissions import IsAdminOrReadOnly from .models import Product from .serializers import ProductSerializer class ProductList(generics.ListCreateAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, ) class ProductDetail(APIView): def get_object(self, pk): try: return Product.objects.get(pk=pk) except Product.DoesNotExist: raise Http404 def get(self, request, pk, format=None): product = self.get_object(pk) serializer = ProductSerializer(product) return Response(serializer.data) def put(self, request, pk, format=None): product = self.get_object(pk) serializer = ProductSerializer(product, data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def delete(self, request, pk, format=None): product = self.get_object(pk) product.delete() return Response(status=status.HTTP_204_NO_CONTENT)
from rest_framework import generics from .permissions import IsAdminOrReadOnly from .models import Product from .serializers import ProductSerializer class ProductList(generics.ListCreateAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, ) class ProductDetail(generics.RetrieveUpdateDestroyAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, )
Implement ProductDetail with a generic RetrieveUpdateDestroyAPIView class
Implement ProductDetail with a generic RetrieveUpdateDestroyAPIView class
Python
mit
andreagrandi/drf-tutorial
from rest_framework import generics from .permissions import IsAdminOrReadOnly from .models import Product from .serializers import ProductSerializer class ProductList(generics.ListCreateAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, ) class ProductDetail(generics.RetrieveUpdateDestroyAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, )
Implement ProductDetail with a generic RetrieveUpdateDestroyAPIView class from django.http import HttpResponse from django.http import Http404 from rest_framework import generics from rest_framework.response import Response from rest_framework.views import APIView from rest_framework import status from .permissions import IsAdminOrReadOnly from .models import Product from .serializers import ProductSerializer class ProductList(generics.ListCreateAPIView): queryset = Product.objects.all() serializer_class = ProductSerializer permission_classes = (IsAdminOrReadOnly, ) class ProductDetail(APIView): def get_object(self, pk): try: return Product.objects.get(pk=pk) except Product.DoesNotExist: raise Http404 def get(self, request, pk, format=None): product = self.get_object(pk) serializer = ProductSerializer(product) return Response(serializer.data) def put(self, request, pk, format=None): product = self.get_object(pk) serializer = ProductSerializer(product, data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def delete(self, request, pk, format=None): product = self.get_object(pk) product.delete() return Response(status=status.HTTP_204_NO_CONTENT)
1b0a5388c246dba1707f768e9be08b3a63503a31
samples/python/topology/tweepy/app.py
samples/python/topology/tweepy/app.py
from streamsx.topology.topology import * import streamsx.topology.context import sys import tweets # # Continually stream tweets that contain # the terms passed on the command line. # # python3 app.py Food GlutenFree # def main(): terms = sys.argv[1:] topo = Topology("TweetsUsingTweepy") # Event based source stream # Each tuple is a dictionary containing # the full tweet (converted from JSON) ts = topo.source(tweets.tweets(terms)) # get the text of the tweet ts = ts.transform(tweets.text) # just print it ts.print() streamsx.topology.context.submit("DISTRIBUTED", topo.graph) if __name__ == '__main__': main()
from streamsx.topology.topology import * import streamsx.topology.context import sys import tweets # # Continually stream tweets that contain # the terms passed on the command line. # # python3 app.py Food GlutenFree # # # Requires tweepy to be installed # # pip3 install tweepy # # http://www.tweepy.org/ # # You must create Twitter application authentication tokens # and set them in the mykeys.py module. # Note this is only intended as a simple sample, # def main(): terms = sys.argv[1:] topo = Topology("TweetsUsingTweepy") # Event based source stream # Each tuple is a dictionary containing # the full tweet (converted from JSON) ts = topo.source(tweets.tweets(terms)) # get the text of the tweet ts = ts.transform(tweets.text) # just print it ts.print() streamsx.topology.context.submit("DISTRIBUTED", topo.graph) if __name__ == '__main__': main()
Add some info about tweepy
Add some info about tweepy
Python
apache-2.0
IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,wmarshall484/streamsx.topology,ibmkendrick/streamsx.topology
from streamsx.topology.topology import * import streamsx.topology.context import sys import tweets # # Continually stream tweets that contain # the terms passed on the command line. # # python3 app.py Food GlutenFree # # # Requires tweepy to be installed # # pip3 install tweepy # # http://www.tweepy.org/ # # You must create Twitter application authentication tokens # and set them in the mykeys.py module. # Note this is only intended as a simple sample, # def main(): terms = sys.argv[1:] topo = Topology("TweetsUsingTweepy") # Event based source stream # Each tuple is a dictionary containing # the full tweet (converted from JSON) ts = topo.source(tweets.tweets(terms)) # get the text of the tweet ts = ts.transform(tweets.text) # just print it ts.print() streamsx.topology.context.submit("DISTRIBUTED", topo.graph) if __name__ == '__main__': main()
Add some info about tweepy from streamsx.topology.topology import * import streamsx.topology.context import sys import tweets # # Continually stream tweets that contain # the terms passed on the command line. # # python3 app.py Food GlutenFree # def main(): terms = sys.argv[1:] topo = Topology("TweetsUsingTweepy") # Event based source stream # Each tuple is a dictionary containing # the full tweet (converted from JSON) ts = topo.source(tweets.tweets(terms)) # get the text of the tweet ts = ts.transform(tweets.text) # just print it ts.print() streamsx.topology.context.submit("DISTRIBUTED", topo.graph) if __name__ == '__main__': main()
50f3233a8560120cc0c55b02849f1b586cf1aa27
languages_plus/utils.py
languages_plus/utils.py
from django.core.exceptions import ObjectDoesNotExist from countries_plus.models import Country from .models import Language, CultureCode def associate_countries_and_languages(): for country in Country.objects.all(): langs = country.languages.strip(',') if langs: codes = langs.split(",") for code in codes: if '-' in code: lang_code, country_code = code.split('-') try: language = Language.objects.get(iso_639_1=lang_code) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % lang_code) continue try: country = Country.objects.get(iso=country_code) except ObjectDoesNotExist: print("Cannot find country identified by code %s" % country_code) continue country.language_set.add(language) CultureCode.objects.get_or_create(code=code, language=language, country=country) else: try: language = Language.objects.get_by_code(code) country.language_set.add(language) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % code) continue else: print ("No langauges found for country %s" % country)
from django.core.exceptions import ObjectDoesNotExist from countries_plus.models import Country from .models import Language, CultureCode def associate_countries_and_languages(): for country in Country.objects.all(): langs = '' try: langs = country.languages.strip(',') if langs: codes = langs.split(",") for code in codes: if '-' in code: lang_code, country_code = code.split('-') try: language = Language.objects.get(iso_639_1=lang_code) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % lang_code) continue try: country = Country.objects.get(iso=country_code) except ObjectDoesNotExist: print("Cannot find country identified by code %s" % country_code) continue country.language_set.add(language) CultureCode.objects.get_or_create(code=code, language=language, country=country) else: try: language = Language.objects.get_by_code(code) country.language_set.add(language) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % code) continue else: print ("No langauges found for country %s" % country)
Fix a crash if a country has no languages spoken
Fix a crash if a country has no languages spoken
Python
mit
cordery/django-languages-plus
from django.core.exceptions import ObjectDoesNotExist from countries_plus.models import Country from .models import Language, CultureCode def associate_countries_and_languages(): for country in Country.objects.all(): langs = '' try: langs = country.languages.strip(',') if langs: codes = langs.split(",") for code in codes: if '-' in code: lang_code, country_code = code.split('-') try: language = Language.objects.get(iso_639_1=lang_code) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % lang_code) continue try: country = Country.objects.get(iso=country_code) except ObjectDoesNotExist: print("Cannot find country identified by code %s" % country_code) continue country.language_set.add(language) CultureCode.objects.get_or_create(code=code, language=language, country=country) else: try: language = Language.objects.get_by_code(code) country.language_set.add(language) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % code) continue else: print ("No langauges found for country %s" % country)
Fix a crash if a country has no languages spoken from django.core.exceptions import ObjectDoesNotExist from countries_plus.models import Country from .models import Language, CultureCode def associate_countries_and_languages(): for country in Country.objects.all(): langs = country.languages.strip(',') if langs: codes = langs.split(",") for code in codes: if '-' in code: lang_code, country_code = code.split('-') try: language = Language.objects.get(iso_639_1=lang_code) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % lang_code) continue try: country = Country.objects.get(iso=country_code) except ObjectDoesNotExist: print("Cannot find country identified by code %s" % country_code) continue country.language_set.add(language) CultureCode.objects.get_or_create(code=code, language=language, country=country) else: try: language = Language.objects.get_by_code(code) country.language_set.add(language) except ObjectDoesNotExist: print("Cannot find language identified by code %s" % code) continue else: print ("No langauges found for country %s" % country)
0ac671d554f322524741a795f4a3250ef705f872
server/ec2spotmanager/migrations/0010_extend_instance_types.py
server/ec2spotmanager/migrations/0010_extend_instance_types.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-08-24 14:55 from __future__ import unicode_literals from django.db import migrations, models import ec2spotmanager.models class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0009_add_instance_size'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_types', field=models.CharField(blank=True, max_length=4095, null=True), ), ]
# -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-08-24 14:55 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0009_add_instance_size'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_types', field=models.CharField(blank=True, max_length=4095, null=True), ), ]
Fix Flake8 error in migration.
Fix Flake8 error in migration.
Python
mpl-2.0
MozillaSecurity/FuzzManager
# -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-08-24 14:55 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0009_add_instance_size'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_types', field=models.CharField(blank=True, max_length=4095, null=True), ), ]
Fix Flake8 error in migration. # -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-08-24 14:55 from __future__ import unicode_literals from django.db import migrations, models import ec2spotmanager.models class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0009_add_instance_size'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_types', field=models.CharField(blank=True, max_length=4095, null=True), ), ]
1d95063d1416f82115fa26d72d548ada0616e239
gensabenchmarks/go_func_utils.py
gensabenchmarks/go_func_utils.py
import sys import contextlib import inspect import gensabenchmarks.go_benchmark_functions as gbf def goclass(): """ Generator to get global optimization test classes/functions defined in SciPy """ bench_members = inspect.getmembers(gbf, inspect.isclass) benchmark_functions = [item for item in bench_members if issubclass(item[1], gbf.Benchmark)] for name, klass in benchmark_functions: yield (name, klass) class DummyFile(object): def write(self, x): pass def flush(self): pass @contextlib.contextmanager def nostdout(): save_stdout = sys.stdout save_stderr = sys.stderr sys.stdout = DummyFile() sys.stderr = DummyFile() yield sys.stdout = save_stdout sys.stderr = save_stderr
Fix import with full path
Fix import with full path
Python
bsd-2-clause
sgubianpm/gensabench,sgubianpm/pygensa,sgubianpm/HyGSA
import sys import contextlib import inspect import gensabenchmarks.go_benchmark_functions as gbf def goclass(): """ Generator to get global optimization test classes/functions defined in SciPy """ bench_members = inspect.getmembers(gbf, inspect.isclass) benchmark_functions = [item for item in bench_members if issubclass(item[1], gbf.Benchmark)] for name, klass in benchmark_functions: yield (name, klass) class DummyFile(object): def write(self, x): pass def flush(self): pass @contextlib.contextmanager def nostdout(): save_stdout = sys.stdout save_stderr = sys.stderr sys.stdout = DummyFile() sys.stderr = DummyFile() yield sys.stdout = save_stdout sys.stderr = save_stderr
Fix import with full path
0fd464dcd405faa356c18d69a0b7419c5cff0f21
pmxbot/__init__.py
pmxbot/__init__.py
# -*- coding: utf-8 -*- # vim:ts=4:sw=4:noexpandtab import importlib from .dictlib import ConfigDict config = ConfigDict( bot_nickname = 'pmxbot', database = 'sqlite:pmxbot.sqlite', server_host = 'irc.freenode.net', server_port = 6667, use_ssl = False, password = None, silent_bot = False, log_channels = [], other_channels = [], places = ['London', 'Tokyo', 'New York'], feed_interval = 15, # minutes feeds = [dict( name = 'pmxbot bitbucket', channel = '#inane', linkurl = 'http://bitbucket.org/yougov/pmxbot', url = 'http://bitbucket.org/yougov/pmxbot', ), ], librarypaste = 'http://paste.jaraco.com', ) "The config object" if __name__ == '__main__': importlib.import_module('pmxbot.core').run()
# -*- coding: utf-8 -*- # vim:ts=4:sw=4:noexpandtab import importlib from .dictlib import ConfigDict config = ConfigDict( bot_nickname = 'pmxbot', database = 'sqlite:pmxbot.sqlite', server_host = 'localhost', server_port = 6667, use_ssl = False, password = None, silent_bot = False, log_channels = [], other_channels = [], places = ['London', 'Tokyo', 'New York'], feed_interval = 15, # minutes feeds = [dict( name = 'pmxbot bitbucket', channel = '#inane', linkurl = 'http://bitbucket.org/yougov/pmxbot', url = 'http://bitbucket.org/yougov/pmxbot', ), ], librarypaste = 'http://paste.jaraco.com', ) "The config object" if __name__ == '__main__': importlib.import_module('pmxbot.core').run()
Use IRC server on localhost by default
Use IRC server on localhost by default
Python
bsd-3-clause
jamwt/diesel-pmxbot,jamwt/diesel-pmxbot
# -*- coding: utf-8 -*- # vim:ts=4:sw=4:noexpandtab import importlib from .dictlib import ConfigDict config = ConfigDict( bot_nickname = 'pmxbot', database = 'sqlite:pmxbot.sqlite', server_host = 'localhost', server_port = 6667, use_ssl = False, password = None, silent_bot = False, log_channels = [], other_channels = [], places = ['London', 'Tokyo', 'New York'], feed_interval = 15, # minutes feeds = [dict( name = 'pmxbot bitbucket', channel = '#inane', linkurl = 'http://bitbucket.org/yougov/pmxbot', url = 'http://bitbucket.org/yougov/pmxbot', ), ], librarypaste = 'http://paste.jaraco.com', ) "The config object" if __name__ == '__main__': importlib.import_module('pmxbot.core').run()
Use IRC server on localhost by default # -*- coding: utf-8 -*- # vim:ts=4:sw=4:noexpandtab import importlib from .dictlib import ConfigDict config = ConfigDict( bot_nickname = 'pmxbot', database = 'sqlite:pmxbot.sqlite', server_host = 'irc.freenode.net', server_port = 6667, use_ssl = False, password = None, silent_bot = False, log_channels = [], other_channels = [], places = ['London', 'Tokyo', 'New York'], feed_interval = 15, # minutes feeds = [dict( name = 'pmxbot bitbucket', channel = '#inane', linkurl = 'http://bitbucket.org/yougov/pmxbot', url = 'http://bitbucket.org/yougov/pmxbot', ), ], librarypaste = 'http://paste.jaraco.com', ) "The config object" if __name__ == '__main__': importlib.import_module('pmxbot.core').run()
cc929731dbbf51e00d748aa6cc335d4cd8bb705b
soco/__init__.py
soco/__init__.py
"""SoCo (Sonos Controller) is a simple library to control Sonos speakers.""" # There is no need for all strings here to be unicode, and Py2 cannot import # modules with unicode names so do not use from __future__ import # unicode_literals # https://github.com/SoCo/SoCo/issues/98 # import logging from .core import SoCo from .discovery import discover from .exceptions import SoCoException, UnknownSoCoException # Will be parsed by setup.py to determine package metadata __author__ = "The SoCo-Team <[email protected]>" # Please increment the version number and add the suffix "-dev" after # a release, to make it possible to identify in-development code __version__ = "0.22.0" __website__ = "https://github.com/SoCo/SoCo" __license__ = "MIT License" # You really should not `import *` - it is poor practice # but if you do, here is what you get: __all__ = [ "discover", "SoCo", "SoCoException", "UnknownSoCoException", ] # http://docs.python.org/2/howto/logging.html#library-config # Avoids spurious error messages if no logger is configured by the user logging.getLogger(__name__).addHandler(logging.NullHandler())
"""SoCo (Sonos Controller) is a simple library to control Sonos speakers.""" # There is no need for all strings here to be unicode, and Py2 cannot import # modules with unicode names so do not use from __future__ import # unicode_literals # https://github.com/SoCo/SoCo/issues/98 # import logging from .core import SoCo from .discovery import discover from .exceptions import SoCoException, UnknownSoCoException # Will be parsed by setup.py to determine package metadata __author__ = "The SoCo-Team <[email protected]>" # Please increment the version number and add the suffix "-dev" after # a release, to make it possible to identify in-development code __version__ = "0.23-dev" __website__ = "https://github.com/SoCo/SoCo" __license__ = "MIT License" # You really should not `import *` - it is poor practice # but if you do, here is what you get: __all__ = [ "discover", "SoCo", "SoCoException", "UnknownSoCoException", ] # http://docs.python.org/2/howto/logging.html#library-config # Avoids spurious error messages if no logger is configured by the user logging.getLogger(__name__).addHandler(logging.NullHandler())
Set up for v0.23 development
Set up for v0.23 development
Python
mit
SoCo/SoCo,SoCo/SoCo
"""SoCo (Sonos Controller) is a simple library to control Sonos speakers.""" # There is no need for all strings here to be unicode, and Py2 cannot import # modules with unicode names so do not use from __future__ import # unicode_literals # https://github.com/SoCo/SoCo/issues/98 # import logging from .core import SoCo from .discovery import discover from .exceptions import SoCoException, UnknownSoCoException # Will be parsed by setup.py to determine package metadata __author__ = "The SoCo-Team <[email protected]>" # Please increment the version number and add the suffix "-dev" after # a release, to make it possible to identify in-development code __version__ = "0.23-dev" __website__ = "https://github.com/SoCo/SoCo" __license__ = "MIT License" # You really should not `import *` - it is poor practice # but if you do, here is what you get: __all__ = [ "discover", "SoCo", "SoCoException", "UnknownSoCoException", ] # http://docs.python.org/2/howto/logging.html#library-config # Avoids spurious error messages if no logger is configured by the user logging.getLogger(__name__).addHandler(logging.NullHandler())
Set up for v0.23 development """SoCo (Sonos Controller) is a simple library to control Sonos speakers.""" # There is no need for all strings here to be unicode, and Py2 cannot import # modules with unicode names so do not use from __future__ import # unicode_literals # https://github.com/SoCo/SoCo/issues/98 # import logging from .core import SoCo from .discovery import discover from .exceptions import SoCoException, UnknownSoCoException # Will be parsed by setup.py to determine package metadata __author__ = "The SoCo-Team <[email protected]>" # Please increment the version number and add the suffix "-dev" after # a release, to make it possible to identify in-development code __version__ = "0.22.0" __website__ = "https://github.com/SoCo/SoCo" __license__ = "MIT License" # You really should not `import *` - it is poor practice # but if you do, here is what you get: __all__ = [ "discover", "SoCo", "SoCoException", "UnknownSoCoException", ] # http://docs.python.org/2/howto/logging.html#library-config # Avoids spurious error messages if no logger is configured by the user logging.getLogger(__name__).addHandler(logging.NullHandler())
b0261ec4757167cb3d5bf8ab3ded0273eb9477de
txircd/modules/umode_s.py
txircd/modules/umode_s.py
from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
Implement usermode +s (currently doesn't do anything)
Implement usermode +s (currently doesn't do anything)
Python
bsd-3-clause
ElementalAlchemist/txircd,DesertBus/txircd,Heufneutje/txircd
from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
Implement usermode +s (currently doesn't do anything)
7e5d8eb0d6eabb427d7e9bd02bac3ee7b90d228d
src/config.py
src/config.py
import urllib import urllib.request proxies = [ False, False ]
import urllib import urllib.request from pprint import pprint proxies = [ '', '' ] _tested_proxies = False def test_proxies(): global _tested_proxies if _tested_proxies: return _tested_proxies = {} def _testproxy(proxyid): if proxyid=='': return True if _tested_proxies.get(proxyid) is not None: return _tested_proxies.get(proxyid) print("Pretesting proxy",proxyid) proxy = urllib.request.ProxyHandler( {'http': proxyid , 'https': proxyid } ) opener = urllib.request.build_opener(proxy) #urllib.request.install_opener(opener) try: opened = opener.open('http://example.com') if not opened: _tested_proxies[proxyid] = False return False assert(opened.read().find(b"Example Domain")>-1) except urllib.error.URLError as e: try: opened = opener.open('http://google.com') if not opened: _tested_proxies[proxyid] = False return False except urllib.error.URLError as e: print("Proxy error",proxyid,e) _tested_proxies[proxyid] = False return False _tested_proxies[proxyid] = True return True proxies[:] = [tup for tup in proxies if _testproxy(tup)] _tested_proxies = True
Test proxies before using them.
Test proxies before using them.
Python
mit
koivunen/whoisabusetool
import urllib import urllib.request from pprint import pprint proxies = [ '', '' ] _tested_proxies = False def test_proxies(): global _tested_proxies if _tested_proxies: return _tested_proxies = {} def _testproxy(proxyid): if proxyid=='': return True if _tested_proxies.get(proxyid) is not None: return _tested_proxies.get(proxyid) print("Pretesting proxy",proxyid) proxy = urllib.request.ProxyHandler( {'http': proxyid , 'https': proxyid } ) opener = urllib.request.build_opener(proxy) #urllib.request.install_opener(opener) try: opened = opener.open('http://example.com') if not opened: _tested_proxies[proxyid] = False return False assert(opened.read().find(b"Example Domain")>-1) except urllib.error.URLError as e: try: opened = opener.open('http://google.com') if not opened: _tested_proxies[proxyid] = False return False except urllib.error.URLError as e: print("Proxy error",proxyid,e) _tested_proxies[proxyid] = False return False _tested_proxies[proxyid] = True return True proxies[:] = [tup for tup in proxies if _testproxy(tup)] _tested_proxies = True
Test proxies before using them. import urllib import urllib.request proxies = [ False, False ]
41236c2be66b6f790308cba321cb482807814323
ubersmith/calls/device.py
ubersmith/calls/device.py
"""Device call classes. These classes implement any response cleaning and validation needed. If a call class isn't defined for a given method then one is created using ubersmith.calls.BaseCall. """ from ubersmith.calls import BaseCall, GroupCall from ubersmith.utils import prepend_base __all__ = [ 'GetCall', 'ListCall', ] _ = prepend_base(__name__.split('.')[-1]) class GetCall(BaseCall): method = _('get') required_fields = ['device_id'] class ListCall(GroupCall): method = _('list') rename_fields = {'clientid': 'client_id'} int_fields = ['client_id']
"""Device call classes. These classes implement any response cleaning and validation needed. If a call class isn't defined for a given method then one is created using ubersmith.calls.BaseCall. """ from ubersmith.calls import BaseCall, GroupCall from ubersmith.utils import prepend_base __all__ = [ 'GetCall', 'ListCall', ] _ = prepend_base(__name__.split('.')[-1]) class GetCall(BaseCall): method = _('get') required_fields = ['device_id'] class ListCall(GroupCall): method = _('list') rename_fields = {'clientid': 'client_id'} int_fields = ['client_id'] class ModuleGraphCall(FileCall): method = _('module_graph')
Make module graph call return a file.
Make module graph call return a file.
Python
mit
jasonkeene/python-ubersmith,jasonkeene/python-ubersmith,hivelocity/python-ubersmith,hivelocity/python-ubersmith
"""Device call classes. These classes implement any response cleaning and validation needed. If a call class isn't defined for a given method then one is created using ubersmith.calls.BaseCall. """ from ubersmith.calls import BaseCall, GroupCall from ubersmith.utils import prepend_base __all__ = [ 'GetCall', 'ListCall', ] _ = prepend_base(__name__.split('.')[-1]) class GetCall(BaseCall): method = _('get') required_fields = ['device_id'] class ListCall(GroupCall): method = _('list') rename_fields = {'clientid': 'client_id'} int_fields = ['client_id'] class ModuleGraphCall(FileCall): method = _('module_graph')
Make module graph call return a file. """Device call classes. These classes implement any response cleaning and validation needed. If a call class isn't defined for a given method then one is created using ubersmith.calls.BaseCall. """ from ubersmith.calls import BaseCall, GroupCall from ubersmith.utils import prepend_base __all__ = [ 'GetCall', 'ListCall', ] _ = prepend_base(__name__.split('.')[-1]) class GetCall(BaseCall): method = _('get') required_fields = ['device_id'] class ListCall(GroupCall): method = _('list') rename_fields = {'clientid': 'client_id'} int_fields = ['client_id']
4a5e798fe23d720315a7cab60824b70ce0983f8e
Kane1985/Chapter2/Ex4.1.py
Kane1985/Chapter2/Ex4.1.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Exercise 4.1 from Kane 1985""" from sympy.physics.mechanics import dot, dynamicsymbols, MechanicsStrPrinter from sympy.physics.mechanics import ReferenceFrame, Point from sympy import solve, symbols, pi from sympy.simplify.simplify import trigsimp def msprint(expr): pr = MechanicsStrPrinter() return pr.doprint(expr) theta1, theta2, theta3 = symbols('theta1 theta2 theta3') x1, x2, x3 = symbols('x1 x2 x3') A = ReferenceFrame('A') A_1 = A.orientnew('A_1', 'Axis', [theta1, A.x]) A_2 = A_1.orientnew('A_2', 'Axis', [theta2, A.y]) B = A_2.orientnew('B', 'Axis', [theta3, A.z]) O = Point('O') P = O.locatenew('P', x1 * A.x + x2 * A.y + x3 * A.z) p = P.pos_from(O) # Point P is on L (span(B.x)) when: print("{0} = 0".format(trigsimp(dot(p, B.x))))
#!/usr/bin/env python # -*- coding: utf-8 -*- """Exercise 4.1 from Kane 1985""" from sympy.physics.mechanics import dot, dynamicsymbols, MechanicsStrPrinter from sympy.physics.mechanics import ReferenceFrame, Point from sympy import solve, symbols, pi, sin, cos from sympy.simplify.simplify import trigsimp def msprint(expr): pr = MechanicsStrPrinter() return pr.doprint(expr) theta = symbols('theta:3') x = symbols('x:3') q = symbols('q') A = ReferenceFrame('A') B = A.orientnew('B', 'SPACE', theta, 'xyz') O = Point('O') P = O.locatenew('P', x[0] * A.x + x[1] * A.y + x[2] * A.z) p = P.pos_from(O) # From problem, point P is on L (span(B.x)) when: constraint_eqs = {x[0] : q*cos(theta[1])*cos(theta[2]), x[1] : q*cos(theta[1])*sin(theta[2]), x[2] : -q*sin(theta[1])} # If point P is on line L then r^{P/O} will have no components in the B.y or # B.z directions since point O is also on line L and B.x is parallel to L. assert(trigsimp(dot(P.pos_from(O), B.y).subs(constraint_eqs)) == 0) assert(trigsimp(dot(P.pos_from(O), B.z).subs(constraint_eqs)) == 0)
Simplify formulation and change from print() to assert()
Simplify formulation and change from print() to assert()
Python
bsd-3-clause
jcrist/pydy,Shekharrajak/pydy,oliverlee/pydy,jcrist/pydy,jcrist/pydy,oliverlee/pydy,jcrist/pydy,Shekharrajak/pydy,oliverlee/pydy,jcrist/pydy,Shekharrajak/pydy,jcrist/pydy,skidzo/pydy,Shekharrajak/pydy,skidzo/pydy,skidzo/pydy,jcrist/pydy,skidzo/pydy
#!/usr/bin/env python # -*- coding: utf-8 -*- """Exercise 4.1 from Kane 1985""" from sympy.physics.mechanics import dot, dynamicsymbols, MechanicsStrPrinter from sympy.physics.mechanics import ReferenceFrame, Point from sympy import solve, symbols, pi, sin, cos from sympy.simplify.simplify import trigsimp def msprint(expr): pr = MechanicsStrPrinter() return pr.doprint(expr) theta = symbols('theta:3') x = symbols('x:3') q = symbols('q') A = ReferenceFrame('A') B = A.orientnew('B', 'SPACE', theta, 'xyz') O = Point('O') P = O.locatenew('P', x[0] * A.x + x[1] * A.y + x[2] * A.z) p = P.pos_from(O) # From problem, point P is on L (span(B.x)) when: constraint_eqs = {x[0] : q*cos(theta[1])*cos(theta[2]), x[1] : q*cos(theta[1])*sin(theta[2]), x[2] : -q*sin(theta[1])} # If point P is on line L then r^{P/O} will have no components in the B.y or # B.z directions since point O is also on line L and B.x is parallel to L. assert(trigsimp(dot(P.pos_from(O), B.y).subs(constraint_eqs)) == 0) assert(trigsimp(dot(P.pos_from(O), B.z).subs(constraint_eqs)) == 0)
Simplify formulation and change from print() to assert() #!/usr/bin/env python # -*- coding: utf-8 -*- """Exercise 4.1 from Kane 1985""" from sympy.physics.mechanics import dot, dynamicsymbols, MechanicsStrPrinter from sympy.physics.mechanics import ReferenceFrame, Point from sympy import solve, symbols, pi from sympy.simplify.simplify import trigsimp def msprint(expr): pr = MechanicsStrPrinter() return pr.doprint(expr) theta1, theta2, theta3 = symbols('theta1 theta2 theta3') x1, x2, x3 = symbols('x1 x2 x3') A = ReferenceFrame('A') A_1 = A.orientnew('A_1', 'Axis', [theta1, A.x]) A_2 = A_1.orientnew('A_2', 'Axis', [theta2, A.y]) B = A_2.orientnew('B', 'Axis', [theta3, A.z]) O = Point('O') P = O.locatenew('P', x1 * A.x + x2 * A.y + x3 * A.z) p = P.pos_from(O) # Point P is on L (span(B.x)) when: print("{0} = 0".format(trigsimp(dot(p, B.x))))
d0fb38da0200c1b780e296d6c5767438e2f82dc8
array/sudoku-check.py
array/sudoku-check.py
# Implement an algorithm that will check whether a given grid of numbers represents a valid Sudoku puzzle def check_rows(grid): i = 0 while i < len(grid): j = 0 ref_check = {} while j < len(grid[i]): if grid[i][j] != '.' and grid[i][j] in ref_check: return False else: ref_check[grid[i][j]] = 1 j += 1 i += 1 return True def check_columns(grid): column = 0 length = len(grid) while column < length: row = 0 ref_check = {} while row < length: if grid[row][column] != '.' and grid[row][column] in ref_check: return False else: ref_check[grid[row][column]] = 1 row += 1 column += 1 return True
# Implement an algorithm that will check whether a given grid of numbers represents a valid Sudoku puzzle def check_rows(grid): i = 0 while i < len(grid): j = 0 ref_check = {} while j < len(grid[i]): if grid[i][j] != '.' and grid[i][j] in ref_check: return False else: ref_check[grid[i][j]] = 1 j += 1 i += 1 return True def check_columns(grid): column = 0 length = len(grid) while column < length: row = 0 ref_check = {} while row < length: if grid[row][column] != '.' and grid[row][column] in ref_check: return False else: ref_check[grid[row][column]] = 1 row += 1 column += 1 return True def create_sub_grid(grid): ref_check = {} for square in grid: if square != '.' and square in ref_check: return False else: ref_check[square] = 1 return True
Add check sub grid method
Add check sub grid method
Python
mit
derekmpham/interview-prep,derekmpham/interview-prep
# Implement an algorithm that will check whether a given grid of numbers represents a valid Sudoku puzzle def check_rows(grid): i = 0 while i < len(grid): j = 0 ref_check = {} while j < len(grid[i]): if grid[i][j] != '.' and grid[i][j] in ref_check: return False else: ref_check[grid[i][j]] = 1 j += 1 i += 1 return True def check_columns(grid): column = 0 length = len(grid) while column < length: row = 0 ref_check = {} while row < length: if grid[row][column] != '.' and grid[row][column] in ref_check: return False else: ref_check[grid[row][column]] = 1 row += 1 column += 1 return True def create_sub_grid(grid): ref_check = {} for square in grid: if square != '.' and square in ref_check: return False else: ref_check[square] = 1 return True
Add check sub grid method # Implement an algorithm that will check whether a given grid of numbers represents a valid Sudoku puzzle def check_rows(grid): i = 0 while i < len(grid): j = 0 ref_check = {} while j < len(grid[i]): if grid[i][j] != '.' and grid[i][j] in ref_check: return False else: ref_check[grid[i][j]] = 1 j += 1 i += 1 return True def check_columns(grid): column = 0 length = len(grid) while column < length: row = 0 ref_check = {} while row < length: if grid[row][column] != '.' and grid[row][column] in ref_check: return False else: ref_check[grid[row][column]] = 1 row += 1 column += 1 return True
353728aba17695396c6167543e74181f9f853fdc
examples/template_render.py
examples/template_render.py
import django.template.loader import django.conf import sys sys.path.append('django_test') django.conf.settings.configure(INSTALLED_APPS=(), TEMPLATE_DIRS=('.', 'examples',)) for x in range(0,100): django.template.loader.render_to_string('template.html')
import django.template.loader import django.conf import sys, os os.chdir(os.path.dirname(__file__)) django.conf.settings.configure( INSTALLED_APPS=(), TEMPLATES=[{ "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": ['.'] }], ) django.setup() for x in range(0,100): django.template.loader.render_to_string('template.html')
Update template render example for Django 1.8+
Update template render example for Django 1.8+
Python
bsd-3-clause
joerick/pyinstrument,joerick/pyinstrument,joerick/pyinstrument,joerick/pyinstrument,joerick/pyinstrument,joerick/pyinstrument
import django.template.loader import django.conf import sys, os os.chdir(os.path.dirname(__file__)) django.conf.settings.configure( INSTALLED_APPS=(), TEMPLATES=[{ "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": ['.'] }], ) django.setup() for x in range(0,100): django.template.loader.render_to_string('template.html')
Update template render example for Django 1.8+ import django.template.loader import django.conf import sys sys.path.append('django_test') django.conf.settings.configure(INSTALLED_APPS=(), TEMPLATE_DIRS=('.', 'examples',)) for x in range(0,100): django.template.loader.render_to_string('template.html')
88da3432dc0676cbe74c0d9f170fbd6f18f97f8a
examples/tornado_server.py
examples/tornado_server.py
from tornado import ioloop, web from jsonrpcserver import method, async_dispatch as dispatch @method async def ping(): return "pong" class MainHandler(web.RequestHandler): async def post(self): request = self.request.body.decode() response = await dispatch(request) print(response) if response.wanted: self.write(str(response)) app = web.Application([(r"/", MainHandler)]) if __name__ == "__main__": app.listen(5000) ioloop.IOLoop.current().start()
from tornado import ioloop, web from jsonrpcserver import method, async_dispatch as dispatch @method async def ping() -> str: return "pong" class MainHandler(web.RequestHandler): async def post(self) -> None: request = self.request.body.decode() response = await dispatch(request) if response.wanted: self.write(str(response)) app = web.Application([(r"/", MainHandler)]) if __name__ == "__main__": app.listen(5000) ioloop.IOLoop.current().start()
Remove unwanted print statement from example
Remove unwanted print statement from example
Python
mit
bcb/jsonrpcserver
from tornado import ioloop, web from jsonrpcserver import method, async_dispatch as dispatch @method async def ping() -> str: return "pong" class MainHandler(web.RequestHandler): async def post(self) -> None: request = self.request.body.decode() response = await dispatch(request) if response.wanted: self.write(str(response)) app = web.Application([(r"/", MainHandler)]) if __name__ == "__main__": app.listen(5000) ioloop.IOLoop.current().start()
Remove unwanted print statement from example from tornado import ioloop, web from jsonrpcserver import method, async_dispatch as dispatch @method async def ping(): return "pong" class MainHandler(web.RequestHandler): async def post(self): request = self.request.body.decode() response = await dispatch(request) print(response) if response.wanted: self.write(str(response)) app = web.Application([(r"/", MainHandler)]) if __name__ == "__main__": app.listen(5000) ioloop.IOLoop.current().start()
92e1803a4c9e38a8672e00afbcfe0807ea808565
examples/reading/rtf15.py
examples/reading/rtf15.py
from pyth.plugins.rtf15.reader import Rtf15Reader from pyth.plugins.xhtml.writer import XHTMLWriter import sys if len(sys.argv) > 1: filename = sys.argv[1] else: filename = "sample.rtf" doc = Rtf15Reader.read(open(filename)) print XHTMLWriter.write(doc, pretty=True).read()
from pyth.plugins.rtf15.reader import Rtf15Reader from pyth.plugins.xhtml.writer import XHTMLWriter import sys if len(sys.argv) > 1: filename = sys.argv[1] else: filename = "sample.rtf" doc = Rtf15Reader.read(open(filename, "rb")) print XHTMLWriter.write(doc, pretty=True).read()
Make RTF reader sample open in 'rb' mode explicitly
Make RTF reader sample open in 'rb' mode explicitly
Python
mit
kippr/pyth,kippr/pyth,prechelt/pyth,eriol/pyth,brendonh/pyth,prechelt/pyth,sheepeatingtaz/pyth,pombredanne/pyth
from pyth.plugins.rtf15.reader import Rtf15Reader from pyth.plugins.xhtml.writer import XHTMLWriter import sys if len(sys.argv) > 1: filename = sys.argv[1] else: filename = "sample.rtf" doc = Rtf15Reader.read(open(filename, "rb")) print XHTMLWriter.write(doc, pretty=True).read()
Make RTF reader sample open in 'rb' mode explicitly from pyth.plugins.rtf15.reader import Rtf15Reader from pyth.plugins.xhtml.writer import XHTMLWriter import sys if len(sys.argv) > 1: filename = sys.argv[1] else: filename = "sample.rtf" doc = Rtf15Reader.read(open(filename)) print XHTMLWriter.write(doc, pretty=True).read()
7e7817fc5a90adf7b2fa4b8947dd46a75bc6e818
pystereovisiontoolkit.py
pystereovisiontoolkit.py
#! /usr/bin/env python # -*- coding:utf-8 -*- # # Application to capture, and calibrate stereo cameras # # # External dependencies # import argparse import Calibration import CvViewer # # Command line argument parser # parser = argparse.ArgumentParser( description='Camera calibration toolkit.' ) parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' ) parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' ) parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' ) parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' ) parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' ) parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' ) parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' ) args = parser.parse_args() # # Calibration pattern setup # if args.grid : Calibration.pattern_type = 'CirclesGrid' Calibration.pattern_size = ( int(args.rows), int(args.cols) ) # # Stereo camera live capture # if args.live : # Stereo camera viewer CvViewer.VmbStereoViewer() # # Mono camera calibration # elif args.mono : # Launch calibration Calibration.CameraCalibration( args.mono, args.debug )
Introduce a single program to rule them all...
Introduce a single program to rule them all...
Python
mit
microy/VisionToolkit,microy/VisionToolkit,microy/StereoVision,microy/PyStereoVisionToolkit,microy/StereoVision,microy/PyStereoVisionToolkit
#! /usr/bin/env python # -*- coding:utf-8 -*- # # Application to capture, and calibrate stereo cameras # # # External dependencies # import argparse import Calibration import CvViewer # # Command line argument parser # parser = argparse.ArgumentParser( description='Camera calibration toolkit.' ) parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' ) parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' ) parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' ) parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' ) parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' ) parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' ) parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' ) args = parser.parse_args() # # Calibration pattern setup # if args.grid : Calibration.pattern_type = 'CirclesGrid' Calibration.pattern_size = ( int(args.rows), int(args.cols) ) # # Stereo camera live capture # if args.live : # Stereo camera viewer CvViewer.VmbStereoViewer() # # Mono camera calibration # elif args.mono : # Launch calibration Calibration.CameraCalibration( args.mono, args.debug )
Introduce a single program to rule them all...
be1e23f068fbc34587caa0a796e259e42ed6f7c6
utils.py
utils.py
import re import textwrap import html2text text_maker = html2text.HTML2Text() text_maker.body_width = 0 def strip_html_tags(text): return re.sub('<[^<]+?>', '', text) def html_to_md(string, strip_html=True, markdown=False): if strip_html: string = strip_html_tags(string) if markdown: string = text_maker.handle(string) return string def get_formatted_book_data(book_data): template = textwrap.dedent("""\ *Title:* {0} by {1} *Rating:* {2} by {3} users *Description:* {4} *Link*: [click me]({5}) Tip: {6}""") title = book_data['title'] authors = book_data['authors'] average_rating = book_data['average_rating'] ratings_count = book_data['ratings_count'] description = html_to_md(book_data.get('description', '')) url = book_data['url'] tip = 'Use author name also for better search results' template = template.format(title, authors, average_rating, ratings_count, description, url, tip) return template
import re import textwrap import html2text text_maker = html2text.HTML2Text() text_maker.body_width = 0 def strip_html_tags(text): return re.sub('<[^<]+?>', '', text) def html_to_md(string, strip_html=True, markdown=False): if not string: return 'No Description Found' if strip_html: string = strip_html_tags(string) if markdown: string = text_maker.handle(string) return string def get_formatted_book_data(book_data): template = textwrap.dedent("""\ *Title:* {0} by {1} *Rating:* {2} by {3} users *Description:* {4} *Link*: [click me]({5}) Tip: {6}""") title = book_data['title'] authors = book_data['authors'] average_rating = book_data['average_rating'] ratings_count = book_data['ratings_count'] description = html_to_md(book_data.get('description', '')) url = book_data['url'] tip = 'Use author name also for better search results' template = template.format(title, authors, average_rating, ratings_count, description, url, tip) return template
Handle NoneType values in `html_to_md`
Handle NoneType values in `html_to_md`
Python
mit
avinassh/Laozi,avinassh/Laozi
import re import textwrap import html2text text_maker = html2text.HTML2Text() text_maker.body_width = 0 def strip_html_tags(text): return re.sub('<[^<]+?>', '', text) def html_to_md(string, strip_html=True, markdown=False): if not string: return 'No Description Found' if strip_html: string = strip_html_tags(string) if markdown: string = text_maker.handle(string) return string def get_formatted_book_data(book_data): template = textwrap.dedent("""\ *Title:* {0} by {1} *Rating:* {2} by {3} users *Description:* {4} *Link*: [click me]({5}) Tip: {6}""") title = book_data['title'] authors = book_data['authors'] average_rating = book_data['average_rating'] ratings_count = book_data['ratings_count'] description = html_to_md(book_data.get('description', '')) url = book_data['url'] tip = 'Use author name also for better search results' template = template.format(title, authors, average_rating, ratings_count, description, url, tip) return template
Handle NoneType values in `html_to_md`
import re
import textwrap

import html2text

text_maker = html2text.HTML2Text()
text_maker.body_width = 0


def strip_html_tags(text):
    return re.sub('<[^<]+?>', '', text)


def html_to_md(string, strip_html=True, markdown=False):
    if strip_html:
        string = strip_html_tags(string)
    if markdown:
        string = text_maker.handle(string)
    return string


def get_formatted_book_data(book_data):
    template = textwrap.dedent("""\
        *Title:* {0} by {1}
        *Rating:* {2} by {3} users
        *Description:* {4}
        *Link*: [click me]({5})

        Tip: {6}""")
    title = book_data['title']
    authors = book_data['authors']
    average_rating = book_data['average_rating']
    ratings_count = book_data['ratings_count']
    description = html_to_md(book_data.get('description', ''))
    url = book_data['url']
    tip = 'Use author name also for better search results'
    template = template.format(title, authors, average_rating, ratings_count, description, url, tip)
    return template
4cb674093c95ebbe3f7dc61d0b6a262995337100
osf/migrations/0012_auto_20170411_1548.py
osf/migrations/0012_auto_20170411_1548.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals

from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType

from osf.models import OSFUser


def fix_osfuser_view_permissions(*args):
    view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
    wrong_osfuser_permission = Permission.objects.get(codename='view_user')
    wrong_osfuser_permission.delete()
    read_only = Group.objects.get(name='read_only')
    osf_admin = Group.objects.get(name='osf_admin')
    read_only.permissions.add(view_osfuser_permission)
    osf_admin.permissions.add(view_osfuser_permission)
    read_only.save()
    osf_admin.save()


def revert_osfuser_view_permissions(*args):
    ctype = ContentType.objects.get_for_model(OSFUser)
    wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
    view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
    osf_admin = Group.objects.get(name='osf_admin')
    read_only = Group.objects.get(name='read_only')
    osf_admin.permissions.add(wrong_osfuser_permission)
    read_only.permissions.add(wrong_osfuser_permission)
    read_only.permissions.remove(view_osfuser_permission)
    osf_admin.permissions.remove(view_osfuser_permission)
    osf_admin.save()
    read_only.save()


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0011_auto_20170410_1711'),
    ]

    operations = [
        migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
    ]
Add proper view_osfuser permission to read_only and admin groups
Add proper view_osfuser permission to read_only and admin groups
Python
apache-2.0
chennan47/osf.io,adlius/osf.io,leb2dg/osf.io,binoculars/osf.io,mattclark/osf.io,icereval/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,caseyrollins/osf.io,sloria/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,chennan47/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,caneruguz/osf.io,hmoco/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,pattisdr/osf.io,Nesiehr/osf.io,aaxelb/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,cslzchen/osf.io,laurenrevere/osf.io,caneruguz/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,aaxelb/osf.io,crcresearch/osf.io,erinspace/osf.io,cslzchen/osf.io,felliott/osf.io,binoculars/osf.io,leb2dg/osf.io,adlius/osf.io,cslzchen/osf.io,aaxelb/osf.io,sloria/osf.io,mfraezz/osf.io,binoculars/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,chrisseto/osf.io,Nesiehr/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,erinspace/osf.io,mfraezz/osf.io,chrisseto/osf.io,sloria/osf.io,chrisseto/osf.io,icereval/osf.io,hmoco/osf.io,baylee-d/osf.io,crcresearch/osf.io,aaxelb/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,erinspace/osf.io,hmoco/osf.io,saradbowman/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,chennan47/osf.io,cwisecarver/osf.io,felliott/osf.io,cwisecarver/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,felliott/osf.io,cslzchen/osf.io,caneruguz/osf.io,pattisdr/osf.io,caneruguz/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,baylee-d/osf.io,icereval/osf.io,felliott/osf.io
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals

from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType

from osf.models import OSFUser


def fix_osfuser_view_permissions(*args):
    view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
    wrong_osfuser_permission = Permission.objects.get(codename='view_user')
    wrong_osfuser_permission.delete()
    read_only = Group.objects.get(name='read_only')
    osf_admin = Group.objects.get(name='osf_admin')
    read_only.permissions.add(view_osfuser_permission)
    osf_admin.permissions.add(view_osfuser_permission)
    read_only.save()
    osf_admin.save()


def revert_osfuser_view_permissions(*args):
    ctype = ContentType.objects.get_for_model(OSFUser)
    wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
    view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
    osf_admin = Group.objects.get(name='osf_admin')
    read_only = Group.objects.get(name='read_only')
    osf_admin.permissions.add(wrong_osfuser_permission)
    read_only.permissions.add(wrong_osfuser_permission)
    read_only.permissions.remove(view_osfuser_permission)
    osf_admin.permissions.remove(view_osfuser_permission)
    osf_admin.save()
    read_only.save()


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0011_auto_20170410_1711'),
    ]

    operations = [
        migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
    ]
Add proper view_osfuser permission to read_only and admin groups
78ca15758018d52f1353b29410f97bba215e0be2
django_afip/views.py
django_afip/views.py
from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.generic import View from .pdf import generate_receipt_pdf class ReceiptHTMLView(View): template_name = 'django_afip/invoice.html' def get(self, request, pk): return HttpResponse( generate_receipt_pdf(pk, request, True), ) class ReceiptPDFView(View): def get(self, request, pk): response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename=' + \ _('receipt %s.pdf' % pk) generate_receipt_pdf(pk, response) return response
from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.generic import View from .pdf import generate_receipt_pdf class ReceiptHTMLView(View): def get(self, request, pk): return HttpResponse( generate_receipt_pdf(pk, request, True), ) class ReceiptPDFView(View): def get(self, request, pk): response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename=' + \ _('receipt %s.pdf' % pk) generate_receipt_pdf(pk, response) return response
Remove unused (albeit confusing) variable
Remove unused (albeit confusing) variable See #13
Python
isc
hobarrera/django-afip,hobarrera/django-afip
from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.generic import View from .pdf import generate_receipt_pdf class ReceiptHTMLView(View): def get(self, request, pk): return HttpResponse( generate_receipt_pdf(pk, request, True), ) class ReceiptPDFView(View): def get(self, request, pk): response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename=' + \ _('receipt %s.pdf' % pk) generate_receipt_pdf(pk, response) return response
Remove unused (albeit confusing) variable See #13 from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.generic import View from .pdf import generate_receipt_pdf class ReceiptHTMLView(View): template_name = 'django_afip/invoice.html' def get(self, request, pk): return HttpResponse( generate_receipt_pdf(pk, request, True), ) class ReceiptPDFView(View): def get(self, request, pk): response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename=' + \ _('receipt %s.pdf' % pk) generate_receipt_pdf(pk, response) return response
fdf7daf8abc4f8e1bfb8b729fd9ffc4d0c95c509
apps/xformmanager/management/commands/generate_xforms.py
apps/xformmanager/management/commands/generate_xforms.py
""" This script generates all the necessary data to synchronize with a remote CommCareHQ server on that server. This is only really useful if you intend to manually scp/rsync data to your local server, which requires a login to the remote server. So this is not the standard synchronization workflow (but is necessary for low-connectivity settings) """ import sys import urllib from optparse import make_option from django.core.management.base import LabelCommand, CommandError from xformmanager.management.commands import util from xformmanager.models import FormDefModel from receiver.models import Submission class Command(LabelCommand): option_list = LabelCommand.option_list + ( make_option('-a','--all', action='store_true', dest='download_all', \ default=False, help='Download all files'), ) help = "Generate synchronization files on a CommCareHQ remote server." args = "<remote_url username password>" label = 'IP address of the remote server (including port), username, and password' def handle(self, *args, **options): if len(args) != 3: raise CommandError('Please specify %s.' % self.label) remote_url = args[0] username = args[1] password = args[2] print "Generating synchronization data from %s" % remote_url download_all = options.get('download_all', False) generate_xforms(remote_url, username, password, not download_all) def __del__(self): pass def generate_xforms(remote_url, username, password, latest=True): """ Generate sync data from remote server remote_url: url of remote server (ip:port) username, password: credentials for logging in """ status = util.login(remote_url, username, password) if not status: print "Sorry. Your credentials were not accepted." sys.exit() def _generate_latest(url, django_model): # for now, we assume schemas and submissions appear with monotonically # increasing id's. I doubt this is always the case. # TODO: fix start_id = -1 received_count = django_model.objects.count() if url.find("?") == -1: url = url + "?" else: url = url + "&" url = url + ("received_count=%s" % received_count) print "Hitting %s" % url # TODO - update this to use content-disposition instead of FILE_NAME urllib.urlopen(url) print "Generated tar from %s" % url url = 'http://%s/api/xforms/?format=sync' % remote_url if latest: _generate_latest(url, FormDefModel) else: urllib.urlopen(url) print "Generated remote schemata archive" # TODO - move this to receiver/management? url = 'http://%s/api/submissions/' % remote_url if latest: _generate_latest(url, Submission) else: urllib.urlopen(url) print "Generated remote submissions archive" return
Add a command to generate xform archives on the remote server (without downloading)
Add a command to generate xform archives on the remote server (without downloading)
Python
bsd-3-clause
SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq
""" This script generates all the necessary data to synchronize with a remote CommCareHQ server on that server. This is only really useful if you intend to manually scp/rsync data to your local server, which requires a login to the remote server. So this is not the standard synchronization workflow (but is necessary for low-connectivity settings) """ import sys import urllib from optparse import make_option from django.core.management.base import LabelCommand, CommandError from xformmanager.management.commands import util from xformmanager.models import FormDefModel from receiver.models import Submission class Command(LabelCommand): option_list = LabelCommand.option_list + ( make_option('-a','--all', action='store_true', dest='download_all', \ default=False, help='Download all files'), ) help = "Generate synchronization files on a CommCareHQ remote server." args = "<remote_url username password>" label = 'IP address of the remote server (including port), username, and password' def handle(self, *args, **options): if len(args) != 3: raise CommandError('Please specify %s.' % self.label) remote_url = args[0] username = args[1] password = args[2] print "Generating synchronization data from %s" % remote_url download_all = options.get('download_all', False) generate_xforms(remote_url, username, password, not download_all) def __del__(self): pass def generate_xforms(remote_url, username, password, latest=True): """ Generate sync data from remote server remote_url: url of remote server (ip:port) username, password: credentials for logging in """ status = util.login(remote_url, username, password) if not status: print "Sorry. Your credentials were not accepted." sys.exit() def _generate_latest(url, django_model): # for now, we assume schemas and submissions appear with monotonically # increasing id's. I doubt this is always the case. # TODO: fix start_id = -1 received_count = django_model.objects.count() if url.find("?") == -1: url = url + "?" else: url = url + "&" url = url + ("received_count=%s" % received_count) print "Hitting %s" % url # TODO - update this to use content-disposition instead of FILE_NAME urllib.urlopen(url) print "Generated tar from %s" % url url = 'http://%s/api/xforms/?format=sync' % remote_url if latest: _generate_latest(url, FormDefModel) else: urllib.urlopen(url) print "Generated remote schemata archive" # TODO - move this to receiver/management? url = 'http://%s/api/submissions/' % remote_url if latest: _generate_latest(url, Submission) else: urllib.urlopen(url) print "Generated remote submissions archive" return
Add a command to generate xform archives on the remote server (without downloading)
23501afd09b13d1e5f33bdd60614fd9ac7210108
oratioignoreparser.py
oratioignoreparser.py
import os import re class OratioIgnoreParser(): def __init__(self): self.ignored_paths = ["oratiomodule.tar.gz"] def load(self, oratio_ignore_path): with open(oratio_ignore_path, "r") as f: self.ignored_paths.extend([line.strip() for line in f]) def should_be_ignored(self, filepath): for ig in self.ignored_paths: compiled_regex = re.compile( '^' + re.escape(ig).replace('\\*', '.*') + '$' ) if compiled_regex.search(filepath) or \ compiled_regex.search(filepath.split('/')[-1]): return True return False def list_files(self, directory): filepaths = [] ignored_files = [] for root, dirs, files in os.walk("."): for name in files: relative_path = os.path.join(root, name) if relative_path.startswith("./"): relative_path = relative_path[2:] if not self.should_be_ignored(relative_path): filepaths.append(relative_path) else: ignored_files.append(relative_path) return filepaths, ignored_files
import os import re class OratioIgnoreParser(): def __init__(self): self.ignored_paths = ["oratiomodule.tar.gz"] def load(self, oratio_ignore_path): with open(oratio_ignore_path, "r") as f: self.ignored_paths.extend([line.strip() for line in f]) def extend_list(self, ignored_paths_list): self.ignored_paths.extend(ignored_paths_list) def should_be_ignored(self, filepath): for ig in self.ignored_paths: compiled_regex = re.compile( '^' + re.escape(ig).replace('\\*', '.*') + '$' ) if compiled_regex.search(filepath) or \ compiled_regex.search(filepath.split('/')[-1]): return True return False def list_files(self, directory): filepaths = [] ignored_files = [] for root, dirs, files in os.walk("."): for name in files: relative_path = os.path.join(root, name) if relative_path.startswith("./"): relative_path = relative_path[2:] if not self.should_be_ignored(relative_path): filepaths.append(relative_path) else: ignored_files.append(relative_path) return filepaths, ignored_files
Add extend_list method to OratioIgnoreParser
Add extend_list method to OratioIgnoreParser To make oratioignoreparser.py easily testable using unit tests.
Python
mit
oratio-io/oratio-cli,oratio-io/oratio-cli
import os import re class OratioIgnoreParser(): def __init__(self): self.ignored_paths = ["oratiomodule.tar.gz"] def load(self, oratio_ignore_path): with open(oratio_ignore_path, "r") as f: self.ignored_paths.extend([line.strip() for line in f]) def extend_list(self, ignored_paths_list): self.ignored_paths.extend(ignored_paths_list) def should_be_ignored(self, filepath): for ig in self.ignored_paths: compiled_regex = re.compile( '^' + re.escape(ig).replace('\\*', '.*') + '$' ) if compiled_regex.search(filepath) or \ compiled_regex.search(filepath.split('/')[-1]): return True return False def list_files(self, directory): filepaths = [] ignored_files = [] for root, dirs, files in os.walk("."): for name in files: relative_path = os.path.join(root, name) if relative_path.startswith("./"): relative_path = relative_path[2:] if not self.should_be_ignored(relative_path): filepaths.append(relative_path) else: ignored_files.append(relative_path) return filepaths, ignored_files
Add extend_list method to OratioIgnoreParser To make oratioignoreparser.py easily testable using unit tests. import os import re class OratioIgnoreParser(): def __init__(self): self.ignored_paths = ["oratiomodule.tar.gz"] def load(self, oratio_ignore_path): with open(oratio_ignore_path, "r") as f: self.ignored_paths.extend([line.strip() for line in f]) def should_be_ignored(self, filepath): for ig in self.ignored_paths: compiled_regex = re.compile( '^' + re.escape(ig).replace('\\*', '.*') + '$' ) if compiled_regex.search(filepath) or \ compiled_regex.search(filepath.split('/')[-1]): return True return False def list_files(self, directory): filepaths = [] ignored_files = [] for root, dirs, files in os.walk("."): for name in files: relative_path = os.path.join(root, name) if relative_path.startswith("./"): relative_path = relative_path[2:] if not self.should_be_ignored(relative_path): filepaths.append(relative_path) else: ignored_files.append(relative_path) return filepaths, ignored_files
cb0f732545ea851af46a7c96525d6b5b418b8673
chatterbot/__init__.py
chatterbot/__init__.py
""" ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '0.7.1' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
""" ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '0.7.2' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
Update release version to 0.7.2
Update release version to 0.7.2
Python
bsd-3-clause
vkosuri/ChatterBot,gunthercox/ChatterBot
""" ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '0.7.2' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
Update release version to 0.7.2 """ ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '0.7.1' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
bd07980d9545de5ae82d6bdc87eab23060b0e859
sqflint.py
sqflint.py
import sys import argparse from sqf.parser import parse import sqf.analyser from sqf.exceptions import SQFParserError def analyze(code, writer=sys.stdout): try: result = parse(code) except SQFParserError as e: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) return exceptions = sqf.analyser.analyze(result).exceptions for e in exceptions: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) def _main(): parser = argparse.ArgumentParser(description="Static Analyser of SQF code") parser.add_argument('filename', nargs='?', type=argparse.FileType('r'), default=None, help='The full path of the file to be analyzed') args = parser.parse_args() if args.filename is not None: with open(args.filename) as file: code = file.read() else: code = sys.stdin.read() analyze(code) if __name__ == "__main__": _main()
import sys import argparse from sqf.parser import parse import sqf.analyser from sqf.exceptions import SQFParserError def analyze(code, writer=sys.stdout): try: result = parse(code) except SQFParserError as e: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) return exceptions = sqf.analyser.analyze(result).exceptions for e in exceptions: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) def _main(): parser = argparse.ArgumentParser(description="Static Analyser of SQF code") parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default=None, help='The full path of the file to be analyzed') args = parser.parse_args() if args.file is not None: code = args.file.read() else: code = sys.stdin.read() analyze(code) if __name__ == "__main__": _main()
Fix parsing file - FileType already read
Fix parsing file - FileType already read
Python
bsd-3-clause
LordGolias/sqf
import sys import argparse from sqf.parser import parse import sqf.analyser from sqf.exceptions import SQFParserError def analyze(code, writer=sys.stdout): try: result = parse(code) except SQFParserError as e: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) return exceptions = sqf.analyser.analyze(result).exceptions for e in exceptions: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) def _main(): parser = argparse.ArgumentParser(description="Static Analyser of SQF code") parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default=None, help='The full path of the file to be analyzed') args = parser.parse_args() if args.file is not None: code = args.file.read() else: code = sys.stdin.read() analyze(code) if __name__ == "__main__": _main()
Fix parsing file - FileType already read import sys import argparse from sqf.parser import parse import sqf.analyser from sqf.exceptions import SQFParserError def analyze(code, writer=sys.stdout): try: result = parse(code) except SQFParserError as e: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) return exceptions = sqf.analyser.analyze(result).exceptions for e in exceptions: writer.write('[%d,%d]:%s\n' % (e.position[0], e.position[1] - 1, e.message)) def _main(): parser = argparse.ArgumentParser(description="Static Analyser of SQF code") parser.add_argument('filename', nargs='?', type=argparse.FileType('r'), default=None, help='The full path of the file to be analyzed') args = parser.parse_args() if args.filename is not None: with open(args.filename) as file: code = file.read() else: code = sys.stdin.read() analyze(code) if __name__ == "__main__": _main()
d7e4bdc6979e3ada1e28ce01e3b3e12d4d197bcf
html_table_parser/__init__.py
html_table_parser/__init__.py
from .parser import HTMLTableParser __author__ = 'Josua Schmid' __version__ = '0.1.1' __licence__ = 'GPLv3'
from .parser import HTMLTableParser __author__ = 'Josua Schmid' __version__ = '0.1.1' __licence__ = 'AGPLv3'
Correct license in module meta information
Correct license in module meta information
Python
agpl-3.0
schmijos/html-table-parser-python3,schmijos/html-table-parser-python3
from .parser import HTMLTableParser __author__ = 'Josua Schmid' __version__ = '0.1.1' __licence__ = 'AGPLv3'
Correct license in module meta information from .parser import HTMLTableParser __author__ = 'Josua Schmid' __version__ = '0.1.1' __licence__ = 'GPLv3'
c269315ec83a0cfc6ec6c5bd58945ba68d6f69f3
analyzarr/ui/custom_tools.py
analyzarr/ui/custom_tools.py
from chaco.tools.api import ScatterInspector from numpy import zeros class PeakSelectionTool(ScatterInspector): def _deselect(self, index=None): super(PeakSelectionTool, self)._deselect(index) self._update_mask() # override this method so that we only select one peak at a time def _select(self, index, append=False): super(PeakSelectionTool, self)._select(index, append) self._update_mask() def _update_mask(self): plot = self.component for name in ('index', 'value'): if not hasattr(plot, name): continue md = getattr(plot, name).metadata mask = zeros(getattr(plot, name).get_data().shape[0], dtype=bool) mask[list(md[self.selection_metadata_name])]=True md['selection_masks'] = mask
Add missing custom tools file
Add missing custom tools file
Python
bsd-2-clause
msarahan/analyzarr,msarahan/analyzarr
from chaco.tools.api import ScatterInspector from numpy import zeros class PeakSelectionTool(ScatterInspector): def _deselect(self, index=None): super(PeakSelectionTool, self)._deselect(index) self._update_mask() # override this method so that we only select one peak at a time def _select(self, index, append=False): super(PeakSelectionTool, self)._select(index, append) self._update_mask() def _update_mask(self): plot = self.component for name in ('index', 'value'): if not hasattr(plot, name): continue md = getattr(plot, name).metadata mask = zeros(getattr(plot, name).get_data().shape[0], dtype=bool) mask[list(md[self.selection_metadata_name])]=True md['selection_masks'] = mask
Add missing custom tools file
21059428d95c27cf043ada2e299a4cf3982a4233
python/printbag.py
python/printbag.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Convert a rosbag file to legacy lidar binary format. """ """LIDAR datatype format is: ( timestamp (long), flag (bool saved as int), accelerometer[3] (double), gps[3] (double), distance[LIDAR_NUM_ANGLES] (long), ) 'int' and 'long' are the same size on the raspberry pi (32 bits). """ import sys import rosbag def decode_bag(bag): topics = ['/scan', '/flagbutton_pressed'] return [message for message in bag.read_messages(topics=topics)] if __name__ == '__main__': if len(sys.argv) < 2: print(('Usage: {} <rosbag> [<outfile>] \n\n' 'Print contents of rosbag file. If <outfile> is provided, \n' 'write contents of rosbag file to <outfile> in the legacy \n' 'lidar binary format.').format(__file__)) sys.exit(1) outfile = None filename = sys.argv[1] if len(sys.argv) == 3: outfile = sys.argv[2] with rosbag.Bag(filename) as bag: print(decode_bag(bag)) sys.exit()
#!/usr/bin/env python # -*- coding: utf-8 -*- """Convert a rosbag file to legacy lidar binary format. """ """LIDAR datatype format is: ( timestamp (long), flag (bool saved as int), accelerometer[3] (double), gps[3] (double), distance[LIDAR_NUM_ANGLES] (long), ) 'int' and 'long' are the same size on the raspberry pi (32 bits). """ import sys import rosbag def print_bag(bag): topics = ['/scan', '/flagbutton_pressed'] for message in bag.read_messages(topics=topics): print(message) if __name__ == '__main__': if len(sys.argv) < 2: print(('Usage: {} <rosbag> [<outfile>] \n\n' 'Print contents of rosbag file. If <outfile> is provided, \n' 'write contents of rosbag file to <outfile> in the legacy \n' 'lidar binary format.').format(__file__)) sys.exit(1) outfile = None filename = sys.argv[1] if len(sys.argv) == 3: outfile = sys.argv[2] with rosbag.Bag(filename) as bag: print_bag(bag) sys.exit()
Print out bag contents for lidar and button topics
Print out bag contents for lidar and button topics
Python
bsd-2-clause
oliverlee/antlia
#!/usr/bin/env python # -*- coding: utf-8 -*- """Convert a rosbag file to legacy lidar binary format. """ """LIDAR datatype format is: ( timestamp (long), flag (bool saved as int), accelerometer[3] (double), gps[3] (double), distance[LIDAR_NUM_ANGLES] (long), ) 'int' and 'long' are the same size on the raspberry pi (32 bits). """ import sys import rosbag def print_bag(bag): topics = ['/scan', '/flagbutton_pressed'] for message in bag.read_messages(topics=topics): print(message) if __name__ == '__main__': if len(sys.argv) < 2: print(('Usage: {} <rosbag> [<outfile>] \n\n' 'Print contents of rosbag file. If <outfile> is provided, \n' 'write contents of rosbag file to <outfile> in the legacy \n' 'lidar binary format.').format(__file__)) sys.exit(1) outfile = None filename = sys.argv[1] if len(sys.argv) == 3: outfile = sys.argv[2] with rosbag.Bag(filename) as bag: print_bag(bag) sys.exit()
Print out bag contents for lidar and button topics #!/usr/bin/env python # -*- coding: utf-8 -*- """Convert a rosbag file to legacy lidar binary format. """ """LIDAR datatype format is: ( timestamp (long), flag (bool saved as int), accelerometer[3] (double), gps[3] (double), distance[LIDAR_NUM_ANGLES] (long), ) 'int' and 'long' are the same size on the raspberry pi (32 bits). """ import sys import rosbag def decode_bag(bag): topics = ['/scan', '/flagbutton_pressed'] return [message for message in bag.read_messages(topics=topics)] if __name__ == '__main__': if len(sys.argv) < 2: print(('Usage: {} <rosbag> [<outfile>] \n\n' 'Print contents of rosbag file. If <outfile> is provided, \n' 'write contents of rosbag file to <outfile> in the legacy \n' 'lidar binary format.').format(__file__)) sys.exit(1) outfile = None filename = sys.argv[1] if len(sys.argv) == 3: outfile = sys.argv[2] with rosbag.Bag(filename) as bag: print(decode_bag(bag)) sys.exit()
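The LIDAR record layout described in the docstring maps directly onto Python's struct module. A sketch of packing one record under the stated 32-bit int/long assumption (little-endian standard sizes); LIDAR_NUM_ANGLES and all sample values are placeholders, since the real constant is defined elsewhere:

import struct

LIDAR_NUM_ANGLES = 360  # placeholder; the actual constant is not given in this file
fmt = '<li3d3d{}l'.format(LIDAR_NUM_ANGLES)  # timestamp, flag, accel[3], gps[3], distances
record = struct.pack(fmt,
                     1500000000,                 # timestamp (long)
                     1,                          # flag (bool saved as int)
                     0.0, 0.0, 9.81,             # accelerometer[3] (double)
                     52.0, 4.4, 0.0,             # gps[3] (double)
                     *([0] * LIDAR_NUM_ANGLES))  # distance[LIDAR_NUM_ANGLES] (long)
assert len(record) == struct.calcsize(fmt)       # 4 + 4 + 24 + 24 + 4*360 bytes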
0c22486320b064c078fe009faf41e2d0c7f5e272
passwordless/views.py
passwordless/views.py
from django.shortcuts import render from django.views.generic.edit import FormView from . import forms # Create your views here. def logout(request): return render(request, 'passwordless/logout.html') def authn(request, token): return render(request, 'passwordless/authn.html') class LoginView(FormView): template_name = 'passwordless/login.html' form_class = forms.LoginForm success_url = '/' def form_valid(self, form): form.send_email() return super().form_valid(form) class RegisterView(FormView): template_name = 'passwordless/register.html' form_class = forms.RegistrationForm success_url = '/' def form_valid(self, form): form.create_user() form.send_email() return super().form_valid(form)
from django.shortcuts import render from django.views.generic.edit import FormView from . import forms # Create your views here. def logout(request): return render(request, 'passwordless/logout.html') def authn(request, token): return render(request, 'passwordless/authn.html') class LoginView(FormView): template_name = 'passwordless/login.html' form_class = forms.LoginForm success_url = '/' def form_valid(self, form): form.send_email() return super().form_valid(form) class RegisterView(LoginView): template_name = 'passwordless/register.html' form_class = forms.RegistrationForm def form_valid(self, form): form.create_user() return super().form_valid(form)
Refactor RegisterView as subclass of LoginView
Refactor RegisterView as subclass of LoginView They share much of the work, so they should share the code as well
Python
mit
Kromey/fbxnano,Kromey/akwriters,Kromey/fbxnano,Kromey/akwriters,Kromey/akwriters,Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters
from django.shortcuts import render from django.views.generic.edit import FormView from . import forms # Create your views here. def logout(request): return render(request, 'passwordless/logout.html') def authn(request, token): return render(request, 'passwordless/authn.html') class LoginView(FormView): template_name = 'passwordless/login.html' form_class = forms.LoginForm success_url = '/' def form_valid(self, form): form.send_email() return super().form_valid(form) class RegisterView(LoginView): template_name = 'passwordless/register.html' form_class = forms.RegistrationForm def form_valid(self, form): form.create_user() return super().form_valid(form)
Refactor RegisterView as subclass of LoginView They share much of the work, so they should share the code as well from django.shortcuts import render from django.views.generic.edit import FormView from . import forms # Create your views here. def logout(request): return render(request, 'passwordless/logout.html') def authn(request, token): return render(request, 'passwordless/authn.html') class LoginView(FormView): template_name = 'passwordless/login.html' form_class = forms.LoginForm success_url = '/' def form_valid(self, form): form.send_email() return super().form_valid(form) class RegisterView(FormView): template_name = 'passwordless/register.html' form_class = forms.RegistrationForm success_url = '/' def form_valid(self, form): form.create_user() form.send_email() return super().form_valid(form)
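The refactor relies on cooperative super() calls: RegisterView does its extra work (creating the user), then delegates to LoginView.form_valid, which sends the email and redirects to success_url. A stripped-down sketch of that call order, with plain classes standing in for the Django views:

class Login(object):
    def form_valid(self, form):
        print('send email')                  # LoginView's extra work
        return 'redirect to success_url'

class Register(Login):
    def form_valid(self, form):
        print('create user')                 # RegisterView's extra work runs first
        return super(Register, self).form_valid(form)

Register().form_valid(None)                  # prints: create user, then: send email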
5dae59bc17f0f8a0ef97bbc461eb18c0ea725bc9
config-example.py
config-example.py
# Copy this file to config.py and change the settings. Don't forget to specify your own SECRET_KEY. # The app name will be used in several places. APP_NAME = 'Yelp Love' APP_BASE_URL = 'https://PROJECT_ID.appspot.com/' LOVE_SENDER_EMAIL = 'Yelp Love <love@PROJECT_ID.appspot.com>' # Flask's secret key, used to encrypt the session cookie. # Set this to any random string and make sure not to share this! SECRET_KEY = 'YOUR_SECRET_HERE' # Use default theme THEME = 'default' # Every employee needs a reference to a Google Account. This reference is based on the user's # Google Account email address and is created when employee data is imported: we take the *username* # and this DOMAIN DOMAIN = 'example.com' # Name of the S3 bucket used to import employee data from a file named employees.json # Check out /import/employees.json.example to see what this file should look like. S3_BUCKET = 'employees'
# Copy this file to config.py and change the settings. Don't forget to specify your own SECRET_KEY. # The app name will be used in several places. APP_NAME = 'Yelp Love' APP_BASE_URL = 'https://PROJECT_ID.appspot.com/' LOVE_SENDER_EMAIL = 'Yelp Love <love@PROJECT_ID.appspotmail.com>' # Flask's secret key, used to encrypt the session cookie. # Set this to any random string and make sure not to share this! SECRET_KEY = 'YOUR_SECRET_HERE' # Use default theme THEME = 'default' # Every employee needs a reference to a Google Account. This reference is based on the user's # Google Account email address and is created when employee data is imported: we take the *username* # and this DOMAIN DOMAIN = 'example.com' # Name of the S3 bucket used to import employee data from a file named employees.json # Check out /import/employees.json.example to see what this file should look like. S3_BUCKET = 'employees'
Use appspotmail.com instead of appspot.com for email sender
Use appspotmail.com instead of appspot.com for email sender
Python
mit
Yelp/love,Yelp/love,Yelp/love
# Copy this file to config.py and change the settings. Don't forget to specify your own SECRET_KEY. # The app name will be used in several places. APP_NAME = 'Yelp Love' APP_BASE_URL = 'https://PROJECT_ID.appspot.com/' LOVE_SENDER_EMAIL = 'Yelp Love <love@PROJECT_ID.appspotmail.com>' # Flask's secret key, used to encrypt the session cookie. # Set this to any random string and make sure not to share this! SECRET_KEY = 'YOUR_SECRET_HERE' # Use default theme THEME = 'default' # Every employee needs a reference to a Google Account. This reference is based on the user's # Google Account email address and is created when employee data is imported: we take the *username* # and this DOMAIN DOMAIN = 'example.com' # Name of the S3 bucket used to import employee data from a file named employees.json # Check out /import/employees.json.example to see what this file should look like. S3_BUCKET = 'employees'
Use appspotmail.com instead of appspot.com for email sender # Copy this file to config.py and change the settings. Don't forget to specify your own SECRET_KEY. # The app name will be used in several places. APP_NAME = 'Yelp Love' APP_BASE_URL = 'https://PROJECT_ID.appspot.com/' LOVE_SENDER_EMAIL = 'Yelp Love <love@PROJECT_ID.appspot.com>' # Flask's secret key, used to encrypt the session cookie. # Set this to any random string and make sure not to share this! SECRET_KEY = 'YOUR_SECRET_HERE' # Use default theme THEME = 'default' # Every employee needs a reference to a Google Account. This reference is based on the user's # Google Account email address and is created when employee data is imported: we take the *username* # and this DOMAIN DOMAIN = 'example.com' # Name of the S3 bucket used to import employee data from a file named employees.json # Check out /import/employees.json.example to see what this file should look like. S3_BUCKET = 'employees'
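The domain swap above lines up with App Engine's mail rules: the classic Mail API rejects senders that are not app admins, authorized accounts, or addresses on the app's appspotmail.com domain. A small self-check one could run against such a config; the regex is illustrative, not part of the app:

import re

LOVE_SENDER_EMAIL = 'Yelp Love <love@PROJECT_ID.appspotmail.com>'  # as configured above
assert re.search(r'@[\w.-]+\.appspotmail\.com>?$', LOVE_SENDER_EMAIL), \
    'App Engine sends only from *.appspotmail.com addresses or authorized accounts'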
c9137bdaf551d0e1203120a9c00af60541e3597f
scikits/talkbox/lpc/lpc.py
scikits/talkbox/lpc/lpc.py
#! /usr/bin/env python # Last Change: Sun Sep 14 03:00 PM 2008 J import numpy as np from c_lpc import levinson as c_levinson def levinson(r, order, axis = -1): """Levinson-Durbin recursion, to efficiently solve symmetric linear systems with Toeplitz structure. Arguments --------- r : array-like input array to invert (since the matrix is symmetric Toeplitz, the corresponding pxp matrix is defined by p items only). Generally the autocorrelation of the signal for linear prediction coefficients estimation. The first item must be a nonzero real, and corresponds to the autocorrelation at lag 0 for linear prediction. order : int order of the recursion. For order p, you will get p+1 coefficients. axis : int, optional axis over which the algorithm is applied. -1 by default. Returns -------- a : array-like the solution of the inversion (see notes). e : array-like the prediction error. k : array-like reflection coefficients. Notes ----- Levinson is a well-known algorithm to solve the Hermitian Toeplitz equation: _ _ -R[1] = R[0] R[1] ... R[p-1] a[1] : : : : * : : : : _ * : -R[p] = R[p-1] R[p-2] ... R[0] a[p] _ with respect to a (_ denotes the complex conjugate). Using the special symmetry in the matrix, the inversion can be done in O(p^2) instead of O(p^3). """ if axis != -1: r = np.swapaxes(r, axis, -1) a, e, k = c_levinson(r, order) if axis != -1: a = np.swapaxes(a, axis, -1) e = np.swapaxes(e, axis, -1) k = np.swapaxes(k, axis, -1) return a, e, k
Add python interface around C implementation of levinson.
Add python interface around C implementation of levinson.
Python
mit
cournape/talkbox,cournape/talkbox
#! /usr/bin/env python # Last Change: Sun Sep 14 03:00 PM 2008 J import numpy as np from c_lpc import levinson as c_levinson def levinson(r, order, axis = -1): """Levinson-Durbin recursion, to efficiently solve symmetric linear systems with Toeplitz structure. Arguments --------- r : array-like input array to invert (since the matrix is symmetric Toeplitz, the corresponding pxp matrix is defined by p items only). Generally the autocorrelation of the signal for linear prediction coefficients estimation. The first item must be a nonzero real, and corresponds to the autocorrelation at lag 0 for linear prediction. order : int order of the recursion. For order p, you will get p+1 coefficients. axis : int, optional axis over which the algorithm is applied. -1 by default. Returns -------- a : array-like the solution of the inversion (see notes). e : array-like the prediction error. k : array-like reflection coefficients. Notes ----- Levinson is a well-known algorithm to solve the Hermitian Toeplitz equation: _ _ -R[1] = R[0] R[1] ... R[p-1] a[1] : : : : * : : : : _ * : -R[p] = R[p-1] R[p-2] ... R[0] a[p] _ with respect to a (_ denotes the complex conjugate). Using the special symmetry in the matrix, the inversion can be done in O(p^2) instead of O(p^3). """ if axis != -1: r = np.swapaxes(r, axis, -1) a, e, k = c_levinson(r, order) if axis != -1: a = np.swapaxes(a, axis, -1) e = np.swapaxes(e, axis, -1) k = np.swapaxes(k, axis, -1) return a, e, k
Add python interface around C implementation of levinson.
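For reference, the recursion this docstring describes fits in a few lines of pure NumPy for the real-valued case. This is a sketch for cross-checking the C implementation, not a replacement for it (no input validation, no axis handling):

import numpy as np

def levinson_1d(r, order):
    # Solve the real symmetric Toeplitz system from the docstring; returns (a, e, k).
    a = np.zeros(order + 1)
    a[0] = 1.0
    e = r[0]
    k = np.zeros(order)
    for i in range(1, order + 1):
        acc = r[i] + np.dot(a[1:i], r[i - 1:0:-1])
        k[i - 1] = -acc / e                         # reflection coefficient
        a[1:i] = a[1:i] + k[i - 1] * a[i - 1:0:-1]  # symmetric order update
        a[i] = k[i - 1]
        e *= 1.0 - k[i - 1] ** 2                    # shrink the prediction error
    return a, e, k

a, e, k = levinson_1d(np.array([2.0, 1.0, 0.5]), 2)  # a -> [1.0, -0.5, 0.0]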
410b447f54838e4a28b28aa1a027bd058520d9b0
Python/HARPS-e2ds-to-order.py
Python/HARPS-e2ds-to-order.py
#!/usr/bin/env python # encoding: utf-8 """ HARPS-e2ds-to-order.py Created by Jonathan Whitmore on 2011-10-14. Copyright (c) 2011. All rights reserved. """ import sys import os import argparse import pyfits as pf import numpy as np help_message = ''' Takes reduced HARPS***e2ds_A.fits data and reads the header to output a fits file that has the wavelength per pixel per order. ''' class Usage(Exception): def __init__(self, msg): self.msg = msg def main(argv=None): parser = argparse.ArgumentParser(description='Process input file.') parser.add_argument('f', type=str, help='input a filename') args = parser.parse_args() inputfile = vars(args)['f'] outputFITSfile = str('order_' + inputfile) hdu = pf.open(inputfile) print len(hdu[0].data) polynomialOrder = hdu[0].header['HIERARCH ESO DRS CAL TH DEG LL'] orders = hdu[0].header['HIERARCH ESO DRS CAL LOC NBO'] coefficients = (polynomialOrder + 1) * orders # number of coefficients in the file whatever = [] for x in xrange(coefficients): whatever.append(hdu[0].header['HIERARCH ESO DRS CAL TH COEFF LL' + str(x)]) A = {} for y in xrange(0,len(whatever),polynomialOrder+1): A[y/(polynomialOrder+1)] = whatever[y:y+polynomialOrder+1] for order in range(orders): print "order: ", order wavelength = [] for pixel in xrange(len(hdu[0].data[order])): temp = 0.0 for x in range(polynomialOrder + 1): temp += A[order][x] * pixel ** x wavelength.append(temp) pf.append(outputFITSfile, np.array([np.array(wavelength), hdu[0].data[order], np.sqrt(np.abs(hdu[0].data[order]))])) if __name__ == "__main__": sys.exit(main())
Order by order of HARPS data
Order by order of HARPS data
Python
mit
jbwhit/CaliCompari
#!/usr/bin/env python # encoding: utf-8 """ HARPS-e2ds-to-order.py Created by Jonathan Whitmore on 2011-10-14. Copyright (c) 2011. All rights reserved. """ import sys import os import argparse import pyfits as pf import numpy as np help_message = ''' Takes reduced HARPS***e2ds_A.fits data and reads the header to output a fits file that has the wavelength per pixel per order. ''' class Usage(Exception): def __init__(self, msg): self.msg = msg def main(argv=None): parser = argparse.ArgumentParser(description='Process input file.') parser.add_argument('f', type=str, help='input a filename') args = parser.parse_args() inputfile = vars(args)['f'] outputFITSfile = str('order_' + inputfile) hdu = pf.open(inputfile) print len(hdu[0].data) polynomialOrder = hdu[0].header['HIERARCH ESO DRS CAL TH DEG LL'] orders = hdu[0].header['HIERARCH ESO DRS CAL LOC NBO'] coefficients = (polynomialOrder + 1) * orders # number of coefficients in the file whatever = [] for x in xrange(coefficients): whatever.append(hdu[0].header['HIERARCH ESO DRS CAL TH COEFF LL' + str(x)]) A = {} for y in xrange(0,len(whatever),polynomialOrder+1): A[y/(polynomialOrder+1)] = whatever[y:y+polynomialOrder+1] for order in range(orders): print "order: ", order wavelength = [] for pixel in xrange(len(hdu[0].data[order])): temp = 0.0 for x in range(polynomialOrder + 1): temp += A[order][x] * pixel ** x wavelength.append(temp) pf.append(outputFITSfile, np.array([np.array(wavelength), hdu[0].data[order], np.sqrt(np.abs(hdu[0].data[order]))])) if __name__ == "__main__": sys.exit(main())
Order by order of HARPS data
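The per-pixel inner loop above is a plain polynomial evaluation, so it vectorizes directly. A sketch with NumPy's polyval, which takes coefficients ordered lowest degree first, exactly as A[order] stores them; the coefficient values and pixel count here are invented:

import numpy as np

coeffs = [5000.0, 1.0e-2, 1.0e-6, 1.0e-10]  # stand-in for A[order], a degree-3 polynomial
pixels = np.arange(4096)                    # stand-in for the pixel axis of one order
wavelength = np.polynomial.polynomial.polyval(pixels, coeffs)  # one value per pixel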
f61b8cfcbf98da826a847981834763198db42867
setup.py
setup.py
from setuptools import setup, find_packages setup( name='ckanext-archiver', version='0.1', packages=find_packages(), install_requires=[ 'celery==2.4.5', 'kombu==1.5.1', 'kombu-sqlalchemy==1.1.0', 'SQLAlchemy>=0.6.6', 'requests==0.6.4', 'messytables>=0.1.4', 'flask==0.8' # flask needed for tests ], author='Open Knowledge Foundation', author_email='[email protected]', description='Archive ckan resources', long_description='Archive ckan resources', license='MIT', url='http://ckan.org/wiki/Extensions', download_url='', include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules' ], entry_points=''' [paste.paster_command] archiver = ckanext.archiver.commands:Archiver [ckan.plugins] archiver = ckanext.archiver.plugin:ArchiverPlugin [ckan.celery_task] tasks = ckanext.archiver.celery_import:task_imports ''' )
from setuptools import setup, find_packages setup( name='ckanext-archiver', version='0.1', packages=find_packages(), install_requires=[ 'celery==2.4.2', 'kombu==2.1.3', 'kombu-sqlalchemy==1.1.0', 'SQLAlchemy>=0.6.6', 'requests==0.6.4', 'messytables>=0.1.4', 'flask==0.8' # flask needed for tests ], author='Open Knowledge Foundation', author_email='[email protected]', description='Archive ckan resources', long_description='Archive ckan resources', license='MIT', url='http://ckan.org/wiki/Extensions', download_url='', include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules' ], entry_points=''' [paste.paster_command] archiver = ckanext.archiver.commands:Archiver [ckan.plugins] archiver = ckanext.archiver.plugin:ArchiverPlugin [ckan.celery_task] tasks = ckanext.archiver.celery_import:task_imports ''' )
Change celery and kombu requirements to match ckanext-datastorer
Change celery and kombu requirements to match ckanext-datastorer
Python
mit
datagovuk/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,ckan/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,datagovuk/ckanext-archiver,ckan/ckanext-archiver,datagovuk/ckanext-archiver,ckan/ckanext-archiver
from setuptools import setup, find_packages setup( name='ckanext-archiver', version='0.1', packages=find_packages(), install_requires=[ 'celery==2.4.2', 'kombu==2.1.3', 'kombu-sqlalchemy==1.1.0', 'SQLAlchemy>=0.6.6', 'requests==0.6.4', 'messytables>=0.1.4', 'flask==0.8' # flask needed for tests ], author='Open Knowledge Foundation', author_email='[email protected]', description='Archive ckan resources', long_description='Archive ckan resources', license='MIT', url='http://ckan.org/wiki/Extensions', download_url='', include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules' ], entry_points=''' [paste.paster_command] archiver = ckanext.archiver.commands:Archiver [ckan.plugins] archiver = ckanext.archiver.plugin:ArchiverPlugin [ckan.celery_task] tasks = ckanext.archiver.celery_import:task_imports ''' )
Change celery and kombu requirements to match ckanext-datastorer from setuptools import setup, find_packages setup( name='ckanext-archiver', version='0.1', packages=find_packages(), install_requires=[ 'celery==2.4.5', 'kombu==1.5.1', 'kombu-sqlalchemy==1.1.0', 'SQLAlchemy>=0.6.6', 'requests==0.6.4', 'messytables>=0.1.4', 'flask==0.8' # flask needed for tests ], author='Open Knowledge Foundation', author_email='[email protected]', description='Archive ckan resources', long_description='Archive ckan resources', license='MIT', url='http://ckan.org/wiki/Extensions', download_url='', include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules' ], entry_points=''' [paste.paster_command] archiver = ckanext.archiver.commands:Archiver [ckan.plugins] archiver = ckanext.archiver.plugin:ArchiverPlugin [ckan.celery_task] tasks = ckanext.archiver.celery_import:task_imports ''' )
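Pinning celery and kombu to the same versions as ckanext-datastorer avoids resolver conflicts when both extensions install into one environment. A quick runtime check that an environment actually satisfies the pins, using setuptools' pkg_resources with the requirement strings from install_requires:

import pkg_resources

for requirement in ('celery==2.4.2', 'kombu==2.1.3', 'kombu-sqlalchemy==1.1.0'):
    try:
        pkg_resources.require(requirement)      # raises if the wrong version is active
    except pkg_resources.VersionConflict as exc:
        print('pin not satisfied:', exc)
    except pkg_resources.DistributionNotFound as exc:
        print('not installed:', exc)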
6f6199240009ac91da7e663030125df439d8fe7e
tests/test_trust_list.py
tests/test_trust_list.py
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import unittest import sys from oscrypto import trust_list from asn1crypto.x509 import Certificate if sys.version_info < (3,): byte_cls = str else: byte_cls = bytes class TrustListTests(unittest.TestCase): def test_extract_from_system(self): certs = trust_list.extract_from_system() self.assertIsInstance(certs, list) for cert in certs: self.assertIsInstance(cert, byte_cls) _ = Certificate.load(cert).native
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import unittest import sys from oscrypto import trust_list from asn1crypto.x509 import Certificate if sys.version_info < (3,): byte_cls = str else: byte_cls = bytes class TrustListTests(unittest.TestCase): def test_extract_from_system(self): certs = trust_list.extract_from_system() self.assertIsInstance(certs, list) self.assertLess(10, len(certs)) for cert in certs: self.assertIsInstance(cert, byte_cls) _ = Certificate.load(cert).native
Add more sanity checks to the trust list test
Add more sanity checks to the trust list test
Python
mit
wbond/oscrypto
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import unittest import sys from oscrypto import trust_list from asn1crypto.x509 import Certificate if sys.version_info < (3,): byte_cls = str else: byte_cls = bytes class TrustListTests(unittest.TestCase): def test_extract_from_system(self): certs = trust_list.extract_from_system() self.assertIsInstance(certs, list) self.assertLess(10, len(certs)) for cert in certs: self.assertIsInstance(cert, byte_cls) _ = Certificate.load(cert).native
Add more sanity checks to the trust list test # coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import unittest import sys from oscrypto import trust_list from asn1crypto.x509 import Certificate if sys.version_info < (3,): byte_cls = str else: byte_cls = bytes class TrustListTests(unittest.TestCase): def test_extract_from_system(self): certs = trust_list.extract_from_system() self.assertIsInstance(certs, list) for cert in certs: self.assertIsInstance(cert, byte_cls) _ = Certificate.load(cert).native
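On the added assertion: assertLess(10, len(certs)) reads as 10 < len(certs); the assertGreater form states the same bound more directly. The parsed certificates can also be sanity-checked a little further. A standalone sketch of both ideas, assuming asn1crypto's Certificate exposes a subject attribute (as current versions do):

from asn1crypto.x509 import Certificate
from oscrypto import trust_list

certs = trust_list.extract_from_system()
assert len(certs) > 10                            # equivalent to assertLess(10, len(certs))
for cert in certs:
    assert Certificate.load(cert).subject.native  # every trust anchor should name a subject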
200f1727f16bcd903554346611afc976846f5896
setup.py
setup.py
#!/usr/bin/env python from distutils.core import setup setup( name='django-payfast', version='0.2.2', author='Mikhail Korobov', author_email='[email protected]', packages=['payfast', 'payfast.south_migrations'], url='http://bitbucket.org/kmike/django-payfast/', download_url = 'http://bitbucket.org/kmike/django-payfast/get/tip.gz', license = 'MIT license', description = 'A pluggable Django application for integrating payfast.co.za payment system.', long_description = open('README.rst').read().decode('utf8'), classifiers=( 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ), )
#!/usr/bin/env python from distutils.core import setup setup( name='django-payfast', version='0.2.2', author='Mikhail Korobov', author_email='[email protected]', packages=['payfast', 'payfast.south_migrations'], url='http://bitbucket.org/kmike/django-payfast/', download_url = 'http://bitbucket.org/kmike/django-payfast/get/tip.gz', license = 'MIT license', description = 'A pluggable Django application for integrating payfast.co.za payment system.', long_description = open('README.rst').read().decode('utf8'), classifiers=( 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Software Development :: Libraries :: Python Modules', ), )
Package classifiers: Explicitly target Python 2.7
Package classifiers: Explicitly target Python 2.7
Python
mit
reinbach/django-payfast
#!/usr/bin/env python from distutils.core import setup setup( name='django-payfast', version='0.2.2', author='Mikhail Korobov', author_email='[email protected]', packages=['payfast', 'payfast.south_migrations'], url='http://bitbucket.org/kmike/django-payfast/', download_url = 'http://bitbucket.org/kmike/django-payfast/get/tip.gz', license = 'MIT license', description = 'A pluggable Django application for integrating payfast.co.za payment system.', long_description = open('README.rst').read().decode('utf8'), classifiers=( 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Software Development :: Libraries :: Python Modules', ), )
Package classifiers: Explicitly target Python 2.7 #!/usr/bin/env python from distutils.core import setup setup( name='django-payfast', version='0.2.2', author='Mikhail Korobov', author_email='[email protected]', packages=['payfast', 'payfast.south_migrations'], url='http://bitbucket.org/kmike/django-payfast/', download_url = 'http://bitbucket.org/kmike/django-payfast/get/tip.gz', license = 'MIT license', description = 'A pluggable Django application for integrating payfast.co.za payment system.', long_description = open('README.rst').read().decode('utf8'), classifiers=( 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ), )
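The 2.7 classifier is consistent with the rest of the file: open('README.rst').read().decode('utf8') only works on Python 2, since Python 3's str has no decode method. If the package ever needs to run its setup.py on both, io.open reads decoded text either way; a sketch of that 2/3-neutral alternative:

import io

with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()  # unicode on 2.x, str on 3.x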
501a52ae39a63f58e2de2f7f31c6eb82e49f2e0a
comics/comics/hagarthehorrible.py
comics/comics/hagarthehorrible.py
# encoding: utf-8 from comics.aggregator.crawler import ComicsKingdomCrawlerBase from comics.core.comic_data import ComicDataBase class ComicData(ComicDataBase): name = 'Hägar the Horrible' language = 'en' url = 'https://www.comicskingdom.com/hagar-the-horrible' rights = 'Chris Browne' class Crawler(ComicsKingdomCrawlerBase): history_capable_days = 6 schedule = 'Mo,Tu,We,Th,Fr,Sa,Su' time_zone = 'US/Eastern' def crawl(self, pub_date): return self.crawl_helper('hagar-the-horrible', pub_date)
Add crawler for "Hägar the Horrible"
Add crawler for "Hägar the Horrible"
Python
agpl-3.0
datagutten/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics
# encoding: utf-8 from comics.aggregator.crawler import ComicsKingdomCrawlerBase from comics.core.comic_data import ComicDataBase class ComicData(ComicDataBase): name = 'Hägar the Horrible' language = 'en' url = 'https://www.comicskingdom.com/hagar-the-horrible' rights = 'Chris Browne' class Crawler(ComicsKingdomCrawlerBase): history_capable_days = 6 schedule = 'Mo,Tu,We,Th,Fr,Sa,Su' time_zone = 'US/Eastern' def crawl(self, pub_date): return self.crawl_helper('hagar-the-horrible', pub_date)
Add crawler for "Hägar the Horrible"
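The schedule string uses the two-letter day codes common to these crawlers. The base class's actual parsing isn't shown in this record, but the mapping is simple to sketch; DAYS and publishes_on are illustrative names, not part of the comics codebase:

import datetime

DAYS = ('Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su')  # date.weekday(): Monday == 0

def publishes_on(schedule, pub_date):
    return DAYS[pub_date.weekday()] in schedule.split(',')

assert publishes_on('Mo,Tu,We,Th,Fr,Sa,Su', datetime.date(2020, 1, 1))  # a Wednesday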
fd5ebe9ae938cdf0d586bf3177730619b8b2025a
django_auto_filter/filter_for_models.py
django_auto_filter/filter_for_models.py
from django.conf.urls import patterns, url from django.contrib.auth.decorators import login_required from django_auto_filter.views_django_auto_filter_new import DjangoAutoFilterNew from djangoautoconf.model_utils.model_attr_utils import model_enumerator from ufs_tools.string_tools import class_name_to_low_case def add_filter_to_url_for(urlpatterns, models): for model in model_enumerator(models): urlpatterns += patterns('', url(r'^models/%s/' % class_name_to_low_case(model.__name__), DjangoAutoFilterNew.as_view(model_class=model))) def get_filter_urls(models, template_name=None): url_list = [] for model in model_enumerator(models): param_dict = {"model": model} if template_name is not None: param_dict["template_name"] = template_name url_list.append(url(r'^model/%s/' % class_name_to_low_case(model.__name__), login_required(DjangoAutoFilterNew.as_view(**param_dict)))) p = patterns('', *url_list) return p
from django.conf.urls import patterns, url from django.contrib.auth.decorators import login_required from django_auto_filter.views_django_auto_filter_new import DjangoAutoFilterNew from djangoautoconf.model_utils.model_attr_utils import model_enumerator from ufs_tools.string_tools import class_name_to_low_case def add_filter_to_url_for(urlpatterns, models): for model in model_enumerator(models): urlpatterns += patterns('', url(r'^models/%s/' % class_name_to_low_case(model.__name__), DjangoAutoFilterNew.as_view(model=model))) def get_filter_urls(models, template_name=None): url_list = [] for model in model_enumerator(models): param_dict = {"model": model} if template_name is not None: param_dict["template_name"] = template_name url_list.append(url(r'^model/%s/' % class_name_to_low_case(model.__name__), login_required(DjangoAutoFilterNew.as_view(**param_dict)))) p = patterns('', *url_list) return p
Fix attribute name from model_class to model.
Fix attribute name from model_class to model.
Python
bsd-3-clause
weijia/django-auto-filter,weijia/django-auto-filter,weijia/django-auto-filter
from django.conf.urls import patterns, url from django.contrib.auth.decorators import login_required from django_auto_filter.views_django_auto_filter_new import DjangoAutoFilterNew from djangoautoconf.model_utils.model_attr_utils import model_enumerator from ufs_tools.string_tools import class_name_to_low_case def add_filter_to_url_for(urlpatterns, models): for model in model_enumerator(models): urlpatterns += patterns('', url(r'^models/%s/' % class_name_to_low_case(model.__name__), DjangoAutoFilterNew.as_view(model=model))) def get_filter_urls(models, template_name=None): url_list = [] for model in model_enumerator(models): param_dict = {"model": model} if template_name is not None: param_dict["template_name"] = template_name url_list.append(url(r'^model/%s/' % class_name_to_low_case(model.__name__), login_required(DjangoAutoFilterNew.as_view(**param_dict)))) p = patterns('', *url_list) return p
Fix attribute name from model_class to model. from django.conf.urls import patterns, url from django.contrib.auth.decorators import login_required from django_auto_filter.views_django_auto_filter_new import DjangoAutoFilterNew from djangoautoconf.model_utils.model_attr_utils import model_enumerator from ufs_tools.string_tools import class_name_to_low_case def add_filter_to_url_for(urlpatterns, models): for model in model_enumerator(models): urlpatterns += patterns('', url(r'^models/%s/' % class_name_to_low_case(model.__name__), DjangoAutoFilterNew.as_view(model_class=model))) def get_filter_urls(models, template_name=None): url_list = [] for model in model_enumerator(models): param_dict = {"model": model} if template_name is not None: param_dict["template_name"] = template_name url_list.append(url(r'^model/%s/' % class_name_to_low_case(model.__name__), login_required(DjangoAutoFilterNew.as_view(**param_dict)))) p = patterns('', *url_list) return p
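The one-word fix works because Django's View.as_view() accepts only keyword arguments that already exist as attributes on the view class, and raises TypeError for anything else as soon as the URLconf is imported; presumably DjangoAutoFilterNew defines model but not model_class. A minimal demonstration against a stock generic view (requires Django installed):

from django.views.generic import ListView

ListView.as_view(model=None)             # accepted: ListView defines a `model` attribute
try:
    ListView.as_view(model_class=None)   # rejected: no such attribute on the class
except TypeError as exc:
    print(exc)                           # "...received an invalid keyword 'model_class'..."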