Dataset schema, one row per source file; each row's trailing pipe-separated fields follow the column order below.

| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 5-88 |
| path | string | lengths 4-199 |
| copies | string | lengths 1-5 |
| size | string | lengths 4-6 |
| content | string | lengths 855-832k |
| license | string | 15 classes |
| hash | int64 | -9,223,128,179,723,874,000 to 9,223,237,214B |
| line_mean | float64 | 3.5 to 99 |
| line_max | int64 | 15 to 999 |
| alpha_frac | float64 | 0.25 to 0.87 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 7.55 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
| score | float64 | 0 to 0.2 |
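A minimal filtering sketch against this schema, assuming the dump is loadable with the Hugging Face `datasets` library; the dataset identifier below is a placeholder, since the real one is not given in this dump.

```python
from datasets import load_dataset

# Placeholder identifier -- the real dataset id is not stated in this dump.
ds = load_dataset("org/code-dataset", split="train")

# Keep human-written files under the licenses seen in the rows below, with a
# modest duplication ratio, mirroring the autogenerated/license/ratio columns.
keep = ds.filter(
    lambda row: not row["autogenerated"]
    and row["license"] in ("bsd-3-clause", "mit", "agpl-3.0")
    and row["ratio"] < 5.0
)
print(len(keep), "rows kept")
```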
tjsavage/full_nonrel_starter | django/templatetags/static.py | 233 | 2149 | from django import template
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
    Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
| bsd-3-clause | 1,365,877,122,325,783,800 | 24.583333 | 66 | 0.579805 | false | 4.197266 | false | false | false | 0.001396 |
jensengrouppsu/rapid | rapid/pyqtgraph/flowchart/FlowchartTemplate_pyside.py | 50 | 2387 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/flowchart/FlowchartTemplate.ui'
#
# Created: Mon Dec 23 10:10:51 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(529, 329)
self.selInfoWidget = QtGui.QWidget(Form)
self.selInfoWidget.setGeometry(QtCore.QRect(260, 10, 264, 222))
self.selInfoWidget.setObjectName("selInfoWidget")
self.gridLayout = QtGui.QGridLayout(self.selInfoWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.selDescLabel = QtGui.QLabel(self.selInfoWidget)
self.selDescLabel.setText("")
self.selDescLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.selDescLabel.setWordWrap(True)
self.selDescLabel.setObjectName("selDescLabel")
self.gridLayout.addWidget(self.selDescLabel, 0, 0, 1, 1)
self.selNameLabel = QtGui.QLabel(self.selInfoWidget)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.selNameLabel.setFont(font)
self.selNameLabel.setText("")
self.selNameLabel.setObjectName("selNameLabel")
self.gridLayout.addWidget(self.selNameLabel, 0, 1, 1, 1)
self.selectedTree = DataTreeWidget(self.selInfoWidget)
self.selectedTree.setObjectName("selectedTree")
self.selectedTree.headerItem().setText(0, "1")
self.gridLayout.addWidget(self.selectedTree, 1, 0, 1, 2)
self.hoverText = QtGui.QTextEdit(Form)
self.hoverText.setGeometry(QtCore.QRect(0, 240, 521, 81))
self.hoverText.setObjectName("hoverText")
self.view = FlowchartGraphicsView(Form)
self.view.setGeometry(QtCore.QRect(0, 0, 256, 192))
self.view.setObjectName("view")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
from ..flowchart.FlowchartGraphicsView import FlowchartGraphicsView
from ..widgets.DataTreeWidget import DataTreeWidget
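# A minimal usage sketch (not part of the generated file, added for
# illustration): pyside-uic output is conventionally applied to a bare widget.
#
#     from PySide import QtGui
#     app = QtGui.QApplication([])
#     form = QtGui.QWidget()
#     ui = Ui_Form()
#     ui.setupUi(form)
#     form.show()
#     app.exec_()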
| mit | 2,436,660,734,330,622,000 | 43.203704 | 111 | 0.697947 | false | 3.633181 | false | false | false | 0.00377 |
EDUlib/edx-platform | common/djangoapps/student/roles.py | 4 | 13523 | """
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
import logging
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from opaque_keys.edx.django.models import CourseKeyField
from openedx.core.lib.cache_utils import get_cache
from common.djangoapps.student.models import CourseAccessRole
log = logging.getLogger(__name__)
# A list of registered access roles.
REGISTERED_ACCESS_ROLES = {}
def register_access_role(cls):
"""
Decorator that allows access roles to be registered within the roles module and referenced by their
string values.
Assumes that the decorated class has a "ROLE" attribute, defining its type.
"""
try:
role_name = cls.ROLE
REGISTERED_ACCESS_ROLES[role_name] = cls
except AttributeError:
log.exception("Unable to register Access Role with attribute 'ROLE'.")
return cls
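# A minimal registration sketch (hypothetical role, for illustration only):
# any class exposing a string ROLE attribute can be registered, after which
# REGISTERED_ACCESS_ROLES['my_custom_role'] resolves back to the class.
#
#     @register_access_role
#     class MyCustomRole(CourseRole):
#         """Hypothetical course-level role."""
#         ROLE = 'my_custom_role'
#
#         def __init__(self, *args, **kwargs):
#             super().__init__(self.ROLE, *args, **kwargs)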
class BulkRoleCache: # lint-amnesty, pylint: disable=missing-class-docstring
CACHE_NAMESPACE = "student.roles.BulkRoleCache"
CACHE_KEY = 'roles_by_user'
@classmethod
def prefetch(cls, users): # lint-amnesty, pylint: disable=missing-function-docstring
roles_by_user = defaultdict(set)
get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY] = roles_by_user
for role in CourseAccessRole.objects.filter(user__in=users).select_related('user'):
roles_by_user[role.user.id].add(role)
users_without_roles = [u for u in users if u.id not in roles_by_user]
for user in users_without_roles:
roles_by_user[user.id] = set()
@classmethod
def get_user_roles(cls, user):
return get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY][user.id]
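# A minimal usage sketch: prefetch roles for a batch of users once, then read
# each user's roles from the request-scoped cache without further queries
# (user_ids is an assumed input, not defined in this module).
#
#     users = User.objects.filter(id__in=user_ids)
#     BulkRoleCache.prefetch(users)
#     for user in users:
#         roles = BulkRoleCache.get_user_roles(user)  # set of CourseAccessRole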
class RoleCache:
"""
A cache of the CourseAccessRoles held by a particular user
"""
def __init__(self, user):
try:
self._roles = BulkRoleCache.get_user_roles(user)
except KeyError:
self._roles = set(
CourseAccessRole.objects.filter(user=user).all()
)
def has_role(self, role, course_id, org):
"""
        Return whether this RoleCache contains an entry matching the given role name, course_id, and org
"""
return any(
access_role.role == role and
access_role.course_id == course_id and
access_role.org == org
for access_role in self._roles
)
class AccessRole(metaclass=ABCMeta):
"""
Object representing a role with particular access to a resource
"""
@abstractmethod
def has_user(self, user):
"""
Return whether the supplied django user has access to this role.
"""
return False
@abstractmethod
def add_users(self, *users):
"""
Add the role to the supplied django users.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def remove_users(self, *users):
"""
Remove the role from the supplied django users.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
return User.objects.none()
class GlobalStaff(AccessRole):
"""
The global staff role
"""
def has_user(self, user):
return bool(user and user.is_staff)
def add_users(self, *users):
for user in users:
if user.is_authenticated and user.is_active:
user.is_staff = True
user.save()
def remove_users(self, *users):
for user in users:
# don't check is_authenticated nor is_active on purpose
user.is_staff = False
user.save()
def users_with_role(self):
raise Exception("This operation is un-indexed, and shouldn't be used")
class RoleBase(AccessRole):
"""
Roles by type (e.g., instructor, beta_user) and optionally org, course_key
"""
def __init__(self, role_name, org='', course_key=None):
"""
        Create a role from the required role_name, with optional org and course_key.
        Provide only a role name for a global role (not constrained to an org or
        course), an org for an org-wide role, and both org and course_key for a
        course-level role. In practice, use the dedicated subclasses for each case.
"""
super().__init__()
self.org = org
self.course_key = course_key
self._role_name = role_name
# pylint: disable=arguments-differ
def has_user(self, user, check_user_activation=True):
"""
Check if the supplied django user has access to this role.
Arguments:
user: user to check against access to role
            check_user_activation: whether the user must be authenticated and
                active for the role check to pass
Return:
bool identifying if user has that particular role or not
"""
if check_user_activation and not (user.is_authenticated and user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(user, '_roles'):
            # Cache the set of CourseAccessRole rows this user holds, so that
            # repeated role checks avoid hitting the database again
user._roles = RoleCache(user)
return user._roles.has_role(self._role_name, self.course_key, self.org)
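    # A minimal check sketch (the course id is hypothetical): the first
    # has_user() call builds user._roles via RoleCache; later checks reuse it.
    #
    #     from opaque_keys.edx.keys import CourseKey
    #     course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_Course')
    #     if CourseStaffRole(course_key).has_user(request.user):
    #         ...  # staff-only path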
def add_users(self, *users):
"""
Add the supplied django users to this role.
"""
# silently ignores anonymous and inactive users so that any that are
# legit get updated.
from common.djangoapps.student.models import CourseAccessRole # lint-amnesty, pylint: disable=redefined-outer-name, reimported
for user in users:
if user.is_authenticated and user.is_active and not self.has_user(user):
entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)
entry.save()
if hasattr(user, '_roles'):
del user._roles
def remove_users(self, *users):
"""
Remove the supplied django users from this role.
"""
entries = CourseAccessRole.objects.filter(
user__in=users, role=self._role_name, org=self.org, course_id=self.course_key
)
entries.delete()
for user in users:
if hasattr(user, '_roles'):
del user._roles
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
# Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query
if self.course_key is None:
self.course_key = CourseKeyField.Empty
entries = User.objects.filter(
courseaccessrole__role=self._role_name,
courseaccessrole__org=self.org,
courseaccessrole__course_id=self.course_key
)
return entries
class CourseRole(RoleBase):
"""
A named role in a particular course
"""
def __init__(self, role, course_key):
"""
Args:
course_key (CourseKey)
"""
super().__init__(role, course_key.org, course_key)
@classmethod
def course_group_already_exists(self, course_key): # lint-amnesty, pylint: disable=bad-classmethod-argument
return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists()
def __repr__(self):
return f'<{self.__class__.__name__}: course_key={self.course_key}>'
class OrgRole(RoleBase):
"""
A named role in a particular org independent of course
"""
def __repr__(self):
return f'<{self.__class__.__name__}>'
@register_access_role
class CourseStaffRole(CourseRole):
"""A Staff member of a course"""
ROLE = 'staff'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseInstructorRole(CourseRole):
"""A course Instructor"""
ROLE = 'instructor'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseFinanceAdminRole(CourseRole):
"""A course staff member with privileges to review financial data."""
ROLE = 'finance_admin'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseSalesAdminRole(CourseRole):
"""A course staff member with privileges to perform sales operations. """
ROLE = 'sales_admin'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseBetaTesterRole(CourseRole):
"""A course Beta Tester"""
ROLE = 'beta_testers'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class LibraryUserRole(CourseRole):
"""
A user who can view a library and import content from it, but not edit it.
Used in Studio only.
"""
ROLE = 'library_user'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
class CourseCcxCoachRole(CourseRole):
"""A CCX Coach"""
ROLE = 'ccx_coach'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseDataResearcherRole(CourseRole):
"""A Data Researcher"""
ROLE = 'data_researcher'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
class OrgStaffRole(OrgRole):
"""An organization staff member"""
def __init__(self, *args, **kwargs):
super().__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
"""An organization instructor"""
def __init__(self, *args, **kwargs):
super().__init__('instructor', *args, **kwargs)
class OrgLibraryUserRole(OrgRole):
"""
A user who can view any libraries in an org and import content from them, but not edit them.
Used in Studio only.
"""
ROLE = LibraryUserRole.ROLE
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
class OrgDataResearcherRole(OrgRole):
"""A Data Researcher"""
ROLE = 'data_researcher'
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseCreatorRole(RoleBase):
"""
This is the group of people who have permission to create new courses (we may want to eventually
make this an org based role).
"""
ROLE = "course_creator_group"
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
@register_access_role
class SupportStaffRole(RoleBase):
"""
Student support team members.
"""
ROLE = "support"
def __init__(self, *args, **kwargs):
super().__init__(self.ROLE, *args, **kwargs)
class UserBasedRole:
"""
Backward mapping: given a user, manipulate the courses and roles
"""
def __init__(self, user, role):
"""
Create a UserBasedRole accessor: for a given user and role (e.g., "instructor")
"""
self.user = user
self.role = role
def has_course(self, course_key):
"""
Return whether the role's user has the configured role access to the passed course
"""
if not (self.user.is_authenticated and self.user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(self.user, '_roles'):
self.user._roles = RoleCache(self.user)
return self.user._roles.has_role(self.role, course_key, course_key.org)
def add_course(self, *course_keys):
"""
Grant this object's user the object's role for the supplied courses
"""
if self.user.is_authenticated and self.user.is_active:
for course_key in course_keys:
entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org)
entry.save()
if hasattr(self.user, '_roles'):
del self.user._roles
else:
raise ValueError("user is not active. Cannot grant access to courses")
def remove_courses(self, *course_keys):
"""
Remove the supplied courses from this user's configured role.
"""
entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)
entries.delete()
if hasattr(self.user, '_roles'):
del self.user._roles
def courses_with_role(self):
"""
Return a django QuerySet for all of the courses with this user x role. You can access
any of these properties on each result record:
* user (will be self.user--thus uninteresting)
* org
* course_id
* role (will be self.role--thus uninteresting)
"""
return CourseAccessRole.objects.filter(role=self.role, user=self.user)
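# A minimal backward-mapping sketch (course keys are hypothetical): grant,
# query, then revoke the instructor role for a single user across courses.
#
#     role = UserBasedRole(user, CourseInstructorRole.ROLE)
#     role.add_course(course_key_a, course_key_b)
#     role.courses_with_role()       # CourseAccessRole queryset for this user
#     role.remove_courses(course_key_a)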
| agpl-3.0 | -3,651,953,646,293,075,000 | 30.303241 | 135 | 0.618502 | false | 4.008002 | false | false | false | 0.001997 |
alhashash/odoo | openerp/addons/base/tests/test_mail_examples.py | 2 | 56685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
OERP_WEBSITE_HTML_1_IN = [
'Manage your company most important asset: People',
'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
OERP_WEBSITE_HTML_1_OUT = [
'Break down information silos.',
'Keep track of the vacation days accrued by each employee',
'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="">
<div class="container">
<div class="row">
<div class="col-md-6 mt32">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
[email protected]
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
"""> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature"""]
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
HTML_2_OUT = []
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
"Regards,"]
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <[email protected]>
To: [email protected]
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:[email protected]">[email protected]</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:[email protected]">[email protected]</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: [email protected]<br>To: [email protected]<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."]
HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
"We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"]
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:[email protected]]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:[email protected]">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">[email protected]</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_2_IN = ['We are currently investigating the possibility']
MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_3_IN = ['I saw your boss yesterday']
MSOFFICE_3_OUT = ['I noticed you recently registered to our OpenERP Online solution.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
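# A minimal sketch of how these fixture/expectation pairs are typically
# consumed (clean_fn stands for a quote-stripping helper such as odoo's
# html_email_clean; the helper itself is an assumption, not defined here):
#
#     def check(clean_fn, source, expected_in, expected_out):
#         cleaned = clean_fn(source)
#         assert all(fragment in cleaned for fragment in expected_in)
#         assert all(fragment not in cleaned for fragment in expected_out)
#
#     # e.g. check(html_email_clean, MSOFFICE_1, MSOFFICE_1_IN, MSOFFICE_1_OUT)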
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
'Hi Migration Team',
'Paragraph 1'
]
BUG_1_OUT = [
'Olivier Laurent',
'Chaussée de Namur',
'81.81.37.00',
'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:[email protected]">[email protected]</a>
[<a href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:[email protected]">[email protected]</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:[email protected]">[email protected]</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
BUG_2_IN = [
'read more',
'...',
]
BUG_2_OUT = [
'Fwd: TR: OpenERP S.A',
'fait un mois'
]
# BUG 20/08/2014: READ MORE NOT APPEARING
BUG3 = """<div class="oe_msg_body_long" style="/* display: none; */"><p>OpenERP has been upgraded to version 8.0.</p>
<h2>What's new in this upgrade?</h2>
<div class="document">
<ul>
<li><p class="first">New Warehouse Management System:</p>
<blockquote>
<p>Schedule your picking, packing, receptions and internal moves automatically with Odoo using
your own routing rules. Define push and procurement rules to organize a warehouse or to manage
product moves between several warehouses. Track in detail all stock moves, not only in your
warehouse but wherever else it's taken as well (customers, suppliers or manufacturing
locations).</p>
</blockquote>
</li>
<li><p class="first">New Product Configurator</p>
</li>
<li><p class="first">Documentation generation from website forum:</p>
<blockquote>
<p>New module to generate a documentation from questions and responses from your forum.
The documentation manager can define a table of content and any user, depending their karma,
can link a question to an entry of this TOC.</p>
</blockquote>
</li>
<li><p class="first">New kanban view of documents (resumes and letters in recruitement, project documents...)</p>
</li>
<li><p class="first">E-Commerce:</p>
<blockquote>
<ul class="simple">
<li>Manage TIN in contact form for B2B.</li>
<li>Dedicated salesteam to easily manage leads and orders.</li>
</ul>
</blockquote>
</li>
<li><p class="first">Better Instant Messaging.</p>
</li>
<li><p class="first">Faster and Improved Search view: Search drawer now appears on top of the results, and is open
by default in reporting views</p>
</li>
<li><p class="first">Improved User Interface:</p>
<blockquote>
<ul class="simple">
<li>Popups has changed to be more responsive on tablets and smartphones.</li>
<li>New Stat Buttons: Forms views have now dynamic buttons showing some statistics abouts linked models.</li>
<li>Color code to check in one look availability of components in an MRP order.</li>
<li>Unified menu bar allows you to switch easily between the frontend (website) and backend</li>
<li>Results panel is now scrollable independently of the menu bars, keeping the navigation,
search bar and view switcher always within reach.</li>
</ul>
</blockquote>
</li>
<li><p class="first">User signature is now in HTML.</p>
</li>
<li><p class="first">New development API.</p>
</li>
<li><p class="first">Remove support for Outlook and Thunderbird plugins</p>
</li>
</ul>
</div>
<p>Enjoy the new OpenERP Online!</p><span class="oe_mail_reduce"><a href="#">read less</a></span></div>"""
BUG_3_IN = [
'read more',
'...',
]
BUG_3_OUT = [
'New kanban view of documents'
]
| agpl-3.0 | -3,023,002,650,581,860,400 | 47.196596 | 564 | 0.5886 | false | 3.14809 | true | false | false | 0.00505 |
fedepad/espressopp | src/tools/units.py | 7 | 3518 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
*****************************
units - convert to real units
*****************************
ESPResSo++ returns temperature, energy, pressure, box length, etc. in dimensionless (reduced) units,
so the user normally has to keep track of the real length, energy, mass and charge units.
This Python class is a helper that simplifies the conversion, based on a set of basic units.
However, it should be used with care for complicated systems.
Currently it is implemented for SI units. Make sure that you are using
length in [nm]
energy in [kJ/mol]
mass in [amu]
q in [e]
and it will return
pressure in [bar]
temperature in [K]
time in [ps]
density in [kg/m^3]
Example:
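
A minimal illustrative sketch (the basic-unit values below are hypothetical
Lennard-Jones-style choices, and the import path is assumed from this
module's location in the source tree):

>>> from espressopp.tools.units import Real_Units
>>> ru = Real_Units(0.34, 0.996, 39.948, 1.0) # length [nm], energy [kJ/mol], mass [amu], charge [e]
>>> T_real = ru.temperature(1.0) # dimensionless temperature -> K
>>> t_real = ru.time(1.0) # dimensionless time -> ps
>>> T_dl = ru.dl_temperature(300.0) # K -> dimensionless temperature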
'''
import espressopp
import math
kB = 1.3806488 * pow(10,-23) # m^2 * kg * s^-2 * K^-1
Na = 6.0221413 * pow(10, 23) # mol^-1
amu = 1.6605389 #* pow(10,-27) -- the 10^-27 kg factor cancels against the nm^3 -> m^3 conversion in density_factor
class Real_Units:
def __init__(self, _length, _energy, _mass, _charge):
self.length_factor = _length
self.energy_factor = _energy
self.mass_factor = _mass
self.charge_factor = _charge
self.pressure_factor = self.energy_factor / pow(self.length_factor, 3)
self.temperature_factor = self.energy_factor / (kB * Na) * 1000
self.time_factor = self.length_factor * math.sqrt( self.mass_factor / self.energy_factor)
self.density_factor = self.mass_factor * amu / pow(self.length_factor, 3)
def length(self, dl_length):
return dl_length * self.length_factor
def energy(self, dl_energy):
return dl_energy * self.energy_factor
def mass(self, dl_mass):
return dl_mass * self.mass_factor
def charge(self, dl_charge):
return dl_charge * self.charge_factor
def pressure(self, dl_pressure):
return dl_pressure * self.pressure_factor
def temperature(self, dl_temperature):
return dl_temperature * self.temperature_factor
def time(self, dl_time):
return dl_time * self.time_factor
def density(self, dl_density):
return dl_density * self.density_factor
# the other way around
def dl_length(self, length):
return length / self.length_factor
def dl_energy(self, energy):
return energy / self.energy_factor
def dl_mass(self, mass):
return mass / self.mass_factor
def dl_charge(self, charge):
return charge / self.charge_factor
def dl_pressure(self, pressure):
return pressure / self.pressure_factor
def dl_temperature(self, temperature):
return temperature / self.temperature_factor
def dl_time(self, time):
return time / self.time_factor
def dl_density(self, density):
return density / self.density_factor
| gpl-3.0 | 8,548,672,955,802,156,000 | 30.132743 | 338 | 0.679648 | false | 3.514486 | false | false | false | 0.015065 |
wearpants/osf.io | website/project/sanctions.py | 10 | 37178 | import datetime
import functools
from dateutil.parser import parse as parse_date
from modularodm import (
fields,
Q,
)
from modularodm.exceptions import NoResultsFound
from modularodm.validators import MaxLengthValidator
from framework.auth import Auth
from framework.exceptions import PermissionsError
from framework.mongo import (
ObjectId,
StoredObject,
validators,
)
from website import (
mails,
settings,
tokens,
)
from website.exceptions import (
InvalidSanctionApprovalToken,
InvalidSanctionRejectionToken,
)
from website.prereg import utils as prereg_utils
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
class Sanction(StoredObject):
"""Sanction class is a generic way to track approval states"""
# Tell modularodm not to attach backends
_meta = {
'abstract': True,
}
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
# Neither approved nor cancelled
UNAPPROVED = 'unapproved'
# Has approval
APPROVED = 'approved'
# Rejected by at least one person
REJECTED = 'rejected'
# Embargo has been completed
COMPLETED = 'completed'
state = fields.StringField(
default=UNAPPROVED,
validate=validators.choice_in((
UNAPPROVED,
APPROVED,
REJECTED,
COMPLETED,
))
)
DISPLAY_NAME = 'Sanction'
# SHORT_NAME must correspond with the associated foreign field to query against,
# e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
SHORT_NAME = 'sanction'
APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
REJECTION_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
# Controls whether or not the Sanction needs unanimous approval or just a single approval
ANY = 'any'
UNANIMOUS = 'unanimous'
mode = UNANIMOUS
initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
# Expiration date -- Sanctions in the UNAPPROVED state that are older than their end_date
# are automatically APPROVED by a daily cron job
# Use end_date=None for a non-expiring Sanction
end_date = fields.DateTimeField(default=None)
# Sanction subclasses must have an initiated_by field
# initiated_by = fields.ForeignField('user', backref='initiated')
# Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
# {
# 'b3k97': {
# 'has_approved': False,
# 'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
# 'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
# }
approval_state = fields.DictionaryField()
def __repr__(self):
return '<Sanction(end_date={self.end_date!r}) with _id {self._id!r}>'.format(self=self)
@property
def is_pending_approval(self):
return self.state == Sanction.UNAPPROVED
@property
def is_approved(self):
return self.state == Sanction.APPROVED
@property
def is_rejected(self):
return self.state == Sanction.REJECTED
def approve(self, user):
raise NotImplementedError('Sanction subclasses must implement an approve method.')
def reject(self, user):
raise NotImplementedError('Sanction subclasses must implement a reject method.')
def _on_reject(self, user):
"""Callback for rejection of a Sanction
:param User user:
"""
raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
def _on_complete(self, user):
"""Callback for when a Sanction has approval and enters the ACTIVE state
:param User user:
"""
raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
def forcibly_reject(self):
self.state = Sanction.REJECTED
class TokenApprovableSanction(Sanction):
# Tell modularodm not to attach backends
_meta = {
'abstract': True,
}
def _validate_authorizer(self, user):
"""Subclasses may choose to provide extra restrictions on who can be an authorizer
:return Boolean: True if user is allowed to be an authorizer else False
"""
return True
def add_authorizer(self, user, node, approved=False, save=False):
"""Add an admin user to this Sanction's approval state.
:param User user: User to add.
:param Node registration: The pending registration node.
:param bool approved: Whether `user` has approved.
:param bool save: Whether to save this object.
"""
valid = self._validate_authorizer(user)
if valid and user._id not in self.approval_state:
self.approval_state[user._id] = {
'has_approved': approved,
'node_id': node._id,
'approval_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'approve_{}'.format(self.SHORT_NAME)
}
),
'rejection_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'reject_{}'.format(self.SHORT_NAME)
}
),
}
if save:
self.save()
return True
return False
def remove_authorizer(self, user, save=False):
"""Remove a user as an authorizer
:param User user:
:return Boolean: True if user is removed else False
"""
if user._id not in self.approval_state:
return False
del self.approval_state[user._id]
if save:
self.save()
return True
def _on_approve(self, user, token):
"""Callback for when a single user approves a Sanction. Calls #_on_complete under two conditions:
- mode is ANY and the Sanction has not already been cancelled
- mode is UNANIMOUS and all users have given approval
:param User user:
:param str token: user's approval token
"""
if self.mode == self.ANY or all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
self.state = Sanction.APPROVED
self._on_complete(user)
def token_for_user(self, user, method):
"""
:param str method: 'approval' | 'rejection'
"""
try:
user_state = self.approval_state[user._id]
except KeyError:
raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
return user_state['{0}_token'.format(method)]
def approve(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
try:
if self.approval_state[user._id]['approval_token'] != token:
raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.approval_state[user._id]['has_approved'] = True
self._on_approve(user, token)
def reject(self, user, token):
"""Cancels sanction if user is admin and token verifies."""
try:
if self.approval_state[user._id]['rejection_token'] != token:
raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.state = Sanction.REJECTED
self._on_reject(user)
def _notify_authorizer(self, user, node):
pass
def _notify_non_authorizer(self, user, node):
pass
def ask(self, group):
"""
:param list group: List of (user, node) tuples containing contributors to notify about the
sanction.
"""
for contrib, node in group:
if contrib._id in self.approval_state:
self._notify_authorizer(contrib, node)
else:
self._notify_non_authorizer(contrib, node)
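
# Illustrative approval flow for a TokenApprovableSanction subclass (the
# `sanction`, `admin`, `contrib` and `node` objects below are placeholders,
# not defined in this module):
#
# sanction.add_authorizer(admin, node, save=True)
# sanction.ask([(admin, node), (contrib, node)]) # send notifications
# token = sanction.token_for_user(admin, 'approval')
# sanction.approve(admin, token) # calls _on_complete once the approval criteria are met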
class EmailApprovableSanction(TokenApprovableSanction):
# Tell modularodm not to attach backends
_meta = {
'abstract': True,
}
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
VIEW_URL_TEMPLATE = ''
APPROVE_URL_TEMPLATE = ''
REJECT_URL_TEMPLATE = ''
# A flag to conditionally run a callback on complete
notify_initiator_on_complete = fields.BooleanField(default=False)
# Store a persistent copy of urls for use when needed outside of a request context.
# This field gets automagically updated whenever models approval_state is modified
# and the model is saved
# {
# 'abcde': {
# 'approve': [APPROVAL_URL],
# 'reject': [REJECT_URL],
# }
# }
stashed_urls = fields.DictionaryField(default=dict)
@staticmethod
def _format_or_empty(template, context):
if context:
return template.format(**context)
return ''
def _view_url(self, user_id, node):
return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id, node))
def _view_url_context(self, user_id, node):
return None
def _approval_url(self, user_id):
return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
def _approval_url_context(self, user_id):
return None
def _rejection_url(self, user_id):
return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
def _rejection_url_context(self, user_id):
return None
def _send_approval_request_email(self, user, template, context):
mails.send_mail(
user.username,
template,
user=user,
**context
)
def _email_template_context(self, user, node, is_authorizer=False):
return {}
def _notify_authorizer(self, authorizer, node):
context = self._email_template_context(authorizer, node, is_authorizer=True)
if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def _notify_non_authorizer(self, user, node):
context = self._email_template_context(user, node)
if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def add_authorizer(self, user, node, **kwargs):
super(EmailApprovableSanction, self).add_authorizer(user, node, **kwargs)
self.stashed_urls[user._id] = {
'view': self._view_url(user._id, node),
'approve': self._approval_url(user._id),
'reject': self._rejection_url(user._id)
}
self.save()
def _notify_initiator(self):
raise NotImplementedError
def _on_complete(self, *args):
if self.notify_initiator_on_complete:
self._notify_initiator()
class PreregCallbackMixin(object):
def _notify_initiator(self):
from website.project.model import DraftRegistration
registration = self._get_registration()
prereg_schema = prereg_utils.get_prereg_schema()
draft = DraftRegistration.find_one(
Q('registered_node', 'eq', registration)
)
if prereg_schema in registration.registered_schema:
mails.send_mail(
draft.initiator.username,
mails.PREREG_CHALLENGE_ACCEPTED,
user=draft.initiator,
registration_url=registration.absolute_url,
mimetype='html'
)
def _email_template_context(self, user, node, is_authorizer=False, urls=None):
registration = self._get_registration()
prereg_schema = prereg_utils.get_prereg_schema()
if prereg_schema in registration.registered_schema:
return {
'custom_message': ' as part of the Preregistration Challenge (https://cos.io/prereg)'
}
else:
return {}
class Embargo(PreregCallbackMixin, EmailApprovableSanction):
"""Embargo object for registrations waiting to go public."""
DISPLAY_NAME = 'Embargo'
SHORT_NAME = 'embargo'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='embargoed')
for_existing_registration = fields.BooleanField(default=False)
@property
def is_completed(self):
return self.state == self.COMPLETED
@property
def embargo_end_date(self):
if self.state == self.APPROVED:
return self.end_date
return False
# NOTE(hrybacki): Old, private registrations are grandfathered and are not
# required to be made public or embargoed. This property differentiates them
# from new registrations entering an embargo period, which should not
# show up in any search-related fields.
@property
def pending_registration(self):
return not self.for_existing_registration and self.is_pending_approval
def __repr__(self):
from website.project.model import Node
parent_registration = None
try:
parent_registration = Node.find_one(Q('embargo', 'eq', self))
except NoResultsFound:
pass
return ('<Embargo(parent_registration={0}, initiated_by={1}, '
'end_date={2}) with _id {3}>').format(
parent_registration,
self.initiated_by,
self.end_date,
self._id
)
def _get_registration(self):
from website.project.model import Node
return Node.find_one(Q('embargo', 'eq', self))
def _view_url_context(self, user_id, node):
registration = node or self._get_registration()
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
approval_token = user_approval_state.get('approval_token')
if approval_token:
registration = self._get_registration()
node_id = user_approval_state.get('node_id', registration._id)
return {
'node_id': node_id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
rejection_token = user_approval_state.get('rejection_token')
if rejection_token:
from website.project.model import Node
root_registration = self._get_registration()
node_id = user_approval_state.get('node_id', root_registration._id)
registration = Node.load(node_id)
return {
'node_id': registration.registered_from._id,
'token': rejection_token,
}
def _email_template_context(self, user, node, is_authorizer=False, urls=None):
context = super(Embargo, self)._email_template_context(
user,
node,
is_authorizer=is_authorizer
)
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id, node))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
registration = self._get_registration()
context.update({
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'approval_link': approval_link,
'project_name': registration.title,
'disapproval_link': disapproval_link,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
'approval_time_span': approval_time_span,
})
else:
context.update({
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
})
return context
def _on_reject(self, user):
from website.project.model import NodeLog
parent_registration = self._get_registration()
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration.registered_from_id,
'registration': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(user),
)
# Remove backref to parent project if embargo was for a new registration
if not self.for_existing_registration:
parent_registration.delete_registration_tree(save=True)
parent_registration.registered_from = None
# Delete parent registration if it was created at the time the embargo was initiated
if not self.for_existing_registration:
parent_registration.is_deleted = True
parent_registration.save()
def disapprove_embargo(self, user, token):
"""Cancels retraction if user is admin and token verifies."""
self.reject(user, token)
def _on_complete(self, user):
from website.project.model import NodeLog
super(Embargo, self)._on_complete(user)
parent_registration = self._get_registration()
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_APPROVED,
params={
'node': parent_registration.registered_from_id,
'registration': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(self.initiated_by),
)
self.save()
def approve_embargo(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
self.approve(user, token)
def mark_as_completed(self):
self.state = Sanction.COMPLETED
self.save()
class Retraction(EmailApprovableSanction):
"""
Retraction object for public registrations.
Externally (specifically in user-facing language) retractions should be referred to as "Withdrawals", i.e.
"Retract Registration" -> "Withdraw Registration", "Retracted" -> "Withdrawn", etc.
"""
DISPLAY_NAME = 'Retraction'
SHORT_NAME = 'retraction'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='initiated')
justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
def __repr__(self):
from website.project.model import Node
parent_registration = None
try:
parent_registration = Node.find_one(Q('retraction', 'eq', self))
except NoResultsFound:
pass
return ('<Retraction(parent_registration={0}, initiated_by={1}) '
'with _id {2}>').format(
parent_registration,
self.initiated_by,
self._id
)
def _view_url_context(self, user_id, node):
from website.project.model import Node
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
approval_token = user_approval_state.get('approval_token')
if approval_token:
from website.project.model import Node
root_registration = Node.find_one(Q('retraction', 'eq', self))
node_id = user_approval_state.get('node_id', root_registration._id)
return {
'node_id': node_id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
rejection_token = user_approval_state.get('rejection_token')
if rejection_token:
from website.project.model import Node
root_registration = Node.find_one(Q('retraction', 'eq', self))
node_id = user_approval_state.get('node_id', root_registration._id)
registration = Node.load(node_id)
return {
'node_id': registration.registered_from._id,
'token': rejection_token,
}
def _email_template_context(self, user, node, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id, node))
if is_authorizer:
from website.project.model import Node
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'project_name': registration.title,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _on_reject(self, user):
from website.project.model import Node, NodeLog
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_CANCELLED,
params={
'node': parent_registration.registered_from_id,
'registration': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(user),
save=True,
)
def _on_complete(self, user):
from website.project.model import Node, NodeLog
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_APPROVED,
params={
'node': parent_registration.registered_from_id,
'retraction_id': self._id,
'registration': parent_registration._id
},
auth=Auth(self.initiated_by),
)
# Remove any embargoes associated with the registration
if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
parent_registration.embargo.state = self.REJECTED
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration.registered_from_id,
'registration': parent_registration._id,
'embargo_id': parent_registration.embargo._id,
},
auth=Auth(self.initiated_by),
)
parent_registration.embargo.save()
# Ensure retracted registration is public
# Pass auth=None because the registration initiator may not be
# an admin on components (component admins had the opportunity
# to disapprove the retraction by this point)
for node in parent_registration.node_and_primary_descendants():
node.set_privacy('public', auth=None, save=True, log=False)
node.update_search()
def approve_retraction(self, user, token):
self.approve(user, token)
def disapprove_retraction(self, user, token):
self.reject(user, token)
class RegistrationApproval(PreregCallbackMixin, EmailApprovableSanction):
DISPLAY_NAME = 'Approval'
SHORT_NAME = 'registration_approval'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='registration_approved')
def _get_registration(self):
from website.project.model import Node
return Node.find_one(Q('registration_approval', 'eq', self))
def _view_url_context(self, user_id, node):
user_approval_state = self.approval_state.get(user_id, {})
node_id = user_approval_state.get('node_id', node._id)
return {
'node_id': node_id
}
def _approval_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
approval_token = user_approval_state.get('approval_token')
if approval_token:
registration = self._get_registration()
node_id = user_approval_state.get('node_id', registration._id)
return {
'node_id': node_id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
rejection_token = user_approval_state.get('rejection_token')
if rejection_token:
from website.project.model import Node
root_registration = self._get_registration()
node_id = user_approval_state.get('node_id', root_registration._id)
registration = Node.load(node_id)
return {
'node_id': registration.registered_from._id,
'token': rejection_token,
}
def _email_template_context(self, user, node, is_authorizer=False, urls=None):
context = super(RegistrationApproval, self)._email_template_context(user, node, is_authorizer, urls)
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id, node))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
registration = self._get_registration()
context.update({
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
'project_name': registration.title,
})
else:
context.update({
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
})
return context
def _add_success_logs(self, node, user):
from website.project.model import NodeLog
src = node.registered_from
src.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'parent_node': src.parent_id,
'node': src._primary_key,
'registration': node._primary_key,
},
auth=Auth(user),
save=False
)
src.save()
def _on_complete(self, user):
from website.project.model import NodeLog
super(RegistrationApproval, self)._on_complete(user)
self.state = Sanction.APPROVED
register = self._get_registration()
registered_from = register.registered_from
# Pass auth=None because the registration initiator may not be
# an admin on components (component admins had the opportunity
# to disapprove the registration by this point)
register.set_privacy('public', auth=None, log=False)
for child in register.get_descendants_recursive(lambda n: n.primary):
child.set_privacy('public', auth=None, log=False)
# Accounts for system actions where no `User` performs the final approval
auth = Auth(user) if user else None
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
params={
'node': registered_from._id,
'registration': register._id,
'registration_approval_id': self._id,
},
auth=auth,
)
for node in register.root.node_and_primary_descendants():
self._add_success_logs(node, user)
node.update_search() # update search if public
self.save()
def _on_reject(self, user):
from website.project.model import NodeLog
register = self._get_registration()
registered_from = register.registered_from
register.delete_registration_tree(save=True)
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
params={
'node': registered_from._id,
'registration': register._id,
'registration_approval_id': self._id,
},
auth=Auth(user),
)
class DraftRegistrationApproval(Sanction):
mode = Sanction.ANY
# Since draft registrations that require approval are not immediately registered,
# meta stores registration_choice and embargo_end_date (when applicable)
meta = fields.DictionaryField(default=dict)
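# e.g. (illustrative values only):
# {
# 'registration_choice': 'embargo',
# 'embargo_end_date': '2017-01-01T00:00:00'
# }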
def _send_rejection_email(self, user, draft):
schema = draft.registration_schema
prereg_schema = prereg_utils.get_prereg_schema()
if schema._id == prereg_schema._id:
mails.send_mail(
user.username,
mails.PREREG_CHALLENGE_REJECTED,
user=user,
draft_url=draft.absolute_url
)
else:
raise NotImplementedError(
'TODO: add a generic email template for registration approvals'
)
def approve(self, user):
if settings.PREREG_ADMIN_TAG not in user.system_tags:
raise PermissionsError('This user does not have permission to approve this draft.')
self.state = Sanction.APPROVED
self._on_complete(user)
def reject(self, user):
if settings.PREREG_ADMIN_TAG not in user.system_tags:
raise PermissionsError('This user does not have permission to approve this draft.')
self.state = Sanction.REJECTED
self._on_reject(user)
def _on_complete(self, user):
from website.project.model import DraftRegistration
draft = DraftRegistration.find_one(
Q('approval', 'eq', self)
)
auth = Auth(draft.initiator)
registration = draft.register(
auth=auth,
save=True
)
registration_choice = self.meta['registration_choice']
if registration_choice == 'immediate':
sanction = functools.partial(registration.require_approval, draft.initiator)
elif registration_choice == 'embargo':
sanction = functools.partial(
registration.embargo_registration,
draft.initiator,
parse_date(self.meta.get('embargo_end_date'), ignoretz=True)
)
else:
raise ValueError("'registration_choice' must be either 'embargo' or 'immediate'")
sanction(notify_initiator_on_complete=True)
def _on_reject(self, user, *args, **kwargs):
from website.project.model import DraftRegistration
# clear out previous registration options
self.meta = {}
self.save()
draft = DraftRegistration.find_one(
Q('approval', 'eq', self)
)
self._send_rejection_email(draft.initiator, draft)
class EmbargoTerminationApproval(EmailApprovableSanction):
DISPLAY_NAME = 'Embargo Termination Request'
SHORT_NAME = 'embargo_termination_approval'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
embargoed_registration = fields.ForeignField('node')
def _get_registration(self):
return self.embargoed_registration
def _view_url_context(self, user_id, node):
registration = node or self._get_registration()
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
approval_token = user_approval_state.get('approval_token')
if approval_token:
registration = self._get_registration()
node_id = user_approval_state.get('node_id', registration._id)
return {
'node_id': node_id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
rejection_token = user_approval_state.get('rejection_token')
if rejection_token:
root_registration = self._get_registration()
node_id = user_approval_state.get('node_id', root_registration._id)
return {
'node_id': node_id,
'token': rejection_token,
}
def _email_template_context(self, user, node, is_authorizer=False, urls=None):
context = super(EmbargoTerminationApproval, self)._email_template_context(
user,
node,
is_authorizer=is_authorizer
)
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id, node))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.EMBARGO_TERMINATION_PENDING_TIME.days * 24
registration = self._get_registration()
context.update({
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'approval_link': approval_link,
'project_name': registration.title,
'disapproval_link': disapproval_link,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
'approval_time_span': approval_time_span,
})
else:
context.update({
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
})
return context
def _on_complete(self, user=None):
super(EmbargoTerminationApproval, self)._on_complete(user)
registration = self._get_registration()
registration.terminate_embargo(Auth(user) if user else None)
def _on_reject(self, user=None):
# Just forget this ever happened.
self.embargoed_registration.embargo_termination_approval = None
self.embargoed_registration.save()
| apache-2.0 | 2,938,694,678,252,348,000 | 36.140859 | 128 | 0.609231 | false | 4.05962 | false | false | false | 0.001614 |
zaquestion/vendttp | server/util.py | 3 | 1400 | import os, time, random, urllib, hashlib, json
if os.path.exists('settings.py'):
import settings
else:
import settings_default as settings
if os.path.exists('credentials.py'):
import credentials
else:
raw_input("""!! Fatal Error: Couldn't find credentials file.
!! Please copy `credentials_default.py` as `credentials.py` and add the Vending
!! Machine credentials.
[ENTER] to exit.""")
exit()
class InsufficientFunds(Exception): pass
class SoldOut(Exception): pass
class BadItem(Exception): pass
def make_creds():
# Build one-shot request credentials: a Unix timestamp plus a random
# 32-bit nonce, signed by SHA-256 hashing them together with the
# machine's private key.
app_id = credentials.APP_ID
curtime = str(int(time.time()))
randint = str(random.randint(0, pow(2, 32) - 1))
signature = hashlib.sha256(curtime + randint + credentials.PRIVATE_KEY) \
.hexdigest()
return app_id, curtime, randint, signature
class URLOpenError(IOError):
def __init__(self, ioerror):
IOError.__init__(self, *ioerror.args)
class JSONDecodeError(ValueError):
def __init__(self, valueerror):
ValueError.__init__(self, *valueerror.args)
def get(url, get_data = None, post_data = None):
if get_data != None:
url += "?" + urllib.urlencode(get_data)
if post_data != None:
post_data = urllib.urlencode(post_data)
try:
response = urllib.urlopen(url, post_data).read()
except IOError as e:
raise URLOpenError(e)
try:
return json.loads(response)
except ValueError as e:
raise JSONDecodeError(e)
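
# Illustrative use of make_creds() with get() (the endpoint URL and parameter
# names below are hypothetical, not part of this codebase):
#
# app_id, curtime, randint, signature = make_creds()
# data = get("https://vendor.example/api/balance",
# get_data = {'appid': app_id, 'timestamp': curtime,
# 'random': randint, 'signature': signature})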
| gpl-2.0 | 8,943,768,291,394,450,000 | 27.571429 | 79 | 0.692143 | false | 3.439803 | false | false | false | 0.024286 |
aspectron/jsx | extern/boost/libs/python/test/newtest.py | 46 | 3629 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from m1 import *
>>> from m2 import *
Prove that we get an appropriate error from trying to return a type
for which we have no registered to_python converter
>>> def check_unregistered(f, msgprefix):
... try:
... f(1)
... except TypeError, x:
... if not str(x).startswith(msgprefix):
... print str(x)
... else:
... print 'expected a TypeError'
...
>>> check_unregistered(make_unregistered, 'No to_python (by-value) converter found for C++ type')
>>> check_unregistered(make_unregistered2, 'No Python class registered for C++ class')
>>> n = new_noddy()
>>> s = new_simple()
>>> unwrap_int(n)
42
>>> unwrap_int_ref(n)
42
>>> unwrap_int_const_ref(n)
42
>>> unwrap_simple(s)
'hello, world'
>>> unwrap_simple_ref(s)
'hello, world'
>>> unwrap_simple_const_ref(s)
'hello, world'
>>> unwrap_int(5)
5
Can't get a non-const reference to a built-in integer object
>>> try:
... unwrap_int_ref(7)
... except: pass
... else: print 'no exception'
>>> unwrap_int_const_ref(9)
9
>>> wrap_int(n)
42
>>> try: wrap_int_ref(n)
... except: pass
... else: print 'no exception'
>>> wrap_int_const_ref(n)
42
>>> unwrap_simple_ref(wrap_simple(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_ref(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_const_ref(s))
'hello, world'
>>> f(s)
12
>>> unwrap_simple(g(s))
'hello, world'
>>> f(g(s))
12
>>> f_mutable_ref(g(s))
12
>>> f_const_ptr(g(s))
12
>>> f_mutable_ptr(g(s))
12
>>> f2(g(s))
12
Create an extension class which wraps "complicated" (init1 and get_n)
are a complicated constructor and member function, respectively.
>>> c1 = complicated(s, 99)
>>> c1.get_n()
99
>>> c2 = complicated(s)
>>> c2.get_n()
0
a quick regression test for a bug where None could be converted
to the target of any member function. To see it, we need to
access the __dict__ directly, to bypass the type check supplied
by the Method property which wraps the method when accessed as an
attribute.
>>> try: A.__dict__['name'](None)
... except TypeError: pass
... else: print 'expected an exception!'
>>> a = A()
>>> b = B()
>>> c = C()
>>> d = D()
>>> take_a(a).name()
'A'
>>> try:
... take_b(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_c(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(a)
... except: pass
... else: print 'no exception'
------
>>> take_a(b).name()
'A'
>>> take_b(b).name()
'B'
>>> try:
... take_c(b)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(b)
... except: pass
... else: print 'no exception'
-------
>>> take_a(c).name()
'A'
>>> try:
... take_b(c)
... except: pass
... else: print 'no exception'
>>> take_c(c).name()
'C'
>>> try:
... take_d(c)
... except: pass
... else: print 'no exception'
-------
>>> take_a(d).name()
'A'
>>> take_b(d).name()
'B'
>>> take_c(d).name()
'C'
>>> take_d(d).name()
'D'
>>> take_d_shared_ptr(d).name()
'D'
>>> d_as_a = d_factory()
>>> dd = take_d(d_as_a)
>>> dd.name()
'D'
>>> print g.__doc__.splitlines()[1]
g( (Simple)arg1) -> Simple :
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| mit | 3,414,530,847,523,026,000 | 16.616505 | 97 | 0.58501 | false | 2.763899 | false | false | false | 0.002756 |
gitcoinco/web | app/dashboard/embed.py | 1 | 10901 | from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.utils import timezone
from django.utils.cache import patch_response_headers
import requests
from dashboard.models import Bounty
from git.utils import get_user, org_name
from PIL import Image, ImageDraw, ImageFont
from ratelimit.decorators import ratelimit
AVATAR_BASE = 'assets/other/avatars/'
def wrap_text(text, w=30):
"""Greedily wrap `text`: a line is flushed once it grows past `w` characters, and every emitted line (including the first) is preceded by a newline."""
new_text = ""
new_sentence = ""
for word in text.split(" "):
delim = " " if new_sentence != "" else ""
new_sentence = new_sentence + delim + word
if len(new_sentence) > w:
new_text += "\n" + new_sentence
new_sentence = ""
new_text += "\n" + new_sentence
return new_text
def summarize_bounties(bounties):
val_usdt = sum(bounties.values_list('_val_usd_db', flat=True))
if val_usdt < 1:
return False, ""
currency_to_value = {bounty.token_name: 0.00 for bounty in bounties}
for bounty in bounties:
currency_to_value[bounty.token_name] += float(bounty.value_true)
other_values = ", ".join([
f"{round(value, 2)} {token_name}"
for token_name, value in currency_to_value.items()
])
is_plural = 's' if bounties.count() != 1 else ''
return True, f"Total: {bounties.count()} issue{is_plural}, {val_usdt} USD, {other_values}"
@ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True)
def stat(request, key):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
from marketing.models import Stat
limit = 10
weekly_stats = Stat.objects.filter(key=key).order_by('created_on')
# weekly stats only
weekly_stats = weekly_stats.filter(
created_on__hour=1,
created_on__week_day=1
).filter(
created_on__gt=(timezone.now() - timezone.timedelta(weeks=7))
)
daily_stats = Stat.objects.filter(key=key) \
.filter(
created_on__gt=(timezone.now() - timezone.timedelta(days=7))
).order_by('created_on')
daily_stats = daily_stats.filter(created_on__hour=1) # daily stats only
stats = weekly_stats if weekly_stats.count() < limit else daily_stats
fig = Figure(figsize=(1.6, 1.5), dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
x = []
y = []
for stat in stats:
x.append(stat.created_on)
y.append(stat.val)
x = x[-1 * limit:]
y = y[-1 * limit:]
ax.plot_date(x, y, '-')
ax.set_axis_off()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
if stats.count() > 1:
ax.set_title("Usage over time", y=0.9)
else:
ax.set_title("(Not enough data)", y=0.3)
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
@ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True)
def embed(request):
# default response
could_not_find = Image.new('RGBA', (1, 1), (0, 0, 0, 0))
err_response = HttpResponse(content_type="image/jpeg")
could_not_find.save(err_response, "JPEG")
# Get maxAge GET param if provided, else default on the small side
max_age = int(request.GET.get('maxAge', 3600))
# params
repo_url = request.GET.get('repo', False)
if not repo_url or 'github.com' not in repo_url:
return err_response
try:
badge = request.GET.get('badge', False)
if badge:
open_bounties = Bounty.objects.current() \
.filter(
github_url__startswith=repo_url,
network='mainnet',
idx_status__in=['open']
)
tmpl = loader.get_template('svg_badge.txt')
response = HttpResponse(
tmpl.render({'bounties_count': open_bounties.count()}),
content_type='image/svg+xml',
)
patch_response_headers(response, cache_timeout=max_age)
return response
# get avatar of repo
_org_name = org_name(repo_url)
avatar = None
filename = f"{_org_name}.png"
filepath = 'assets/other/avatars/' + filename
try:
avatar = Image.open(filepath, 'r').convert("RGBA")
except IOError:
remote_user = get_user(_org_name)
if not remote_user.get('avatar_url', False):
return JsonResponse({'msg': 'invalid user'}, status=422)
remote_avatar_url = remote_user['avatar_url']
r = requests.get(remote_avatar_url, stream=True)
chunk_size = 20000
with open(filepath, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
avatar = Image.open(filepath, 'r').convert("RGBA")
# make transparent
datas = avatar.getdata()
new_data = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
avatar.putdata(new_data)
avatar.save(filepath, "PNG")
# get issues
length = request.GET.get('len', 10)
super_bounties = Bounty.objects.current() \
.filter(
github_url__startswith=repo_url,
network='mainnet',
idx_status__in=['open', 'started', 'submitted']
).order_by('-_val_usd_db')
bounties = super_bounties[:length]
# config
bounty_height = 200
bounty_width = 572
font = 'assets/v2/fonts/futura/FuturaStd-Medium.otf'
width = 1776
height = 576
# setup
img = Image.new("RGBA", (width, height), (255, 255, 255))
draw = ImageDraw.Draw(img)
black = (0, 0, 0)
gray = (102, 102, 102)
h1 = ImageFont.truetype(font, 36, encoding="unic")
h2_thin = ImageFont.truetype(font, 36, encoding="unic")
p = ImageFont.truetype(font, 24, encoding="unic")
# background
background_image = 'assets/v2/images/embed-widget/background.png'
back = Image.open(background_image, 'r').convert("RGBA")
offset = 0, 0
img.paste(back, offset)
# repo logo
icon_size = (184, 184)
avatar.thumbnail(icon_size, Image.ANTIALIAS)
offset = 195, 148
img.paste(avatar, offset, avatar)
img_org_name = ImageDraw.Draw(img)
img_org_name_size = img_org_name.textsize(_org_name, h1)
img_org_name.multiline_text(
align="left",
xy=(287 - img_org_name_size[0] / 2, 360),
text=_org_name,
fill=black,
font=h1,
)
draw.multiline_text(
align="left",
xy=(110, 410),
text="supports funded issues",
fill=black,
font=h1,
)
# put bounty list in there
i = 0
for bounty in bounties[:4]:
i += 1
# execute
line_size = 2
# Limit text to 28 chars, appending an ellipsis if truncated
text = f"{bounty.title_or_desc}"
text = (text[:28] + '...') if len(text) > 28 else text
x = 620 + (int((i-1)/line_size) * (bounty_width))
y = 230 + (abs(i % line_size-1) * bounty_height)
draw.multiline_text(align="left", xy=(x, y), text=text, fill=black, font=h2_thin)
unit = 'day'
num = int(round((bounty.expires_date - timezone.now()).days, 0))
if num == 0:
unit = 'hour'
num = int(round((bounty.expires_date - timezone.now()).seconds / 3600, 0))
unit = unit + ("s" if num != 1 else "")
draw.multiline_text(
align="left",
xy=(x, y - 40),
text=f"Expires in {num} {unit}:",
fill=gray,
font=p,
)
bounty_eth_background = Image.new("RGBA", (200, 56), (231, 240, 250))
bounty_usd_background = Image.new("RGBA", (200, 56), (214, 251, 235))
img.paste(bounty_eth_background, (x, y + 50))
img.paste(bounty_usd_background, (x + 210, y + 50))
tmp = ImageDraw.Draw(img)
bounty_value_size = tmp.textsize(f"{round(bounty.value_true, 2)} {bounty.token_name}", p)
draw.multiline_text(
align="left",
xy=(x + 100 - bounty_value_size[0]/2, y + 67),
text=f"{round(bounty.value_true, 2)} {bounty.token_name}",
fill=(44, 35, 169),
font=p,
)
bounty_value_size = tmp.textsize(f"{round(bounty.value_in_usdt_now, 2)} USD", p)
draw.multiline_text(
align="left",
xy=(x + 310 - bounty_value_size[0]/2, y + 67),
text=f"{round(bounty.value_in_usdt_now, 2)} USD",
fill=(45, 168, 116),
font=p,
)
# blank slate
if bounties.count() == 0:
draw.multiline_text(
align="left",
xy=(760, 320),
text="No active issues. Post a funded issue at: https://gitcoin.co",
fill=gray,
font=h1,
)
if bounties.count() != 0:
text = 'Browse issues at: https://gitcoin.co/explorer'
draw.multiline_text(
align="left",
xy=(64, height - 70),
text=text,
fill=gray,
font=p,
)
draw.multiline_text(
align="left",
xy=(624, 120),
text="Recently funded issues:",
fill=(62, 36, 251),
font=p,
)
_, value = summarize_bounties(super_bounties)
value_size = tmp.textsize(value, p)
draw.multiline_text(
align="left",
xy=(1725 - value_size[0], 120),
text=value,
fill=gray,
font=p,
)
line_table_header = Image.new("RGBA", (1100, 6), (62, 36, 251))
img.paste(line_table_header, (624, 155))
# Resize back to output size for better anti-alias
img = img.resize((888, 288), Image.LANCZOS)
# Return image with right content-type
response = HttpResponse(content_type="image/png")
img.save(response, "PNG")
patch_response_headers(response, cache_timeout=max_age)
return response
except IOError as e:
print(e)
return err_response
| agpl-3.0 | -4,671,346,504,243,543,000 | 32.336391 | 101 | 0.531603 | false | 3.600066 | false | false | false | 0.000734 |
luxnovalabs/enjigo_door | web_interface/django/contrib/auth/views.py | 18 | 10471 | try:
from urllib.parse import urlparse, urlunparse
except ImportError: # Python 2
from urlparse import urlparse, urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, QueryDict
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import ugettext as _
from django.shortcuts import resolve_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == "POST":
form = authentication_form(data=request.POST)
if form.is_valid():
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
request.session.set_test_cookie()
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
current_app=None, extra_context=None):
"""
Logs out the user and displays 'You are logged out' message.
"""
auth_logout(request)
if redirect_field_name in request.REQUEST:
next_page = request.REQUEST[redirect_field_name]
# Security check -- don't allow redirection to a different host.
if not is_safe_url(url=next_page, host=request.get_host()):
next_page = request.path
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
current_site = get_current_site(request)
context = {
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out')
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
def logout_then_login(request, login_url=None, current_app=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return logout(request, login_url, current_app=current_app, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
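#
# A minimal URLconf sketch wiring the four views together (the URL patterns
# and the string-based view references below are illustrative for this era
# of Django, not mandated):
#
# from django.conf.urls import patterns, url
#
# urlpatterns = patterns('django.contrib.auth.views',
# url(r'^password/reset/$', 'password_reset'),
# url(r'^password/reset/done/$', 'password_reset_done'),
# url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
# 'password_reset_confirm'),
# url(r'^reset/done/$', 'password_reset_complete'),
# )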
@csrf_protect
def password_reset(request, is_admin_site=False,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
current_app=None,
extra_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
}
if is_admin_site:
opts = dict(opts, domain_override=request.get_host())
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
def password_reset_done(request,
template_name='registration/password_reset_done.html',
current_app=None, extra_context=None):
context = {}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
current_app=None, extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
UserModel = get_user_model()
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
try:
uid = urlsafe_base64_decode(str(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(None)
else:
validlink = False
form = None
context = {
'form': form,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
current_app=None, extra_context=None):
context = {
'login_url': resolve_url(settings.LOGIN_URL)
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
@login_required
def password_change_done(request,
template_name='registration/password_change_done.html',
current_app=None, extra_context=None):
context = {}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
| unlicense | -4,979,753,466,850,738,000 | 37.076364 | 112 | 0.638716 | false | 4.20691 | false | false | false | 0.001242 |
kobolabs/calibre | src/calibre/gui2/preferences/save_template.py | 4 | 3143 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import QWidget, pyqtSignal
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.preferences.save_template_ui import Ui_Form
from calibre.library.save_to_disk import FORMAT_ARG_DESCS, preprocess_template
from calibre.utils.formatter import validation_formatter
from calibre.gui2.dialogs.template_dialog import TemplateDialog
class SaveTemplate(QWidget, Ui_Form):
changed_signal = pyqtSignal()
def __init__(self, *args):
QWidget.__init__(self, *args)
Ui_Form.__init__(self)
self.setupUi(self)
def initialize(self, name, default, help):
variables = sorted(FORMAT_ARG_DESCS.keys())
rows = []
for var in variables:
rows.append(u'<tr><td>%s</td><td> </td><td>%s</td></tr>'%
(var, FORMAT_ARG_DESCS[var]))
rows.append(u'<tr><td>%s </td><td> </td><td>%s</td></tr>'%(
_('Any custom field'),
_('The lookup name of any custom field (these names begin with "#").')))
table = u'<table>%s</table>'%(u'\n'.join(rows))
self.template_variables.setText(table)
self.opt_template.initialize(name+'_template_history',
default, help)
self.opt_template.editTextChanged.connect(self.changed)
self.opt_template.currentIndexChanged.connect(self.changed)
self.option_name = name
self.open_editor.clicked.connect(self.do_open_editor)
def do_open_editor(self):
t = TemplateDialog(self, self.opt_template.text())
t.setWindowTitle(_('Edit template'))
if t.exec_():
self.opt_template.set_value(t.rule[1])
def changed(self, *args):
self.changed_signal.emit()
def validate(self):
'''
Do a syntax check on the format string. Doing a semantic check
(verifying that the fields exist) is not useful in the presence of
custom fields, because they may or may not exist.
'''
tmpl = preprocess_template(self.opt_template.text())
try:
t = validation_formatter.validate(tmpl)
if t.find(validation_formatter._validation_string) < 0:
return question_dialog(self, _('Constant template'),
_('The template contains no {fields}, so all '
'books will have the same name. Is this OK?'))
except Exception as err:
error_dialog(self, _('Invalid template'),
'<p>'+_('The template %s is invalid:')%tmpl + \
'<br>'+str(err), show=True)
return False
return True
def set_value(self, val):
self.opt_template.set_value(val)
def save_settings(self, config, name):
val = unicode(self.opt_template.text())
config.set(name, val)
self.opt_template.save_history(self.option_name+'_template_history')
| gpl-3.0 | -3,911,105,166,098,977,000 | 35.126437 | 84 | 0.610881 | false | 3.764072 | false | false | false | 0.006363 |
cubarco/tunasync | tunasync/jobs.py | 1 | 4053 | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import sh
import sys
from setproctitle import setproctitle
import signal
import Queue
import traceback
def run_job(sema, child_q, manager_q, provider, **settings):
aquired = False
setproctitle("tunasync-{}".format(provider.name))
def before_quit(*args):
provider.terminate()
if aquired:
print("{} release semaphore".format(provider.name))
sema.release()
sys.exit(0)
def sleep_wait(timeout):
try:
msg = child_q.get(timeout=timeout)
if msg == "terminate":
manager_q.put(("CONFIG_ACK", (provider.name, "QUIT")))
return True
except Queue.Empty:
return False
signal.signal(signal.SIGTERM, before_quit)
if provider.delay > 0:
if sleep_wait(provider.delay):
return
max_retry = settings.get("max_retry", 1)
def _real_run(idx=0, stage="job_hook", ctx=None):
"""\
4 stages:
0 -> job_hook, 1 -> set_retry, 2 -> exec_hook, 3 -> exec
"""
assert(ctx is not None)
if stage == "exec":
# exec_job
try:
provider.run(ctx=ctx)
provider.wait()
except sh.ErrorReturnCode:
status = "fail"
else:
status = "success"
return status
elif stage == "set_retry":
# enter stage 3 with retry
for retry in range(max_retry):
status = "syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
print("start syncing {}, retry: {}".format(provider.name, retry))
status = _real_run(idx=0, stage="exec_hook", ctx=ctx)
if status == "success":
break
return status
# job_hooks
elif stage == "job_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="set_retry", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_job, hook.after_job
status = "pre-syncing"
elif stage == "exec_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="exec", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_exec, hook.after_exec
status = "syncing"
try:
# print("%s run before_%s, %d" % (provider.name, stage, idx))
hook_before(provider=provider, ctx=ctx)
status = _real_run(idx=idx+1, stage=stage, ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
# print("%s run after_%s, %d" % (provider.name, stage, idx))
# job may break when syncing
if status != "success":
status = "fail"
try:
hook_after(provider=provider, status=status, ctx=ctx)
except Exception:
traceback.print_exc()
return status
while 1:
try:
sema.acquire(True)
except:
break
aquired = True
ctx = {} # put context info in it
ctx['current_dir'] = provider.local_dir
ctx['mirror_name'] = provider.name
status = "pre-syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
try:
status = _real_run(idx=0, stage="job_hook", ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
sema.release()
aquired = False
print("syncing {} finished, sleep {} minutes for the next turn".format(
provider.name, provider.interval
))
manager_q.put(("UPDATE", (provider.name, status, ctx)))
if sleep_wait(timeout=provider.interval * 60):
break
# vim: ts=4 sw=4 sts=4 expandtab
| gpl-3.0 | -5,656,886,585,572,669,000 | 29.022222 | 81 | 0.515174 | false | 4.085685 | false | false | false | 0.00074 |
DESHRAJ/fjord | vendor/packages/translate-toolkit/translate/storage/jsonl10n.py | 4 | 7484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007,2009-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Class that manages JSON data files for translation
JSON is an acronym for JavaScript Object Notation, it is an open standard
designed for human-readable data interchange.
JSON basic types:
- Number (integer or real)
- String (double-quoted Unicode with backslash escaping)
- Boolean (true or false)
- Array (an ordered sequence of values, comma-separated and enclosed
in square brackets)
- Object (a collection of key:value pairs, comma-separated and
enclosed in curly braces)
- null
Example::
{
"firstName": "John",
"lastName": "Smith",
"age": 25,
"address": {
"streetAddress": "21 2nd Street",
"city": "New York",
"state": "NY",
"postalCode": "10021"
},
"phoneNumber": [
{
"type": "home",
"number": "212 555-1234"
},
{
"type": "fax",
"number": "646 555-4567"
}
]
}
TODO:
- Handle \u and other escapes in Unicode
- Manage data type storage and conversion. True -> "True" -> True
- Sort the extracted data to the order of the JSON file
"""
import os
from StringIO import StringIO
try:
import json as json # available since Python 2.6
except ImportError:
import simplejson as json # API compatible with the json module
from translate.storage import base
class JsonUnit(base.TranslationUnit):
"""A JSON entry"""
def __init__(self, source=None, ref=None, item=None, encoding="UTF-8"):
self._id = None
self._item = str(os.urandom(30))
if item is not None:
self._item = item
self._ref = {}
if ref is not None:
self._ref = ref
if ref is None and item is None:
self._ref[self._item] = ""
if source:
self.source = source
super(JsonUnit, self).__init__(source)
def getsource(self):
return self.gettarget()
def setsource(self, source):
self.settarget(source)
source = property(getsource, setsource)
def gettarget(self):
def change_type(value):
if isinstance(value, bool):
return str(value)
return value
return newvalue
if isinstance(self._ref, list):
return change_type(self._ref[self._item])
elif isinstance(self._ref, dict):
return change_type(self._ref[self._item])
def settarget(self, target):
def change_type(oldvalue, newvalue):
if isinstance(oldvalue, bool):
newvalue = bool(newvalue)
return newvalue
if isinstance(self._ref, list):
self._ref[int(self._item)] = change_type(self._ref[int(self._item)],
target)
elif isinstance(self._ref, dict):
self._ref[self._item] = change_type(self._ref[self._item], target)
else:
raise ValueError("We don't know how to handle:\n"
"Type: %s\n"
"Value: %s" % (type(self._ref), target))
target = property(gettarget, settarget)
def setid(self, value):
self._id = value
def getid(self):
return self._id
def getlocations(self):
return [self.getid()]
class JsonFile(base.TranslationStore):
"""A JSON file"""
UnitClass = JsonUnit
def __init__(self, inputfile=None, unitclass=UnitClass, filter=None):
"""construct a JSON file, optionally reading in from inputfile."""
base.TranslationStore.__init__(self, unitclass=unitclass)
self._filter = filter
self.filename = ''
self._file = u''
if inputfile is not None:
self.parse(inputfile)
def __str__(self):
return json.dumps(self._file, sort_keys=True,
indent=4, ensure_ascii=False).encode('utf-8')
def _extract_translatables(self, data, stop=None, prev="", name_node=None,
name_last_node=None, last_node=None):
"""Recursive function to extract items from the data files
:param data: the current branch to walk down
:param stop: a list of leaves to extract or None to extract everything
:param prev: the heirarchy of the tree at this iteration
:param name_node:
:param name_last_node: the name of the last node
:param last_node: the last list or dict
"""
if isinstance(data, dict):
for k, v in data.iteritems():
for x in self._extract_translatables(v, stop,
"%s.%s" % (prev, k),
k, None, data):
yield x
elif isinstance(data, list):
for i, item in enumerate(data):
for x in self._extract_translatables(item, stop,
"%s[%s]" % (prev, i),
i, name_node, data):
yield x
# apply filter
elif (stop is None \
or (isinstance(last_node, dict) and name_node in stop) \
or (isinstance(last_node, list) and name_last_node in stop)):
if isinstance(data, str) or isinstance(data, unicode):
yield (prev, data, last_node, name_node)
elif isinstance(data, bool):
yield (prev, str(data), last_node, name_node)
elif data is None:
pass
else:
raise ValueError("We don't handle these values:\n"
"Type: %s\n"
"Data: %s\n"
"Previous: %s" % (type(data), data, prev))
def parse(self, input):
"""parse the given file or file source string"""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
src = input.read()
input.close()
input = src
if isinstance(input, str):
input = StringIO(input)
try:
self._file = json.load(input)
except ValueError, e:
raise base.ParseError(e.message)
for k, data, ref, item in self._extract_translatables(self._file,
stop=self._filter):
unit = self.UnitClass(data, ref, item)
unit.setid(k)
self.addunit(unit)
| bsd-3-clause | -8,534,990,308,302,248,000 | 32.560538 | 80 | 0.547969 | false | 4.264387 | false | false | false | 0.001203 |
newtonne/trellis | lib/trellis/plugins/vars/version.py | 4 | 2049 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import __version__
from ansible.errors import AnsibleError
from distutils.version import LooseVersion
from operator import eq, ge, gt
from sys import version_info
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
version_requirement = '2.8.0'
version_tested_max = '2.9.10'
python3_required_version = '2.5.3'
if version_info[0] == 3 and not ge(LooseVersion(__version__), LooseVersion(python3_required_version)):
raise AnsibleError(('Ansible >= {} is required when using Python 3.\n'
'Either downgrade to Python 2 or update your Ansible version to {}.').format(python3_required_version, python3_required_version))
if not ge(LooseVersion(__version__), LooseVersion(version_requirement)):
raise AnsibleError(('Trellis no longer supports Ansible {}.\n'
'Please upgrade to Ansible {} or higher.').format(__version__, version_requirement))
elif gt(LooseVersion(__version__), LooseVersion(version_tested_max)):
display.warning(u'Your Ansible version is {} but this version of Trellis has only been tested for '
u'compatability with Ansible {} -> {}. It is advisable to check for Trellis updates or '
u'downgrade your Ansible version.'.format(__version__, version_requirement, version_tested_max))
if eq(LooseVersion(__version__), LooseVersion('2.5.0')):
display.warning(u'Your Ansible version is {}. Consider upgrading your Ansible version to avoid '
u'erroneous warnings such as `Removed restricted key from module data...`'.format(__version__))
# Import BaseVarsPlugin after Ansible version check.
# Otherwise import error for Ansible versions older than 2.4 would prevent display of version check message.
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
def get_vars(self, loader, path, entities, cache=True):
return {}
| mit | 5,998,044,075,033,520,000 | 44.533333 | 137 | 0.727672 | false | 4.122736 | false | false | false | 0.007321 |
mwmuni/LIGGGHTS_GUI | OpenGL/logs.py | 9 | 3153 | """Fix missing-API problems in logging module (circa Python 2.3)
Adds constants to the log objects.
Adds getException(err) to log objects to retrieve
formatted exception or err if traceback not available.
"""
import traceback, logging
from OpenGL._configflags import ERROR_LOGGING, FULL_LOGGING
getLog = logging.getLogger
def getException(error):
"""Get formatted traceback from exception"""
try:
return traceback.format_exc( limit=10 )
except Exception as err:
return str( error )
logging.Logger.getException = staticmethod( getException )
logging.Logger.err = logging.Logger.error
logging.Logger.DEBUG = logging.DEBUG
logging.Logger.WARN = logging.WARN
logging.Logger.INFO = logging.INFO
logging.Logger.ERR = logging.Logger.ERROR = logging.ERROR
if FULL_LOGGING:
getLog( 'OpenGL.calltrace' ).setLevel( logging.INFO )
class _LoggedFunction( object ):
"""Proxy that overrides __call__ to log arguments"""
def __init__( self, base, log ):
self.__dict__[''] = base
self.__dict__['log'] = log
def __setattr__( self, key, value ):
if key != '':
setattr( self.__dict__[''], key, value )
else:
self.__dict__[''] = value
def __getattr__( self, key ):
if key == '':
return self.__dict__['']
else:
return getattr( self.__dict__[''], key )
class _FullLoggedFunction( _LoggedFunction ):
"""Fully-logged function wrapper (logs all call params to OpenGL.calltrace)"""
_callTrace = getLog( 'OpenGL.calltrace' )
def __call__( self, *args, **named ):
argRepr = []
function = getattr( self, '' )
for arg in args:
argRepr.append( repr(arg) )
for key,value in named.items():
argRepr.append( '%s = %s'%( key,repr(value)) )
argRepr = ",".join( argRepr )
self._callTrace.info( '%s( %s )', function.__name__, argRepr )
try:
return function( *args, **named )
except Exception as err:
self.log.warn(
"""Failure on %s: %s""", function.__name__, self.log.getException( err )
)
raise
class _ErrorLoggedFunction ( _LoggedFunction ):
"""On-error-logged function wrapper"""
def __call__( self, *args, **named ):
function = getattr( self, '' )
try:
return function( *args, **named )
except Exception as err:
self.log.warn(
"""Failure on %s: %s""", function.__name__, self.log.getException( err )
)
raise
def logOnFail( function, log ):
"""Produce possible log-wrapped version of function
function -- callable object to be wrapped
log -- the log to which to log information
Uses ERROR_LOGGING and FULL_LOGGING
to determine whether/how to wrap the function.
"""
if ERROR_LOGGING or FULL_LOGGING:
if FULL_LOGGING:
loggedFunction = _FullLoggedFunction( function, log )
else:
loggedFunction = _ErrorLoggedFunction( function, log )
return loggedFunction
else:
return function
| gpl-3.0 | 2,120,488,149,775,024,000 | 33.648352 | 88 | 0.596575 | false | 4.143233 | false | false | false | 0.026007 |
disqus/graphite-web | webapp/graphite/browser/views.py | 30 | 7082 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import re
from django.conf import settings
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
from django.utils.html import escape
from graphite.account.models import Profile
from graphite.compat import HttpResponse
from graphite.util import getProfile, getProfileByUsername, json
from graphite.logger import log
from hashlib import md5
def header(request):
"View for the header frame of the browser UI"
context = {}
context['user'] = request.user
context['profile'] = getProfile(request)
context['documentation_url'] = settings.DOCUMENTATION_URL
context['login_url'] = settings.LOGIN_URL
return render_to_response("browserHeader.html", context)
def browser(request):
"View for the top-level frame of the browser UI"
context = {
'queryString': mark_safe(request.GET.urlencode()),
'target': request.GET.get('target')
}
if context['queryString']:
context['queryString'] = context['queryString'].replace('#','%23')
if context['target']:
context['target'] = context['target'].replace('#','%23') #js libs terminate a querystring on #
return render_to_response("browser.html", context)
def search(request):
query = request.POST.get('query')
if not query:
return HttpResponse("")
patterns = query.split()
regexes = [re.compile(p,re.I) for p in patterns]
def matches(s):
for regex in regexes:
if regex.search(s):
return True
return False
results = []
index_file = open(settings.INDEX_FILE)
for line in index_file:
if matches(line):
results.append( line.strip() )
if len(results) >= 100:
break
index_file.close()
result_string = ','.join(results)
return HttpResponse(result_string, content_type='text/plain')
def myGraphLookup(request):
"View for My Graphs navigation"
profile = getProfile(request,allowDefault=False)
assert profile
nodes = []
leafNode = {
'allowChildren' : 0,
'expandable' : 0,
'leaf' : 1,
}
branchNode = {
'allowChildren' : 1,
'expandable' : 1,
'leaf' : 0,
}
try:
path = request.GET.get('path', u'')
if path:
if path.endswith('.'):
userpath_prefix = path
else:
userpath_prefix = path + '.'
else:
userpath_prefix = u""
matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(userpath_prefix) ]
log.info( "myGraphLookup: username=%s, path=%s, userpath_prefix=%s, %ld graph to process" % (profile.user.username, path, userpath_prefix, len(matches)) )
branch_inserted = set()
leaf_inserted = set()
for graph in matches: #Now let's add the matching graph
isBranch = False
dotPos = graph.name.find( '.', len(userpath_prefix) )
if dotPos >= 0:
isBranch = True
name = graph.name[ len(userpath_prefix) : dotPos ]
if name in branch_inserted: continue
branch_inserted.add(name)
else:
name = graph.name[ len(userpath_prefix): ]
if name in leaf_inserted: continue
leaf_inserted.add(name)
node = {'text': escape(name)}
if isBranch:
node.update({'id': userpath_prefix + name + '.'})
node.update(branchNode)
else:
m = md5()
m.update(name.encode('utf-8'))
node.update( { 'id' : str(userpath_prefix + m.hexdigest()), 'graphUrl' : str(graph.url) } )
node.update(leafNode)
nodes.append(node)
except:
log.exception("browser.views.myGraphLookup(): could not complete request.")
if not nodes:
no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
no_graphs.update(leafNode)
nodes.append(no_graphs)
return json_response(nodes, request)
def userGraphLookup(request):
"View for User Graphs navigation"
user = request.GET.get('user')
path = request.GET['path']
if user:
username = user
graphPath = path[len(username)+1:]
elif '.' in path:
username, graphPath = path.split('.', 1)
else:
username, graphPath = path, None
nodes = []
branchNode = {
'allowChildren' : 1,
'expandable' : 1,
'leaf' : 0,
}
leafNode = {
'allowChildren' : 0,
'expandable' : 0,
'leaf' : 1,
}
try:
if not username:
profiles = Profile.objects.exclude(user__username='default')
for profile in profiles:
if profile.mygraph_set.count():
node = {
'text' : str(profile.user.username),
'id' : str(profile.user.username)
}
node.update(branchNode)
nodes.append(node)
else:
profile = getProfileByUsername(username)
assert profile, "No profile for username '%s'" % username
if graphPath:
prefix = graphPath.rstrip('.') + '.'
else:
prefix = ''
matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(prefix) ]
inserted = set()
for graph in matches:
relativePath = graph.name[ len(prefix): ]
nodeName = relativePath.split('.')[0]
if nodeName in inserted:
continue
inserted.add(nodeName)
if '.' in relativePath: # branch
node = {
'text' : escape(str(nodeName)),
'id' : str(username + '.' + prefix + nodeName + '.'),
}
node.update(branchNode)
else: # leaf
m = md5()
m.update(nodeName)
node = {
'text' : escape(str(nodeName)),
'id' : str(username + '.' + prefix + m.hexdigest()),
'graphUrl' : str(graph.url),
}
node.update(leafNode)
nodes.append(node)
except:
log.exception("browser.views.userLookup(): could not complete request for %s" % username)
if not nodes:
no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
no_graphs.update(leafNode)
nodes.append(no_graphs)
nodes.sort()
return json_response(nodes, request)
def json_response(nodes, request=None):
if request:
jsonp = request.REQUEST.get('jsonp', False)
else:
jsonp = False
#json = str(nodes) #poor man's json encoder for simple types
json_data = json.dumps(nodes)
if jsonp:
response = HttpResponse("%s(%s)" % (jsonp, json_data),
content_type="text/javascript")
else:
response = HttpResponse(json_data, content_type="application/json")
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
| apache-2.0 | 3,770,620,964,558,497,300 | 26.343629 | 158 | 0.628636 | false | 3.731296 | false | false | false | 0.023299 |
Rbeaty88/ginga | ginga/qtw/plugins/PixTable.py | 1 | 10590 | #
# PixTable.py -- Pixel Table plugin for fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga.qtw import ImageViewCanvasTypesQt as CanvasTypes
from ginga import GingaPlugin
class PixTable(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(PixTable, self).__init__(fv, fitsimage)
self.layertag = 'pixtable-canvas'
self.pan2mark = False
canvas = CanvasTypes.DrawingCanvas()
## canvas.enable_draw(True)
## canvas.set_drawtype('point', color='pink')
## canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('cursor-down', self.btndown_cb)
canvas.set_callback('none-move', self.motion_cb)
canvas.setSurface(self.fitsimage)
self.canvas = canvas
# For pixel table
self.pixtbl_radius = 2
self.sizes = [ 1, 2, 3, 4 ]
self.lastx = 0
self.lasty = 0
# For "marks" feature
self.mark_radius = 10
self.mark_style = 'cross'
self.mark_color = 'purple'
self.select_color = 'cyan'
self.marks = ['None']
self.mark_index = 0
self.mark_selected = None
def build_gui(self, container):
# Splitter is just to provide a way to size the graph
# to a reasonable size
vpaned = QtGui.QSplitter()
vpaned.setOrientation(QtCore.Qt.Vertical)
# Make the PixTable plot
twidget = QtHelp.VBox()
vbox1 = twidget.layout()
vbox1.setContentsMargins(4, 4, 4, 4)
vbox1.setSpacing(2)
fr = QtHelp.Frame("Pixel Values")
# Make the cuts plot
msgFont = self.fv.getFont('fixedFont', 10)
tw = QtGui.QLabel()
tw.setFont(msgFont)
tw.setWordWrap(False)
self.tw = tw
fr.layout().addWidget(tw, stretch=1, alignment=QtCore.Qt.AlignTop)
vbox1.addWidget(fr, stretch=1, alignment=QtCore.Qt.AlignTop)
hbox = QtHelp.HBox()
layout = hbox.layout()
layout.setSpacing(4)
cbox1 = QtHelp.ComboBox()
index = 0
for i in self.sizes:
j = 1 + i*2
name = "%dx%d" % (j, j)
cbox1.addItem(name)
index += 1
index = self.sizes.index(self.pixtbl_radius)
cbox1.setCurrentIndex(index)
cbox1.activated.connect(lambda val: self.set_cutout_size(cbox1))
cbox1.setToolTip("Select size of pixel table")
layout.addWidget(cbox1, stretch=0, alignment=QtCore.Qt.AlignLeft)
# control for selecting a mark
cbox2 = QtHelp.ComboBox()
for tag in self.marks:
cbox2.addItem(tag)
if self.mark_selected == None:
cbox2.setCurrentIndex(0)
else:
cbox2.show_text(lambda n: self.mark_selected(cbox2))
cbox2.activated.connect(lambda n: self.mark_select_cb(cbox2))
self.w.marks = cbox2
cbox2.setToolTip("Select a mark")
cbox2.setMinimumContentsLength(8)
layout.addWidget(cbox2, stretch=0, alignment=QtCore.Qt.AlignLeft)
btn1 = QtGui.QPushButton("Delete")
btn1.clicked.connect(self.clear_mark_cb)
btn1.setToolTip("Delete selected mark")
layout.addWidget(btn1, stretch=0, alignment=QtCore.Qt.AlignLeft)
btn2 = QtGui.QPushButton("Delete All")
btn2.clicked.connect(self.clear_all)
btn2.setToolTip("Clear all marks")
layout.addWidget(btn2, stretch=0, alignment=QtCore.Qt.AlignLeft)
vbox1.addWidget(hbox, stretch=0, alignment=QtCore.Qt.AlignLeft)
hbox = QtHelp.HBox()
layout = hbox.layout()
layout.setSpacing(4)
btn3 = QtGui.QCheckBox("Pan to mark")
btn3.setChecked(self.pan2mark)
btn3.stateChanged.connect(lambda w: self.pan2mark_cb(btn3))
btn3.setToolTip("Pan follows selected mark")
layout.addWidget(btn3, stretch=0, alignment=QtCore.Qt.AlignLeft)
vbox1.addWidget(hbox, stretch=0, alignment=QtCore.Qt.AlignLeft)
hbox = QtHelp.HBox()
layout = hbox.layout()
layout.setSpacing(3)
#btns.set_child_size(15, -1)
btn = QtGui.QPushButton("Close")
btn.clicked.connect(self.close)
layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
vbox1.addWidget(hbox, stretch=0, alignment=QtCore.Qt.AlignLeft)
vpaned.addWidget(twidget)
vpaned.addWidget(QtGui.QLabel(''))
container.addWidget(vpaned, stretch=1)
def select_mark(self, tag, pan=True):
# deselect the current selected mark, if there is one
if self.mark_selected != None:
try:
obj = self.canvas.getObjectByTag(self.mark_selected)
obj.setAttrAll(color=self.mark_color)
except:
# old object may have been deleted
pass
self.mark_selected = tag
if tag == None:
self.w.marks.show_text('None')
self.canvas.redraw(whence=3)
return
self.w.marks.show_text(tag)
obj = self.canvas.getObjectByTag(tag)
obj.setAttrAll(color=self.select_color)
self.lastx = obj.objects[0].x
self.lasty = obj.objects[0].y
if self.pan2mark and pan:
self.fitsimage.panset_xy(self.lastx, self.lasty, redraw=True)
self.canvas.redraw(whence=3)
self.redo()
def mark_select_cb(self, w):
index = w.currentIndex()
tag = self.marks[index]
if index == 0:
tag = None
self.select_mark(tag)
def pan2mark_cb(self, w):
self.pan2mark = w.checkState()
def clear_mark_cb(self):
tag = self.mark_selected
if tag == None:
return
index = self.marks.index(tag)
self.canvas.deleteObjectByTag(tag)
self.w.marks.removeItem(index)
self.marks.remove(tag)
self.w.marks.setCurrentIndex(0)
self.mark_selected = None
def clear_all(self):
self.canvas.deleteAllObjects()
for index in len(self.marks):
self.w.marks.removeItem(index)
self.marks = ['None']
self.w.marks.append_text('None')
self.w.marks.setCurrentIndex(0)
self.mark_selected = None
def plot(self, data, x1, y1, x2, y2, data_x, data_y, radius,
maxv=9):
width, height = self.fitsimage.get_dims(data)
maxval = numpy.nanmax(data)
minval = numpy.nanmin(data)
avgval = numpy.average(data)
maxdigits = 9
sep = ' '
# make format string for a row
fmt_cell = '%%%d.2f' % maxdigits
fmt_r = (fmt_cell + sep) * width
fmt_r = '%6d | ' + fmt_r
fmt_h = (('%%%dd' % maxdigits) + sep) * width
fmt_h = ('%6s | ') % '' + fmt_h
t = tuple([i + x1 + 1 for i in xrange(width)])
# format the buffer and insert into the tw
l = [fmt_h % t]
for i in xrange(height):
t = tuple([y1 + i + 1] + list(data[i]))
l.append(fmt_r % t)
l.append('')
# append statistics line
fmt_stat = " Min: %s Max: %s Avg: %s" % (fmt_cell, fmt_cell,
fmt_cell)
l.append(fmt_stat % (minval, maxval, avgval))
# update the text widget
self.tw.setText('\n'.join(l))
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_operation_channel(chname, str(self))
return True
def start(self):
#self.plot.set_titles(rtitle="Pixel Values")
# insert layer if it is not already
try:
obj = self.fitsimage.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
self.fitsimage.add(self.canvas, tag=self.layertag)
self.resume()
def stop(self):
# remove the canvas from the image
self.canvas.ui_setActive(False)
try:
self.fitsimage.deleteObjectByTag(self.layertag)
except:
pass
self.plot = None
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
self.redo()
def redo(self):
if self.plot == None:
return
# cut out and set the pixel table data
image = self.fitsimage.get_image()
data, x1, y1, x2, y2 = image.cutout_radius(self.lastx, self.lasty,
self.pixtbl_radius)
self.plot(data, x1, y1, x2, y2, self.lastx, self.lasty,
self.pixtbl_radius, maxv=9)
def set_cutout_size(self, w):
index = w.currentIndex()
self.pixtbl_radius = self.sizes[index]
def motion_cb(self, canvas, button, data_x, data_y):
if self.mark_selected != None:
return False
if self.plot == None:
return
self.lastx, self.lasty = data_x, data_y
self.redo()
return False
def btndown_cb(self, canvas, button, data_x, data_y):
self.add_mark(data_x, data_y)
return True
def add_mark(self, data_x, data_y, radius=None, color=None, style=None):
if not radius:
radius = self.mark_radius
if not color:
color = self.mark_color
if not style:
style = self.mark_style
self.logger.debug("Setting mark at %d,%d" % (data_x, data_y))
self.mark_index += 1
tag = 'mark%d' % (self.mark_index)
tag = self.canvas.add(CanvasTypes.CompoundObject(
CanvasTypes.Point(data_x, data_y, self.mark_radius,
style=style, color=color,
linestyle='solid'),
CanvasTypes.Text(data_x + 10, data_y, "%d" % (self.mark_index),
color=color)),
tag=tag)
self.marks.append(tag)
self.w.marks.append_text(tag)
self.select_mark(tag, pan=False)
def __str__(self):
return 'pixtable'
#END
| bsd-3-clause | -7,415,328,210,901,128,000 | 31.685185 | 76 | 0.564778 | false | 3.577703 | false | false | false | 0.004155 |
jolyonb/edx-platform | cms/envs/bok_choy.py | 1 | 6556 | # -*- coding: utf-8 -*-
"""
Settings for Bok Choy tests that are used when running Studio.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
from the same directory.
"""
import os
from path import Path as path
from django.utils.translation import ugettext_lazy
from openedx.core.release import RELEASE_LINE
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy_docker' if 'BOK_CHOY_HOSTNAME' in os.environ else 'bok_choy'
os.environ['CONFIG_ROOT'] = path(__file__).abspath().dirname()
os.environ['STUDIO_CFG'] = str.format("{config_root}/{service_variant}.yml",
config_root=os.environ['CONFIG_ROOT'],
service_variant=os.environ['SERVICE_VARIANT'])
from .production import * # pylint: disable=wildcard-import, unused-wildcard-import, wrong-import-position
######################### Testing overrides ####################################
# Redirect to the test_root folder within the repo
TEST_ROOT = REPO_ROOT / "test_root"
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
DATA_DIR = TEST_ROOT / "data"
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
# Needed to enable licensing on video modules
XBLOCK_SETTINGS.update({'VideoDescriptor': {'licensing_enabled': True}})
# Capture the console log via template includes, until webdriver supports log capture again
CAPTURE_CONSOLE_LOG = True
PLATFORM_NAME = ugettext_lazy(u"édX")
PLATFORM_DESCRIPTION = ugettext_lazy(u"Open édX Platform")
STUDIO_NAME = ugettext_lazy(u"Your Platform 𝓢𝓽𝓾𝓭𝓲𝓸")
STUDIO_SHORT_NAME = ugettext_lazy(u"𝓢𝓽𝓾𝓭𝓲𝓸")
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
]
STATICFILES_DIRS = [
(TEST_ROOT / "staticfiles" / "cms").abspath(),
]
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = TEST_ROOT / "staticfiles" / "cms" / "webpack-stats.json"
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
FEATURES['RESTRICT_AUTOMATIC_AUTH'] = False
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
FEATURES['ENABLE_MOBILE_REST_API'] = True # Enable video bumper in Studio
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings
FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True
# Whether archived courses (courses with end dates in the past) should be
# shown in Studio in a separate list.
FEATURES['ENABLE_SEPARATE_ARCHIVED_COURSES'] = True
# Enable support for OpenBadges accomplishments
FEATURES['ENABLE_OPENBADGES'] = True
# Enable partner support link in Studio footer
PARTNER_SUPPORT_EMAIL = '[email protected]'
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['TEST_TIMEOUT'] = 5000
YOUTUBE_HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1')
YOUTUBE['API'] = "http://{0}:{1}/get_youtube_api/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://{0}:{1}/test_youtube/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "{0}:{1}/test_transcripts_youtube/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
FEATURES['ENABLE_COURSEWARE_INDEX'] = True
FEATURES['ENABLE_LIBRARY_INDEX'] = True
FEATURES['ORGANIZATIONS_APP'] = True
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# this secret key should be the same as lms/envs/bok_choy.py's
SECRET_KEY = "very_secret_bok_choy_key"
LMS_ROOT_URL = "http://localhost:8003"
if RELEASE_LINE == "master":
# On master, acceptance tests use edX books, not the default Open edX books.
HELP_TOKENS_BOOKS = {
'learner': 'https://edx.readthedocs.io/projects/edx-guide-for-students',
'course_author': 'https://edx.readthedocs.io/projects/edx-partner-course-staff',
}
########################## VIDEO TRANSCRIPTS STORAGE ############################
VIDEO_TRANSCRIPTS_SETTINGS = dict(
VIDEO_TRANSCRIPTS_MAX_BYTES=3 * 1024 * 1024, # 3 MB
STORAGE_KWARGS=dict(
location=MEDIA_ROOT,
base_url=MEDIA_URL,
),
DIRECTORY_PREFIX='video-transcripts/',
)
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
| agpl-3.0 | -8,692,064,918,802,202,000 | 35.824859 | 107 | 0.682418 | false | 3.493033 | true | false | false | 0.003836 |
mmerce/python | bigml/model.py | 1 | 30058 | # -*- coding: utf-8 -*-
#
# Copyright 2013-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A local Predictive Model.
This module defines a Model to make predictions locally or
embedded into your application without needing to send requests to
BigML.io.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
You can also visualize your predictive model in IF-THEN rule format
and even generate a python function that implements the model.
Example usage (assuming that you have previously set up the BIGML_USERNAME
and BIGML_API_KEY environment variables and that you own the model/id below):
from bigml.api import BigML
from bigml.model import Model
api = BigML()
model = Model('model/5026965515526876630001b2')
model.predict({"petal length": 3, "petal width": 1})
You can also see the model in an IF-THEN rule format with:
model.rules()
Or auto-generate Python code for a function that implements the model with:
model.python()
"""
import logging
import locale
from functools import cmp_to_key
import bigml.predict_utils.classification as c
import bigml.predict_utils.regression as r
import bigml.predict_utils.boosting as b
from bigml.predict_utils.common import FIELD_OFFSET, extract_distribution
from bigml.api import FINISHED, STATUSES
from bigml.api import get_status, get_api_connection, get_model_id
from bigml.util import find_locale, cast, use_cache, load
from bigml.util import DEFAULT_LOCALE, PRECISION, NUMERIC
from bigml.constants import LAST_PREDICTION, PROPORTIONAL
from bigml.basemodel import BaseModel, get_resource_dict
from bigml.multivote import ws_confidence
from bigml.prediction import Prediction
LOGGER = logging.getLogger('BigML')
OPERATING_POINT_KINDS = ["probability", "confidence"]
DICTIONARY = "dict"
OUT_FORMATS = [DICTIONARY, "list"]
BOOSTING = "boosting"
REGRESSION = "regression"
CLASSIFICATION = "classification"
# we use the atof conversion for integers to include integers written as
# 10.0
PYTHON_CONV = {
"double": "locale.atof",
"float": "locale.atof",
"integer": "lambda x: int(locale.atof(x))",
"int8": "lambda x: int(locale.atof(x))",
"int16": "lambda x: int(locale.atof(x))",
"int32": "lambda x: int(locale.atof(x))",
"int64": "lambda x: long(locale.atof(x))",
"day": "lambda x: int(locale.atof(x))",
"month": "lambda x: int(locale.atof(x))",
"year": "lambda x: int(locale.atof(x))",
"hour": "lambda x: int(locale.atof(x))",
"minute": "lambda x: int(locale.atof(x))",
"second": "lambda x: int(locale.atof(x))",
"millisecond": "lambda x: int(locale.atof(x))",
"day-of-week": "lambda x: int(locale.atof(x))",
"day-of-month": "lambda x: int(locale.atof(x))"}
PYTHON_FUNC = dict([(numtype, eval(function))
for numtype, function in PYTHON_CONV.items()])
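# Illustrative sketch: the converters above are locale-aware, so integer
# fields also accept values written as "10.0". Assuming the "C" numeric
# locale:
#
#     import locale
#     locale.setlocale(locale.LC_NUMERIC, "C")
#     PYTHON_FUNC["integer"]("10.0")  # -> 10
#     PYTHON_FUNC["double"]("3.14")   # -> 3.14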
def init_structure(to):
"""Creates the empty structure to store predictions depending on the
chosen format.
"""
if to is not None and to not in OUT_FORMATS:
raise ValueError("The allowed formats are %s." % \
", ".join(OUT_FORMATS))
    return {} if to == DICTIONARY else () if to is None \
        else []
def cast_prediction(full_prediction, to=None,
confidence=False, probability=False,
path=False, distribution=False,
count=False, next=False, d_min=False,
d_max=False, median=False,
unused_fields=False):
"""Creates the output filtering the attributes in a full
prediction.
to: defines the output format. The current
values are: None, `list` and `dict`. If not set, the result
will be expressed as a tuple. The other two options will
produce a list and a dictionary respectively. In the case of lists,
the attributes are stored in the same order used in
the signature of the function.
confidence: Boolean. If True, adds the confidence to the output
probability: Boolean. If True, adds the probability to the output
path: Boolean. If True adds the prediction path to the output
distribution: distribution of probabilities for each
of the objective field classes
count: Boolean. If True adds the number of training instances in the
prediction node to the output
next: Boolean. If True adds the next predicate field to the output
d_min: Boolean. If True adds the predicted node distribution
minimum to the output
d_max: Boolean. If True adds the predicted node distribution
maximum to the output
median: Boolean. If True adds the median of the predicted node
distribution to the output
unused_fields: Boolean. If True adds the fields used in the input
data that have not been used by the model.
"""
prediction_properties = [ \
"prediction", "confidence", "probability", "path", "distribution",
"count", "next", "d_min", "d_max", "median", "unused_fields"]
prediction = True
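    # "prediction" is always True so that eval(prop) in the loop below
    # includes the prediction itself; the remaining property names are
    # looked up among this function's homonymous boolean arguments.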
result = init_structure(to)
for prop in prediction_properties:
value = full_prediction.get(prop)
if eval(prop):
if to is None:
# tuple
result = result + (value,)
elif to == DICTIONARY:
result.update({prop: value})
else:
# list
result.append(value)
return result
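# Usage sketch (hypothetical values, mimicking the output of
# Model.predict(..., full=True)):
#
#     full = {"prediction": "Iris-setosa", "confidence": 0.92,
#             "probability": 0.95}
#     cast_prediction(full, to="dict", confidence=True)
#     # -> {"prediction": "Iris-setosa", "confidence": 0.92}
#     cast_prediction(full, to=None, probability=True)
#     # -> ("Iris-setosa", 0.95)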
def sort_categories(a, b, categories_list):
"""Sorts a list of dictionaries with category keys according to their
value and order in the categories_list. If not found, alphabetic order is
used.
"""
    index_a = categories_list.index(a["category"]) \
        if a["category"] in categories_list else -1
    index_b = categories_list.index(b["category"]) \
        if b["category"] in categories_list else -1
    if index_a < 0 and index_b < 0:
        # neither category is in the list: fall back to alphabetic order
        index_a = a["category"]
        index_b = b["category"]
if index_b < index_a:
return 1
if index_b > index_a:
return -1
return 0
def parse_operating_point(operating_point, operating_kinds, class_names):
"""Checks the operating point contents and extracts the three defined
variables
"""
if "kind" not in operating_point:
raise ValueError("Failed to find the kind of operating point.")
if operating_point["kind"] not in operating_kinds:
raise ValueError("Unexpected operating point kind. Allowed values"
" are: %s." % ", ".join(operating_kinds))
if "threshold" not in operating_point:
raise ValueError("Failed to find the threshold of the operating"
"point.")
if operating_point["threshold"] > 1 or \
operating_point["threshold"] < 0:
raise ValueError("The threshold value should be in the 0 to 1"
" range.")
if "positive_class" not in operating_point:
raise ValueError("The operating point needs to have a"
" positive_class attribute.")
positive_class = operating_point["positive_class"]
if positive_class not in class_names:
raise ValueError("The positive class must be one of the"
"objective field classes: %s." %
", ".join(class_names))
kind = operating_point["kind"]
threshold = operating_point["threshold"]
return kind, threshold, positive_class
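# Sketch of a valid operating point (class names are hypothetical):
#
#     point = {"kind": "probability", "threshold": 0.8,
#              "positive_class": "Iris-setosa"}
#     parse_operating_point(point, OPERATING_POINT_KINDS,
#                           ["Iris-setosa", "Iris-versicolor"])
#     # -> ("probability", 0.8, "Iris-setosa")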
def to_prediction(model, value_as_string, data_locale=DEFAULT_LOCALE):
"""Given a prediction string, returns its value in the required type
"""
if not isinstance(value_as_string, str):
value_as_string = str(value_as_string, "utf-8")
objective_id = model.objective_id
if model.fields[objective_id]['optype'] == NUMERIC:
if data_locale is None:
data_locale = model.locale
find_locale(data_locale)
datatype = model.fields[objective_id]['datatype']
cast_function = PYTHON_FUNC.get(datatype, None)
if cast_function is not None:
return cast_function(value_as_string)
return value_as_string
def average_confidence(model):
"""Average for the confidence of the predictions resulting from
running the training data through the model
"""
if model.boosting:
raise AttributeError("This method is not available for boosting"
" models.")
total = 0.0
cumulative_confidence = 0
groups = model.group_prediction()
for _, predictions in list(groups.items()):
for _, count, confidence in predictions['details']:
cumulative_confidence += count * confidence
total += count
    return float('nan') if total == 0.0 else cumulative_confidence / total
def tree_predict(tree, tree_type, weighted, fields,
input_data, missing_strategy=LAST_PREDICTION):
"""Makes a prediction based on a number of field values.
The input fields must be keyed by Id. There are two possible
strategies to predict when the value for the splitting field
is missing:
0 - LAST_PREDICTION: the last issued prediction is returned.
1 - PROPORTIONAL: as we cannot choose between the two branches
in the tree that stem from this split, we consider both. The
algorithm goes on until the final leaves are reached and
all their predictions are used to decide the final prediction.
"""
if missing_strategy == PROPORTIONAL:
if tree_type == REGRESSION:
return r.regression_proportional_predict(tree, weighted, fields,
input_data)
if tree_type == CLASSIFICATION:
# classification
return c.classification_proportional_predict(tree, weighted,
fields,
input_data)
# boosting
return b.boosting_proportional_predict(tree, fields, input_data)
if tree_type == REGRESSION:
# last prediction missing strategy
return r.regression_last_predict(tree, weighted, fields, input_data)
if tree_type == CLASSIFICATION:
return c.classification_last_predict(tree, weighted, fields,
input_data)
# boosting
return b.boosting_last_predict(tree, fields, input_data)
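# Sketch of the two strategies (tree, weighted, fields and input_data
# stand for the internal structures built by the Model class below):
#
#     # stop at the last node reached before the missing split field
#     tree_predict(tree, CLASSIFICATION, weighted, fields, input_data,
#                  missing_strategy=LAST_PREDICTION)
#     # follow both branches of the missing split and combine the leaves
#     tree_predict(tree, CLASSIFICATION, weighted, fields, input_data,
#                  missing_strategy=PROPORTIONAL)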
def laplacian_term(root_dist, weighted):
"""Correction term based on the training dataset distribution
"""
if weighted:
category_map = {category[0]: 0.0 for category in root_dist}
else:
total = float(sum([category[1] for category in root_dist]))
category_map = {category[0]: category[1] / total
for category in root_dist}
return category_map
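# Worked sketch: for an unweighted root distribution [["a", 6], ["b", 4]]
# the term is the training prior; for weighted trees it is all zeros,
# as the correction comes through the instance weights instead:
#
#     laplacian_term([["a", 6], ["b", 4]], False)  # -> {"a": 0.6, "b": 0.4}
#     laplacian_term([["a", 6], ["b", 4]], True)   # -> {"a": 0.0, "b": 0.0}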
class Model(BaseModel):
""" A lightweight wrapper around a Tree model.
Uses a BigML remote model to build a local version that can be used
to generate predictions locally.
"""
def __init__(self, model, api=None, fields=None, cache_get=None):
"""The Model constructor can be given as first argument:
- a model structure
- a model id
- a path to a JSON file containing a model structure
"""
if use_cache(cache_get):
# using a cache to store the model attributes
self.__dict__ = load(get_model_id(model), cache_get)
return
self.resource_id = None
self.ids_map = {}
self.terms = {}
self.regression = False
self.boosting = None
self.class_names = None
api = get_api_connection(api)
        # retrieving model information from its id, structure dict or file
self.resource_id, model = get_resource_dict( \
model, "model", api=api, no_check_fields=fields is not None)
if 'object' in model and isinstance(model['object'], dict):
model = model['object']
if 'model' in model and isinstance(model['model'], dict):
status = get_status(model)
if 'code' in status and status['code'] == FINISHED:
# fill boosting info before creating modelfields
if model.get("boosted_ensemble"):
self.boosting = model.get('boosting', False)
if self.boosting == {}:
self.boosting = False
self.input_fields = model["input_fields"]
BaseModel.__init__(self, model, api=api, fields=fields)
root = model['model']['root']
self.weighted = "weighted_objective_summary" in root
if self.boosting:
# build boosted tree
self.tree = b.build_boosting_tree( \
model['model']['root'])
elif self.regression:
self.root_distribution = model['model'][ \
'distribution']['training']
# build regression tree
self.tree = r.build_regression_tree(root, \
distribution=self.root_distribution, \
weighted=self.weighted)
else:
# build classification tree
self.root_distribution = model['model'][\
'distribution']['training']
self.laplacian_term = laplacian_term( \
extract_distribution(self.root_distribution)[1],
self.weighted)
self.tree = c.build_classification_tree( \
model['model']['root'], \
distribution=self.root_distribution, \
weighted=self.weighted)
self.class_names = sorted( \
[category[0] for category in \
self.root_distribution["categories"]])
self.objective_categories = [category for \
category, _ in self.fields[self.objective_id][ \
"summary"]["categories"]]
if self.boosting:
self.tree_type = BOOSTING
self.offsets = b.OFFSETS
elif self.regression:
self.tree_type = REGRESSION
self.offsets = r.OFFSETS[str(self.weighted)]
else:
self.tree_type = CLASSIFICATION
self.offsets = c.OFFSETS[str(self.weighted)]
else:
raise Exception("Cannot create the Model instance."
" Only correctly finished models can be"
" used. The model status is currently:"
" %s\n" % STATUSES[status['code']])
else:
raise Exception("Cannot create the Model instance. Could not"
" find the 'model' key in the resource:"
"\n\n%s" % model)
def _to_output(self, output_map, compact, value_key):
if compact:
return [round(output_map.get(name, 0.0), PRECISION)
for name in self.class_names]
output = []
for name in self.class_names:
output.append({
'category': name,
value_key: round(output_map.get(name, 0.0), PRECISION)
})
return output
def predict_confidence(self, input_data, missing_strategy=LAST_PREDICTION,
compact=False):
"""For classification models, Predicts a one-vs.-rest confidence value
for each possible output class, based on input values. This
confidence value is a lower confidence bound on the predicted
probability of the given class. The input fields must be a
        dictionary keyed by field name or field ID.
For regressions, the output is a single element list
containing the prediction.
:param input_data: Input data to be predicted
:param missing_strategy: LAST_PREDICTION|PROPORTIONAL missing strategy
for missing fields
:param compact: If False, prediction is returned as a list of maps, one
                        per class, with the keys "category" and "confidence"
mapped to the name of the class and its confidence,
respectively. If True, returns a list of confidences
ordered by the sorted order of the class names.
"""
if self.regression:
prediction = self.predict(input_data,
missing_strategy=missing_strategy,
full=not compact)
if compact:
output = [prediction]
else:
output = cast_prediction(prediction, to=DICTIONARY,
confidence=True)
return output
if self.boosting:
raise AttributeError("This method is available for non-boosting"
" models only.")
        root_dist = self.root_distribution["categories"]
        category_map = {category[0]: 0.0 for category in root_dist}
prediction = self.predict(input_data,
missing_strategy=missing_strategy,
full=True)
distribution = prediction['distribution']
population = prediction['count']
for class_info in distribution:
name = class_info[0]
category_map[name] = ws_confidence(name, distribution,
ws_n=population)
return self._to_output(category_map, compact, "confidence")
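    # Usage sketch (hypothetical model id and inputs):
    #
    #     local_model = Model('model/5026965515526876630001b2')
    #     local_model.predict_confidence(
    #         {"petal length": 3, "petal width": 1}, compact=True)
    #     # -> one confidence per class, in sorted class-name order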
def _probabilities(self, distribution):
"""Computes the probability of a distribution using a Laplacian
correction.
"""
total = 0 if self.weighted else 1
category_map = {}
category_map.update(self.laplacian_term)
for class_info in distribution:
category_map[class_info[0]] += class_info[1]
total += class_info[1]
for k in category_map:
category_map[k] /= total
return category_map
def predict_probability(self, input_data,
missing_strategy=LAST_PREDICTION,
compact=False):
"""For classification models, Predicts a probability for
each possible output class, based on input values. The input
fields must be a dictionary keyed by field name for field ID.
For regressions, the output is a single element list
containing the prediction.
:param input_data: Input data to be predicted
:param missing_strategy: LAST_PREDICTION|PROPORTIONAL missing strategy
for missing fields
:param compact: If False, prediction is returned as a list of maps, one
                        per class, with the keys "category" and "probability"
                        mapped to the name of the class and its probability,
respectively. If True, returns a list of probabilities
ordered by the sorted order of the class names.
"""
if self.regression or self.boosting:
prediction = self.predict(input_data,
missing_strategy=missing_strategy,
full=not compact)
if compact:
output = [prediction]
else:
output = prediction
else:
prediction = self.predict(input_data,
missing_strategy=missing_strategy,
full=True)
category_map = self._probabilities(prediction['distribution'])
output = self._to_output(category_map, compact, "probability")
return output
def predict_operating(self, input_data,
missing_strategy=LAST_PREDICTION,
operating_point=None):
"""Computes the prediction based on a user-given operating point.
"""
kind, threshold, positive_class = parse_operating_point( \
operating_point, OPERATING_POINT_KINDS, self.class_names)
if kind == "probability":
predictions = self.predict_probability(input_data,
missing_strategy, False)
else:
predictions = self.predict_confidence(input_data,
missing_strategy, False)
position = self.class_names.index(positive_class)
if predictions[position][kind] > threshold:
prediction = predictions[position]
else:
# if the threshold is not met, the alternative class with
# highest probability or confidence is returned
predictions.sort( \
key=cmp_to_key( \
lambda a, b: self._sort_predictions(a, b, kind)))
prediction = predictions[0: 2]
if prediction[0]["category"] == positive_class:
prediction = prediction[1]
else:
prediction = prediction[0]
prediction["prediction"] = prediction["category"]
del prediction["category"]
return prediction
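    # Illustrative call, using the operating point format documented in
    # `predict` below (the values are assumptions, not from the source):
    #
    #   point = {"positive_class": "Iris-setosa",
    #            "probability_threshold": 0.8}
    #   prediction = local_model.predict_operating(
    #       {"petal length": 1.4}, operating_point=point)
    #
    # "Iris-setosa" is returned only if its probability reaches 0.8;
    # otherwise the best-scoring alternative class is returned.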
def _sort_predictions(self, a, b, criteria):
"""Sorts the categories in the predicted node according to the
given criteria
"""
if a[criteria] == b[criteria]:
return sort_categories(a, b, self.objective_categories)
return 1 if b[criteria] > a[criteria] else -1
def predict_operating_kind(self, input_data,
missing_strategy=LAST_PREDICTION,
operating_kind=None):
"""Computes the prediction based on a user-given operating kind.
"""
kind = operating_kind.lower()
if kind not in OPERATING_POINT_KINDS:
raise ValueError("Allowed operating kinds are %s. %s found." %
(", ".join(OPERATING_POINT_KINDS), kind))
if kind == "probability":
predictions = self.predict_probability(input_data,
missing_strategy, False)
else:
predictions = self.predict_confidence(input_data,
missing_strategy, False)
if self.regression:
prediction = predictions
else:
predictions.sort( \
key=cmp_to_key( \
lambda a, b: self._sort_predictions(a, b, kind)))
prediction = predictions[0]
prediction["prediction"] = prediction["category"]
del prediction["category"]
return prediction
def predict(self, input_data, missing_strategy=LAST_PREDICTION,
operating_point=None, operating_kind=None, full=False):
"""Makes a prediction based on a number of field values.
input_data: Input data to be predicted
missing_strategy: LAST_PREDICTION|PROPORTIONAL missing strategy for
missing fields
operating_point: In classification models, this is the point of the
ROC curve where the model will be used at. The
operating point can be defined in terms of:
- the positive_class, the class that is important to
predict accurately
- the probability_threshold (or confidence_threshold),
                          the probability (or confidence) that is established
as minimum for the positive_class to be predicted.
The operating_point is then defined as a map with
two attributes, e.g.:
{"positive_class": "Iris-setosa",
"probability_threshold": 0.5}
or
{"positive_class": "Iris-setosa",
"confidence_threshold": 0.5}
operating_kind: "probability" or "confidence". Sets the
property that decides the prediction. Used only if
no operating_point is used
full: Boolean that controls whether to include the prediction's
attributes. By default, only the prediction is produced. If set
to True, the rest of available information is added in a
dictionary format. The dictionary keys can be:
- prediction: the prediction value
- confidence: prediction's confidence
- probability: prediction's probability
- path: rules that lead to the prediction
- count: number of training instances supporting the
prediction
- next: field to check in the next split
                      - min: minimum value of the training instances in the
predicted node
- max: maximum value of the training instances in the
predicted node
- median: median of the values of the training instances
in the predicted node
- unused_fields: list of fields in the input data that
are not being used in the model
"""
# Checks and cleans input_data leaving the fields used in the model
unused_fields = []
new_data = self.filter_input_data( \
input_data,
add_unused_fields=full)
if full:
input_data, unused_fields = new_data
else:
input_data = new_data
# Strips affixes for numeric values and casts to the final field type
cast(input_data, self.fields)
full_prediction = self._predict( \
input_data, missing_strategy=missing_strategy,
operating_point=operating_point, operating_kind=operating_kind,
unused_fields=unused_fields)
if full:
return dict((key, value) for key, value in \
full_prediction.items() if value is not None)
return full_prediction['prediction']
def _predict(self, input_data, missing_strategy=LAST_PREDICTION,
operating_point=None, operating_kind=None,
unused_fields=None):
"""Makes a prediction based on a number of field values. Please,
note that this function does not check the types for the input
provided, so it's unsafe to use it directly without prior checking.
"""
# When operating_point is used, we need the probabilities
        # (or confidences) of all possible classes to decide, so we use
# the `predict_probability` or `predict_confidence` methods
if operating_point:
if self.regression:
raise ValueError("The operating_point argument can only be"
" used in classifications.")
prediction = self.predict_operating( \
input_data,
missing_strategy=missing_strategy,
operating_point=operating_point)
return prediction
if operating_kind:
if self.regression:
raise ValueError("The operating_kind argument can only be"
" used in classifications.")
prediction = self.predict_operating_kind( \
input_data,
missing_strategy=missing_strategy,
operating_kind=operating_kind)
return prediction
prediction = tree_predict( \
self.tree, self.tree_type, self.weighted, self.fields,
input_data, missing_strategy=missing_strategy)
if self.boosting and missing_strategy == PROPORTIONAL:
# output has to be recomputed and comes in a different format
g_sum, h_sum, population, path = prediction
prediction = Prediction( \
- g_sum / (h_sum + self.boosting.get("lambda", 1)),
path,
None,
distribution=None,
count=population,
median=None,
distribution_unit=None)
result = vars(prediction)
# changing key name to prediction
result['prediction'] = result['output']
del result['output']
# next
field = (None if len(prediction.children) == 0 else
prediction.children[0][FIELD_OFFSET])
if field is not None and field in self.model_fields:
field = self.model_fields[field]['name']
result.update({'next': field})
del result['children']
if not self.regression and not self.boosting:
probabilities = self._probabilities(result['distribution'])
result['probability'] = probabilities[result['prediction']]
# adding unused fields, if any
if unused_fields:
result.update({'unused_fields': unused_fields})
return result
| apache-2.0 | -6,770,123,183,223,930,000 | 40.062842 | 79 | 0.578249 | false | 4.71572 | false | false | false | 0.001264 |
the-it/WS_THEbotIT | service/ws_re/scanner/tasks/wikidata/claims/test_claim_factory.py | 1 | 12717 | # pylint: disable=protected-access,no-self-use
from datetime import datetime
from typing import List
from unittest import TestCase
from unittest.mock import MagicMock, PropertyMock, Mock
import pywikibot
from testfixtures import compare
from service.ws_re.scanner.tasks.wikidata.claims.claim_factory import ClaimFactory
from service.ws_re.scanner.tasks.wikidata.claims._base import SnakParameter
from service.ws_re.scanner.tasks.wikidata.claims._typing import JsonClaimDict
from service.ws_re.template.re_page import RePage
from tools.bots import BotException
from tools.bots.pi import WikiLogger
from tools.test import REAL_WIKI_TEST
class BaseTestClaimFactory(TestCase):
def setUp(self) -> None:
if REAL_WIKI_TEST:
self.wikisource_site = pywikibot.Site(code='de', fam='wikisource', user='THEbotIT')
self.wikidata_site = self.wikisource_site.data_repository()
else:
self.wikidata_site = MagicMock()
self.wikisource_site = MagicMock()
self.logger = WikiLogger(bot_name="Test",
start_time=datetime(2000, 1, 1),
log_to_screen=False)
@staticmethod
def _create_mock_page(text: str = None, title: str = None):
mock_item = MagicMock()
if text:
text_mock = PropertyMock(return_value=text)
type(mock_item).text = text_mock
if title:
title_mock = Mock(return_value=title)
type(mock_item).title = title_mock
return RePage(mock_item)
class TestClaimFactory(BaseTestClaimFactory):
class P1234FactoryDummy(ClaimFactory):
def _get_claim_json(self) -> List[JsonClaimDict]:
return []
def setUp(self) -> None:
super().setUp()
self.factory_dummy = self.P1234FactoryDummy(MagicMock(), self.logger)
def get_json(letter: str):
return {"mainsnak": {"snaktype": "value",
"property": "P1234",
"datatype": "string",
"datavalue": {"value": letter, "type": "string"}},
"type": "statement",
"rank": "normal"}
self.a = pywikibot.Claim.fromJSON(self.wikidata_site, get_json("a"))
self.b = pywikibot.Claim.fromJSON(self.wikidata_site, get_json("b"))
self.c = pywikibot.Claim.fromJSON(self.wikidata_site, get_json("c"))
self.d = pywikibot.Claim.fromJSON(self.wikidata_site, get_json("d"))
def test_property_string(self):
compare("P1234", self.P1234FactoryDummy.get_property_string())
def test__filter_new_vs_old_claim_list(self):
compare(([self.a, self.c], [self.d]),
self.factory_dummy._filter_new_vs_old_claim_list([self.a, self.b, self.c], [self.b, self.d]))
compare(([self.a], []), self.factory_dummy._filter_new_vs_old_claim_list([self.a], []))
compare(([], [self.a]), self.factory_dummy._filter_new_vs_old_claim_list([], [self.a]))
compare(([], []),
self.factory_dummy._filter_new_vs_old_claim_list([self.a, self.b, self.c], [self.a, self.b, self.c]))
def test__create_claim_dictionary(self):
compare({"add": {"P1234": [self.a, self.b]}, "remove": [self.c, self.d]},
self.factory_dummy._create_claim_dictionary([self.a, self.b], [self.c, self.d]))
def test__create_claim_json_wikibase_item(self):
expect = {"mainsnak": {"snaktype": "value",
"property": "P31",
"datatype": "wikibase-item",
"datavalue": {
"value": {
"entity-type": "item",
"numeric-id": 123
},
"type": "wikibase-entityid"
}},
"type": "statement",
"rank": "normal"}
compare(expect, ClaimFactory.create_claim_json(
SnakParameter(property_str="P31", target_type="wikibase-item", target="Q123")))
compare(expect, ClaimFactory.create_claim_json(
SnakParameter(property_str="P31", target_type="wikibase-item", target="123")))
def test__create_claim_json_time_just_year(self):
expect = {"mainsnak": {"snaktype": "value",
"property": "P31",
"datatype": "time",
"datavalue": {
"value": {
"time": "+00000001234-01-01T00:00:00Z",
"precision": 9,
"after": 0,
"before": 0,
"timezone": 0,
"calendarmodel": "http://www.wikidata.org/entity/Q1985727"
},
"type": "time"}},
"type": "statement",
"rank": "normal"}
compare(expect, ClaimFactory.create_claim_json(
SnakParameter(property_str="P31", target_type="time", target="1234")))
def test__create_claim_json_string(self):
expect = {"mainsnak": {"snaktype": "value",
"property": "P31",
"datatype": "string",
"datavalue": {"value": "texttexttext",
"type": "string"
}},
"type": "statement",
"rank": "normal"}
compare(expect, ClaimFactory.create_claim_json(
SnakParameter(property_str="P31", target_type="string", target="texttexttext")))
def test__create_claim_json_monolingualtext(self):
expect = {"mainsnak": {"snaktype": "value",
"property": "P31",
"datatype": "monolingualtext",
"datavalue": {"value": {"text": "texttexttext",
"language": "mul"},
"type": "monolingualtext"
}
},
"type": "statement",
"rank": "normal"}
compare(expect, ClaimFactory.create_claim_json(
SnakParameter(property_str="P31", target_type="monolingualtext", target="texttexttext")))
def test__create_claim_json_with_qualifier(self):
expect = {"mainsnak": {"snaktype": "value",
"property": "P31",
"datatype": "string",
"datavalue": {"value": "texttexttext",
"type": "string"
}},
"type": "statement",
"rank": "normal",
"qualifiers": {
"P1234": [
{
"snaktype": "value",
"property": "P1234",
"datatype": "string",
"datavalue": {
"value": "text",
"type": "string"
}
}
],
"P5678": [
{
"snaktype": "value",
"property": "P5678",
"datatype": "wikibase-item",
"datavalue": {
"value": {
"entity-type": "item",
"numeric-id": 123456
},
"type": "wikibase-entityid"
}
}
]
},
"qualifiers-order": [
"P1234",
"P5678"
]
}
main_parameter = SnakParameter(property_str="P31", target_type="string", target="texttexttext")
quali_snak_1 = SnakParameter(property_str="P1234", target_type="string", target="text")
quali_snak_2 = SnakParameter(property_str="P5678", target_type="wikibase-item", target="Q123456")
compare(expect, ClaimFactory.create_claim_json(main_parameter, qualifiers=[quali_snak_1, quali_snak_2]))
def test__create_claim_json_with_reference(self):
expect = {
"mainsnak": {
"snaktype": "value",
"property": "P1234",
"datatype": "string",
"datavalue": {
"value": "value",
"type": "string"
}
},
"type": "statement",
"rank": "normal",
"references": [
{
"snaks": {
"P123": [
{
"snaktype": "value",
"property": "P123",
"datatype": "string",
"datavalue": {
"value": "ref1",
"type": "string"
}
}
],
"P234": [
{
"snaktype": "value",
"property": "P234",
"datatype": "string",
"datavalue": {
"value": "ref2",
"type": "string"
}
}
]
},
"snaks-order": [
"P123",
"P234"
]
},
{
"snaks": {
"P345": [
{
"snaktype": "value",
"property": "P345",
"datatype": "string",
"datavalue": {
"value": "ref3",
"type": "string"
}
}
],
"P456": [
{
"snaktype": "value",
"property": "P456",
"datatype": "string",
"datavalue": {
"value": "ref4",
"type": "string"
}
}
]
},
"snaks-order": [
"P345",
"P456"
]
}
]
}
main_parameter = SnakParameter(property_str="P1234", target_type="string", target="value")
ref_snak_1 = SnakParameter(property_str="P123", target_type="string", target="ref1")
ref_snak_2 = SnakParameter(property_str="P234", target_type="string", target="ref2")
ref_snak_3 = SnakParameter(property_str="P345", target_type="string", target="ref3")
ref_snak_4 = SnakParameter(property_str="P456", target_type="string", target="ref4")
compare(expect, ClaimFactory.create_claim_json(snak_parameter=main_parameter,
references=[[ref_snak_1, ref_snak_2],
[ref_snak_3, ref_snak_4]]))
def test__create_claim_json_exception(self):
with self.assertRaises(BotException):
ClaimFactory.create_claim_json(SnakParameter(property_str="P31", target_type="tada", target="123"))
with self.assertRaises(ValueError):
ClaimFactory.create_claim_json(SnakParameter(property_str="P31", target_type="time", target="tada"))
| mit | -9,093,277,020,768,808,000 | 43.778169 | 117 | 0.406149 | false | 4.703033 | true | false | false | 0.00228 |
maartenq/ansible | lib/ansible/modules/cloud/google/gcp_compute_https_health_check_facts.py | 12 | 7524 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_https_health_check_facts
description:
- Gather facts for GCP HttpsHealthCheck
short_description: Gather facts for GCP HttpsHealthCheck
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
      Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a https health check facts
gcp_compute_https_health_check_facts:
filters:
- name = test_object
project: test_project
auth_kind: service_account
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
check_interval_sec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creation_timestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive successes.
The default value is 2.
returned: success
type: int
host:
description:
- The value of the host header in the HTTPS health check request. If left empty (default
value), the public IP on behalf of which this health check is performed will be
used.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
    - The default value is 443.
returned: success
type: int
request_path:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
timeout_sec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater value
than checkIntervalSec.
returned: success
type: int
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive failures.
The default value is 2.
returned: success
type: int
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
filters=dict(type='list', elements='str'),
)
)
if 'scopes' not in module.params:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
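# Illustrative behaviour (the example filters are assumptions, not from the
# module): query_options(["name = test", "zone = us-central1-a"]) returns
# "(name = test) (zone = us-central1-a)", i.e. both conditions ANDed, while
# a single filter is passed through unchanged.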
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | -50,928,666,003,723,830 | 32.891892 | 118 | 0.533892 | false | 4.816901 | false | false | false | 0.003456 |
gcsadovy/generalPY | listComprehension.py | 1 | 1709 | #listComprehension.py
#Garik Sadovy
#gcsadovy
# Create a list of all uppercase field names
fieldNames = ['FID', 'Shape', 'COVER', 'RECNO']
fieldNames2 = [ i.upper() for i in fieldNames ]
print "1. All cap field names:", fieldNames2
# Create a list of rounded float values
strList = ['3.34', '1.07', '4.21', '4.56', '4.5']
intList = [round(float(i)) for i in strList]  # round() rounds; int() would truncate
print "2. Rounded float values:", intList
# Create a list of reciprocal values (the reciprocal of a number n is defined as 1/n)
values = [8.0, 4.0, 4.0, 1.0, 5.0, 4.0, 4.0, 2.0]
reciprocal = [(1/float(i)) for i in values] #modify this
print "3. The reciprocal values:", reciprocal
# Create a list in which all the slash marks ('/') are replaced with underscores ('_').
fieldNames = [ "FireType/Protection-Type", "Time/Date", "Condition/Status/Role"]
fieldNames2 = [i.replace('/', '_') for i in fieldNames] #modify this
print "4. No slashes:", fieldNames2
# Create a list of output file names
import os
inputFiles = os.listdir("C:/Temp")
# Sample output below for inputFiles = ["COVER.shp", "Fires.shp", "Data.txt"]
outputFiles = [os.path.splitext(i)[0] + "out" + os.path.splitext(i)[1] for i in inputFiles]
print "5. Output files:", outputFiles
# Create a list file extensions -- You may assume file extensions are the last
# 4 characters or for an extra challenge, find a solution using the 'os.path.splitext' method
inputFiles = os.listdir("C:/Temp")
# Sample output below for inputFiles = ["COVER.shp", "Fires.shp", "Data.txt"]
extensions = [os.path.basename(i[-4:]) for i in inputFiles] #modify this
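# Extra-challenge variant (illustrative): os.path.splitext splits off the
# extension without assuming it is exactly four characters long.
extensions2 = [os.path.splitext(i)[1] for i in inputFiles]
print "6b. File extensions (splitext):", extensions2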
print "6. File extensions:", extensions | gpl-3.0 | -7,737,266,964,302,702,000 | 43.026316 | 109 | 0.67993 | false | 3.090416 | false | false | false | 0.018724 |
google/graphicsfuzz | gfauto/gfauto/run_amber_android.py | 1 | 2782 | # -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs AmberScript tests on Android devices."""
import argparse
import sys
from pathlib import Path
from typing import List, Optional
from gfauto import android_device, binaries_util, settings_util
from gfauto.settings_pb2 import Settings
def main() -> None: # pylint: disable=too-many-statements, too-many-locals, too-many-branches;
parser = argparse.ArgumentParser(
description="Runs AmberScript files on Android devices."
)
parser.add_argument(
"amber_script_file", help="AmberScript tests to run.", nargs="+",
)
parser.add_argument(
"--output", help="Output directory.", default="output",
)
parser.add_argument(
"--settings",
help="Path to a settings JSON file for this instance. "
"Unlike with gfauto_fuzz, the default value is an empty string, which is ignored. ",
default="",
)
parser.add_argument(
"--serial",
help="Android device serial. If left unspecified, the tests will be run on all Android devices.",
action="append",
)
parsed_args = parser.parse_args(sys.argv[1:])
amber_script_files: List[Path] = [Path(a) for a in parsed_args.amber_script_file]
output_path: Path = Path(parsed_args.output)
serials: Optional[List[str]] = parsed_args.serial
settings_str: str = parsed_args.settings
settings = Settings()
if settings_str:
settings = settings_util.read_or_create(Path(settings_str))
binary_manager = binaries_util.get_default_binary_manager(settings)
if not serials:
android_devices = android_device.get_all_android_devices(
binary_manager, include_device_details=False
)
serials = []
for device in android_devices:
serials.append(device.android.serial)
for amber_script_file in amber_script_files:
for serial in serials:
android_device.run_amber_on_device(
amber_script_file,
output_path / serial,
dump_image=False,
dump_buffer=False,
serial=serial,
)
if __name__ == "__main__":
main()
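# Illustrative invocation (the file names and serial are assumptions, not
# from the original repo):
#
#   python run_amber_android.py test1.amber test2.amber \
#       --output results --serial emulator-5554
#
# Without --serial the tests run on every connected Android device, each
# writing into output/<serial>/.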
| apache-2.0 | -864,128,570,365,748,200 | 31.348837 | 105 | 0.659238 | false | 3.974286 | false | false | false | 0.001438 |
bdoner/SickRage | lib/sqlalchemy/testing/exclusions.py | 76 | 10199 | # testing/exclusions.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from .plugin.plugin_base import SkipTest
from ..util import decorator
from . import config
from .. import util
import contextlib
import inspect
class skip_if(object):
def __init__(self, predicate, reason=None):
self.predicate = _as_predicate(predicate)
self.reason = reason
_fails_on = None
def __add__(self, other):
def decorate(fn):
return other(self(fn))
return decorate
@property
def enabled(self):
return self.enabled_for_config(config._current)
def enabled_for_config(self, config):
return not self.predicate(config)
@contextlib.contextmanager
def fail_if(self, name='block'):
try:
yield
except Exception as ex:
if self.predicate(config._current):
print(("%s failed as expected (%s): %s " % (
name, self.predicate, str(ex))))
else:
raise
else:
if self.predicate(config._current):
raise AssertionError(
"Unexpected success for '%s' (%s)" %
(name, self.predicate))
def __call__(self, fn):
@decorator
def decorate(fn, *args, **kw):
if self.predicate(config._current):
if self.reason:
msg = "'%s' : %s" % (
fn.__name__,
self.reason
)
else:
msg = "'%s': %s" % (
fn.__name__, self.predicate
)
raise SkipTest(msg)
else:
if self._fails_on:
with self._fails_on.fail_if(name=fn.__name__):
return fn(*args, **kw)
else:
return fn(*args, **kw)
return decorate(fn)
def fails_on(self, other, reason=None):
self._fails_on = skip_if(other, reason)
return self
def fails_on_everything_except(self, *dbs):
self._fails_on = skip_if(fails_on_everything_except(*dbs))
return self
class fails_if(skip_if):
def __call__(self, fn):
@decorator
def decorate(fn, *args, **kw):
with self.fail_if(name=fn.__name__):
return fn(*args, **kw)
return decorate(fn)
def only_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return skip_if(NotPredicate(predicate), reason)
def succeeds_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return fails_if(NotPredicate(predicate), reason)
class Predicate(object):
@classmethod
def as_predicate(cls, predicate):
if isinstance(predicate, skip_if):
return NotPredicate(predicate.predicate)
elif isinstance(predicate, Predicate):
return predicate
elif isinstance(predicate, list):
return OrPredicate([cls.as_predicate(pred) for pred in predicate])
elif isinstance(predicate, tuple):
return SpecPredicate(*predicate)
elif isinstance(predicate, util.string_types):
tokens = predicate.split(" ", 2)
op = spec = None
db = tokens.pop(0)
if tokens:
op = tokens.pop(0)
if tokens:
spec = tuple(int(d) for d in tokens.pop(0).split("."))
return SpecPredicate(db, op, spec)
elif util.callable(predicate):
return LambdaPredicate(predicate)
else:
assert False, "unknown predicate type: %s" % predicate
class BooleanPredicate(Predicate):
def __init__(self, value, description=None):
self.value = value
self.description = description or "boolean %s" % value
def __call__(self, config):
return self.value
def _as_string(self, negate=False):
if negate:
return "not " + self.description
else:
return self.description
def __str__(self):
return self._as_string()
class SpecPredicate(Predicate):
def __init__(self, db, op=None, spec=None, description=None):
self.db = db
self.op = op
self.spec = spec
self.description = description
_ops = {
'<': operator.lt,
'>': operator.gt,
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge,
'in': operator.contains,
'between': lambda val, pair: val >= pair[0] and val <= pair[1],
}
def __call__(self, config):
engine = config.db
if "+" in self.db:
dialect, driver = self.db.split('+')
else:
dialect, driver = self.db, None
if dialect and engine.name != dialect:
return False
if driver is not None and engine.driver != driver:
return False
if self.op is not None:
assert driver is None, "DBAPI version specs not supported yet"
version = _server_version(engine)
oper = hasattr(self.op, '__call__') and self.op \
or self._ops[self.op]
return oper(version, self.spec)
else:
return True
def _as_string(self, negate=False):
if self.description is not None:
return self.description
elif self.op is None:
if negate:
return "not %s" % self.db
else:
return "%s" % self.db
else:
if negate:
return "not %s %s %s" % (
self.db,
self.op,
self.spec
)
else:
return "%s %s %s" % (
self.db,
self.op,
self.spec
)
def __str__(self):
return self._as_string()
class LambdaPredicate(Predicate):
def __init__(self, lambda_, description=None, args=None, kw=None):
spec = inspect.getargspec(lambda_)
if not spec[0]:
self.lambda_ = lambda db: lambda_()
else:
self.lambda_ = lambda_
self.args = args or ()
self.kw = kw or {}
if description:
self.description = description
elif lambda_.__doc__:
self.description = lambda_.__doc__
else:
self.description = "custom function"
def __call__(self, config):
return self.lambda_(config)
def _as_string(self, negate=False):
if negate:
return "not " + self.description
else:
return self.description
def __str__(self):
return self._as_string()
class NotPredicate(Predicate):
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, config):
return not self.predicate(config)
def __str__(self):
return self.predicate._as_string(True)
class OrPredicate(Predicate):
def __init__(self, predicates, description=None):
self.predicates = predicates
self.description = description
def __call__(self, config):
for pred in self.predicates:
if pred(config):
self._str = pred
return True
return False
_str = None
def _eval_str(self, negate=False):
if self._str is None:
if negate:
conjunction = " and "
else:
conjunction = " or "
return conjunction.join(p._as_string(negate=negate)
for p in self.predicates)
else:
return self._str._as_string(negate=negate)
def _negation_str(self):
if self.description is not None:
return "Not " + (self.description % {"spec": self._str})
else:
return self._eval_str(negate=True)
def _as_string(self, negate=False):
if negate:
return self._negation_str()
else:
if self.description is not None:
return self.description % {"spec": self._str}
else:
return self._eval_str()
def __str__(self):
return self._as_string()
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
return OrPredicate(
[Predicate.as_predicate(db) for db in dbs]
)
def open():
return skip_if(BooleanPredicate(False, "mark as execute"))
def closed():
return skip_if(BooleanPredicate(True, "marked as skip"))
def fails():
return fails_if(BooleanPredicate(True, "expected to fail"))
@decorator
def future(fn, *arg):
return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
return fails_if(SpecPredicate(db), reason)
def fails_on_everything_except(*dbs):
return succeeds_if(
OrPredicate([
SpecPredicate(db) for db in dbs
])
)
def skip(db, reason=None):
return skip_if(SpecPredicate(db), reason)
def only_on(dbs, reason=None):
return only_if(
OrPredicate([SpecPredicate(db) for db in util.to_list(dbs)])
)
def exclude(db, op, spec, reason=None):
return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
assert queries, "no queries sent!"
return OrPredicate([
Predicate.as_predicate(query)
for query in queries
])(config)
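# Illustrative use of the predicate helpers (hypothetical test, not part of
# the original module). String specs parse as "<db>[+driver] [<op> <version>]":
#
#   @skip_if("mysql < 5.5", "no microsecond support")
#   def test_microseconds(self):
#       ...
#
# fails_on_everything_except("postgresql", "sqlite") marks a test as expected
# to fail on every other configured backend.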
| gpl-3.0 | 1,909,600,769,299,854,800 | 27.096419 | 84 | 0.535151 | false | 4.260234 | true | false | false | 0.000882 |
yjxtogo/horizon | horizon/templatetags/parse_date.py | 75 | 1738 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for parsing date strings.
"""
from datetime import datetime # noqa
from django import template
from django.utils import timezone
register = template.Library()
class ParseDateNode(template.Node):
def render(self, datestring):
"""Parses a date-like input string into a timezone aware Python
datetime.
"""
formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f",
"%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
if datestring:
for format in formats:
try:
parsed = datetime.strptime(datestring, format)
if not timezone.is_aware(parsed):
parsed = timezone.make_aware(parsed, timezone.utc)
return parsed
except Exception:
pass
return None
@register.filter(name='parse_date')
def parse_date(value):
return ParseDateNode().render(value)
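# Template usage sketch (the literal value is an assumption, not from the
# original source):
#
#   {{ "2014-09-28T12:34:56.000000"|parse_date }}
#
# yields a timezone-aware datetime (UTC is assumed when the parsed value is
# naive), which Django's date filters can then format.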
| apache-2.0 | -8,700,838,267,463,968,000 | 31.792453 | 78 | 0.635213 | false | 4.108747 | false | false | false | 0 |
realgam3/x64dbg-python | swig/x64dbg_python/__init__.py | 1 | 1565 | __author__ = 'Tomer Zait (RealGame)'
__version__ = '1.0.0'
import sys
from bridgemain import *
from _plugins import _plugin_logprintf, _plugin_logputs
def raw_input(prompt=''):
return GuiGetLineWindow(prompt)
def input(prompt=''):
return eval(GuiGetLineWindow(prompt))
class OutputHook(object):
def __init__(self, stream_name='stdout', callback=_plugin_logprintf):
self.is_hooking = False
self.callback = callback
self.stream_name = stream_name
if self.stream_name not in ['stderr', 'stdout']:
raise Exception('Cannot hook %s stream.' % self.stream_name)
elif self.__is_hooked():
raise Exception('Do not hook the hooker!')
self.__original_stream = getattr(sys, self.stream_name)
def __getattr__(self, name):
return getattr(self.__original_stream, name)
def __is_hooked(self):
stream = getattr(sys, self.stream_name)
return hasattr(stream, 'is_hooking')
def write(self, text):
self.callback(text)
def start(self):
if not self.is_hooking:
setattr(sys, self.stream_name, self)
self.is_hooking = True
def stop(self):
if self.is_hooking:
setattr(sys, self.stream_name, self.__original_stream)
self.is_hooking = False
# Hook sys.stdout
STDOUT_HOOK = OutputHook('stdout')
STDOUT_HOOK.start()
# Hook sys.stderr
STDERR_HOOK = OutputHook('stderr')
STDERR_HOOK.start()
# Print Message That The Hooks Worked!
print '[PYTHON] stdout, stderr, raw_input hooked!'
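# The hooks can be detached again if raw console output is ever needed
# (illustrative; mirrors the start() calls above):
#
#   STDOUT_HOOK.stop()
#   STDERR_HOOK.stop()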
| mit | 4,448,265,607,922,926,000 | 25.525424 | 73 | 0.634505 | false | 3.56492 | false | false | false | 0.000639 |
linglung/ytdl | youtube_dl/extractor/sportbox.py | 1 | 4558 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
js_to_json,
unified_strdate,
)
class SportBoxIE(InfoExtractor):
_VALID_URL = r'https?://news\.sportbox\.ru/(?:[^/]+/)+spbvideo_NI\d+_(?P<display_id>.+)'
_TESTS = [{
'url': 'http://news.sportbox.ru/Vidy_sporta/Avtosport/Rossijskij/spbvideo_NI483529_Gonka-2-zaezd-Obyedinenniy-2000-klassi-Turing-i-S',
'md5': 'ff56a598c2cf411a9a38a69709e97079',
'info_dict': {
'id': '80822',
'ext': 'mp4',
'title': 'Гонка 2 заезд ««Объединенный 2000»: классы Туринг и Супер-продакшн',
'description': 'md5:3d72dc4a006ab6805d82f037fdc637ad',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140928',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://news.sportbox.ru/Vidy_sporta/billiard/spbvideo_NI486287_CHempionat-mira-po-dinamichnoy-piramide-4',
'only_matching': True,
}, {
'url': 'http://news.sportbox.ru/video/no_ads/spbvideo_NI536574_V_Novorossijske_proshel_detskij_turnir_Pole_slavy_bojevoj?ci=211355',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
player = self._search_regex(
r'src="/?(vdl/player/[^"]+)"', webpage, 'player')
title = self._html_search_regex(
[r'"nodetitle"\s*:\s*"([^"]+)"', r'class="node-header_{1,2}title">([^<]+)'],
webpage, 'title')
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_meta(
'dateCreated', webpage, 'upload date'))
return {
'_type': 'url_transparent',
'url': compat_urlparse.urljoin(url, '/%s' % player),
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
}
class SportBoxEmbedIE(InfoExtractor):
_VALID_URL = r'https?://news\.sportbox\.ru/vdl/player(?:/[^/]+/|\?.*?\bn?id=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://news.sportbox.ru/vdl/player/ci/211355',
'info_dict': {
'id': '211355',
'ext': 'mp4',
'title': 'В Новороссийске прошел детский турнир «Поле славы боевой»',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://news.sportbox.ru/vdl/player?nid=370908&only_player=1&autostart=false&playeri=2&height=340&width=580',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+src="(https?://news\.sportbox\.ru/vdl/player[^"]+)"',
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
def cleanup_js(code):
# desktop_advert_config contains complex Javascripts and we don't need it
return js_to_json(re.sub(r'desktop_advert_config.*', '', code))
jwplayer_data = self._parse_json(self._search_regex(
r'(?s)player\.setup\(({.+?})\);', webpage, 'jwplayer settings'), video_id,
transform_source=cleanup_js)
hls_url = jwplayer_data.get('hls_url')
if hls_url:
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, ext='mp4', m3u8_id='hls'))
rtsp_url = jwplayer_data.get('rtsp_url')
if rtsp_url:
formats.append({
'url': rtsp_url,
'format_id': 'rtsp',
})
self._sort_formats(formats)
title = jwplayer_data['node_title']
thumbnail = jwplayer_data.get('image_url')
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
| unlicense | -6,251,618,962,852,294,000 | 33.284615 | 142 | 0.546107 | false | 3.125526 | false | false | false | 0.002692 |
TomAugspurger/pandas | pandas/tests/tseries/frequencies/test_to_offset.py | 1 | 4660 | import re
import pytest
from pandas._libs.tslibs import Timedelta, offsets, to_offset
@pytest.mark.parametrize(
"freq_input,expected",
[
(to_offset("10us"), offsets.Micro(10)),
(offsets.Hour(), offsets.Hour()),
((5, "T"), offsets.Minute(5)),
("2h30min", offsets.Minute(150)),
("2h 30min", offsets.Minute(150)),
("2h30min15s", offsets.Second(150 * 60 + 15)),
("2h 60min", offsets.Hour(3)),
("2h 20.5min", offsets.Second(8430)),
("1.5min", offsets.Second(90)),
("0.5S", offsets.Milli(500)),
("15l500u", offsets.Micro(15500)),
("10s75L", offsets.Milli(10075)),
("1s0.25ms", offsets.Micro(1000250)),
("1s0.25L", offsets.Micro(1000250)),
("2800N", offsets.Nano(2800)),
("2SM", offsets.SemiMonthEnd(2)),
("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
("2SMS-15", offsets.SemiMonthBegin(2)),
],
)
def test_to_offset(freq_input, expected):
result = to_offset(freq_input)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)]
)
def test_to_offset_negative(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"freqstr",
[
"2h20m",
"U1",
"-U",
"3U1",
"-2-3U",
"-2D:3H",
"1.5.0S",
"2SMS-15-15",
"2SMS-15D",
"100foo",
# Invalid leading +/- signs.
"+-1d",
"-+1h",
"+1",
"-7",
"+d",
"-m",
# Invalid shortcut anchors.
"SM-0",
"SM-28",
"SM-29",
"SM-FOO",
"BSM",
"SM--1",
"SMS-1",
"SMS-28",
"SMS-30",
"SMS-BAR",
"SMS-BYR",
"BSMS",
"SMS--2",
],
)
def test_to_offset_invalid(freqstr):
# see gh-13930
# We escape string because some of our
# inputs contain regex special characters.
msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
to_offset(freqstr)
def test_to_offset_no_evaluate():
with pytest.raises(ValueError, match="Could not evaluate"):
to_offset(("", ""))
@pytest.mark.parametrize(
"freqstr,expected",
[
("2D 3H", offsets.Hour(51)),
("2 D3 H", offsets.Hour(51)),
("2 D 3 H", offsets.Hour(51)),
(" 2 D 3 H ", offsets.Hour(51)),
(" H ", offsets.Hour()),
(" 3 H ", offsets.Hour(3)),
],
)
def test_to_offset_whitespace(freqstr, expected):
result = to_offset(freqstr)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"kwargs,expected",
[
(dict(days=1, seconds=1), offsets.Second(86401)),
(dict(days=-1, seconds=1), offsets.Second(-86399)),
(dict(hours=1, minutes=10), offsets.Minute(70)),
(dict(hours=1, minutes=-10), offsets.Minute(50)),
(dict(weeks=1), offsets.Day(7)),
(dict(hours=1), offsets.Hour(1)),
(dict(hours=1), to_offset("60min")),
(dict(microseconds=1), offsets.Micro(1)),
(dict(microseconds=0), offsets.Nano(0)),
],
)
def test_to_offset_pd_timedelta(kwargs, expected):
# see gh-9064
td = Timedelta(**kwargs)
result = to_offset(td)
assert result == expected
@pytest.mark.parametrize(
"shortcut,expected",
[
("W", offsets.Week(weekday=6)),
("W-SUN", offsets.Week(weekday=6)),
("Q", offsets.QuarterEnd(startingMonth=12)),
("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
("SM", offsets.SemiMonthEnd(day_of_month=15)),
("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
],
)
def test_anchored_shortcuts(shortcut, expected):
result = to_offset(shortcut)
assert result == expected
| bsd-3-clause | 3,521,344,345,220,055,600 | 27.242424 | 86 | 0.556438 | false | 3.090186 | true | false | false | 0.000215 |
praba230890/junction | junction/conferences/migrations/0005_emailreviewernotificationsetting.py | 9 | 1843 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Third Party Stuff
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('proposals', '0003_auto_20150113_1401'),
('conferences', '0004_conference_logo'),
]
operations = [
migrations.CreateModel(
name='EmailReviewerNotificationSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='Last Modified At')),
('action', models.CharField(max_length=15)),
('status', models.BooleanField(default=True)),
('conference_reviewer', models.ForeignKey(to='conferences.ConferenceProposalReviewer')),
('created_by', models.ForeignKey(related_name='created_emailreviewernotificationsetting_set', verbose_name='Created By', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='updated_emailreviewernotificationsetting_set', verbose_name='Modified By', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('proposal_section', models.ForeignKey(to='proposals.ProposalSection')),
('proposal_type', models.ForeignKey(to='proposals.ProposalType')),
],
options={
'verbose_name': 'email notification',
'verbose_name_plural': 'email notifications',
},
bases=(models.Model,),
),
]
| mit | -6,880,900,112,396,074,000 | 47.5 | 192 | 0.627238 | false | 4.326291 | false | false | false | 0.004341 |
slohse/ansible | lib/ansible/modules/crypto/openssl_publickey.py | 43 | 11004 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_publickey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate an OpenSSL public key from its private key.
description:
- "This module allows one to (re)generate OpenSSL public keys from their private keys.
It uses the pyOpenSSL python library to interact with openssl. Keys are generated
in PEM format. This module works only if the version of PyOpenSSL is recent enough (> 16.0.0)."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
force:
required: false
default: False
type: bool
description:
            - Should the key be regenerated even if it already exists
format:
required: false
default: PEM
choices: [ PEM, OpenSSH ]
description:
- The format of the public key.
version_added: "2.4"
path:
required: true
description:
- Name of the file in which the generated TLS/SSL public key will be written.
privatekey_path:
required: true
description:
- Path to the TLS/SSL private key from which to generate the public key.
privatekey_passphrase:
required: false
description:
- The passphrase for the privatekey.
version_added: "2.4"
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSL public key in PEM format.
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL public key in OpenSSH v2 format.
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
# Generate an OpenSSL public key with a passphrase protected
# private key
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
# Force regenerate an OpenSSL public key if it already exists
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: True
# Remove an OpenSSL public key
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
state: absent
'''
RETURN = '''
privatekey:
description: Path to the TLS/SSL private key the public key was generated from
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...)
returned: changed or success
type: string
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file
returned: changed or success
type: string
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import hashlib
import os
try:
from OpenSSL import crypto
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
class PublicKeyError(crypto_utils.OpenSSLObjectError):
pass
class PublicKey(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(PublicKey, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.privatekey = None
self.fingerprint = {}
def generate(self, module):
"""Generate the public key."""
if not os.path.exists(self.privatekey_path):
raise PublicKeyError(
'The private key %s does not exist' % self.privatekey_path
)
if not self.check(module, perms_required=False) or self.force:
try:
if self.format == 'OpenSSH':
privatekey_content = open(self.privatekey_path, 'rb').read()
key = crypto_serialization.load_pem_private_key(privatekey_content,
password=self.privatekey_passphrase,
backend=default_backend())
publickey_content = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
else:
self.privatekey = crypto_utils.load_privatekey(
self.privatekey_path, self.privatekey_passphrase
)
publickey_content = crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)
with open(self.path, 'wb') as publickey_file:
publickey_file.write(publickey_content)
self.changed = True
except (IOError, OSError) as exc:
raise PublicKeyError(exc)
except AttributeError as exc:
self.remove()
raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')
self.fingerprint = crypto_utils.get_fingerprint(
self.privatekey_path,
self.privatekey_passphrase
)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PublicKey, self).check(module, perms_required)
def _check_privatekey():
if not os.path.exists(self.privatekey_path):
return False
try:
publickey_content = open(self.path, 'rb').read()
if self.format == 'OpenSSH':
current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
publickey_content = current_publickey.public_bytes(crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo)
current_publickey = crypto.dump_publickey(
crypto.FILETYPE_ASN1,
crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
)
except (crypto.Error, ValueError):
return False
desired_publickey = crypto.dump_publickey(
crypto.FILETYPE_ASN1,
crypto_utils.load_privatekey(self.privatekey_path, self.privatekey_passphrase)
)
return current_publickey == desired_publickey
if not state_and_perms:
return state_and_perms
return _check_privatekey()
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'format': self.format,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
force=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
privatekey_path=dict(type='path'),
format=dict(type='str', choices=['PEM', 'OpenSSH'], default='PEM'),
privatekey_passphrase=dict(type='str', no_log=True),
),
supports_check_mode=True,
add_file_common_args=True,
required_if=[('state', 'present', ['privatekey_path'])]
)
if not pyopenssl_found:
module.fail_json(msg='the python pyOpenSSL module is required')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or the file is not a directory' % base_dir
)
public_key = PublicKey(module)
if public_key.state == 'present':
if module.check_mode:
result = public_key.dump()
result['changed'] = module.params['force'] or not public_key.check(module)
module.exit_json(**result)
try:
public_key.generate(module)
except PublicKeyError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = public_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
try:
public_key.remove()
except PublicKeyError as exc:
module.fail_json(msg=to_native(exc))
result = public_key.dump()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,198,091,442,209,563,000 | 34.496774 | 159 | 0.60896 | false | 3.72638 | false | false | false | 0.002999 |
ssteo/moviepy | moviepy/video/io/ImageSequenceClip.py | 1 | 4955 | import os
import numpy as np
from ..VideoClip import VideoClip
from imageio import imread
class ImageSequenceClip(VideoClip):
"""
A VideoClip made from a series of images.
Parameters
-----------
sequence
Can be one of these:
- The name of a folder (containing only pictures). The pictures
will be considered in alphanumerical order.
- A list of names of image files. In this case you can choose to
load the pictures in memory pictures
- A list of Numpy arrays representing images. In this last case,
masks are not supported currently.
fps
Number of picture frames to read per second. Instead, you can provide
the duration of each image with durations (see below)
durations
List of the duration of each picture.
with_mask
Should the alpha layer of PNG images be considered as a mask ?
ismask
Will this sequence of pictures be used as an animated mask.
Notes
------
    If your sequence is made of image files, the only image kept in
    memory is the last one read; frames are loaded from disk on demand.
"""
def __init__(self, sequence, fps=None, durations=None, with_mask=True,
ismask=False, load_images=False):
# CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE
if (fps is None) and (durations is None):
raise ValueError("Please provide either 'fps' or 'durations'.")
VideoClip.__init__(self, ismask=ismask)
# Parse the data
fromfiles = True
if isinstance(sequence, list):
if isinstance(sequence[0], str):
if load_images:
sequence = [imread(f) for f in sequence]
fromfiles = False
else:
                    fromfiles = True
else:
# sequence is already a list of numpy arrays
fromfiles = False
else:
# sequence is a folder name, make it a list of files:
fromfiles = True
sequence = sorted([os.path.join(sequence, f)
for f in os.listdir(sequence)])
#check that all the images are of the same size
if isinstance(sequence[0], str):
size = imread(sequence[0]).shape
else:
size = sequence[0].shape
for image in sequence:
image1=image
if isinstance(image, str):
image1=imread(image)
if size != image1.shape:
raise Exception("Moviepy: ImageSequenceClip requires all images to be the same size")
self.fps = fps
if fps is not None:
durations = [1.0/fps for image in sequence]
self.images_starts = [1.0*i/fps-np.finfo(np.float32).eps for i in range(len(sequence))]
else:
self.images_starts = [0]+list(np.cumsum(durations))
self.durations = durations
self.duration = sum(durations)
self.end = self.duration
self.sequence = sequence
def find_image_index(t):
return max([i for i in range(len(self.sequence))
if self.images_starts[i]<=t])
if fromfiles:
self.lastindex = None
self.lastimage = None
def make_frame(t):
index = find_image_index(t)
if index != self.lastindex:
self.lastimage = imread(self.sequence[index])[:,:,:3]
self.lastindex = index
return self.lastimage
if with_mask and (imread(self.sequence[0]).shape[2]==4):
self.mask = VideoClip(ismask=True)
self.mask.lastindex = None
self.mask.lastimage = None
def mask_make_frame(t):
index = find_image_index(t)
if index != self.mask.lastindex:
frame = imread(self.sequence[index])[:,:,3]
self.mask.lastimage = frame.astype(float)/255
self.mask.lastindex = index
return self.mask.lastimage
self.mask.make_frame = mask_make_frame
self.mask.size = mask_make_frame(0).shape[:2][::-1]
else:
def make_frame(t):
index = find_image_index(t)
return self.sequence[index][:,:,:3]
if with_mask and (self.sequence[0].shape[2]==4):
self.mask = VideoClip(ismask=True)
def mask_make_frame(t):
index = find_image_index(t)
return 1.0*self.sequence[index][:,:,3]/255
self.mask.make_frame = mask_make_frame
self.mask.size = mask_make_frame(0).shape[:2][::-1]
self.make_frame = make_frame
self.size = make_frame(0).shape[:2][::-1]
| mit | -8,786,644,600,333,354,000 | 29.398773 | 100 | 0.534612 | false | 4.331294 | false | false | false | 0.008274 |
Schnaffon/clamav-devel | win32/clamav-for-windows/sigui/wxWidgets-2.9.1/misc/scripts/png2c.py | 12 | 1923 | #!/usr/bin/python
# This script is a slightly modified version of the original found at
#
# http://wiki.wxwidgets.org/Embedding_PNG_Images-Bin2c_In_Python
#
# without any copyright attribution so it is assumed it can be used under
# wxWindows licence as the rest of the wiki material.
import sys
import os
import os.path
import re
import array
USAGE = """png2c - Embed a PNG in a C header file (like XPM)
Usage: png2c [file ..]

Outputs the input PNG files as C structures on stdout"""
if len(sys.argv) < 2:
print USAGE
sys.exit(1)
r = re.compile("^([a-zA-Z._][a-zA-Z._0-9]*)[.][pP][nN][gG]$")
for path in sys.argv[1:]:
filename = os.path.basename(path)
m = r.match(filename)
# Allow only filenames that make sense
# as C variable names
    if not m:
print "Skipped file (unsuitable filename): " + filename
continue
# Read PNG file as character array
bytes = array.array('B', open(path, "rb").read())
count = len(bytes)
# Create the C header
text = "/* %s - %d bytes */\n" \
"static const unsigned char %s_png[] = {\n" % (filename, count, m.group(1))
# Iterate the characters, we want
# lines like:
# 0x01, 0x02, .... (8 values per line maximum)
i = 0
for byte in bytes:
# Every new line starts with two whitespaces
if (i % 8) == 0:
text += " "
# Then the hex data (up to 8 values per line)
text += "0x%02x" % (byte)
# Separate all but the last values
if (i + 1) < count:
text += ", "
if (i % 8) == 7:
text += '\n'
i += 1
# Now conclude the C source
text += "};\n\n"
print text
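
# For a hypothetical 4-byte input named icon.png, the emitted header would
# look like this (sketch of the format produced above; 0x89 0x50 0x4e 0x47 is
# the PNG signature):
#
#   /* icon.png - 4 bytes */
#   static const unsigned char icon_png[] = {
#     0x89, 0x50, 0x4e, 0x47};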
| gpl-2.0 | -1,775,812,428,795,662,300 | 29.52381 | 90 | 0.518461 | false | 3.719536 | false | false | false | 0.0052 |
daevaorn/sentry | src/sentry/management/commands/runserver.py | 1 | 2943 | from __future__ import absolute_import
import os
import os.path
import sys
from django.conf import settings
from django.core.management.base import CommandError
from django.core.management.color import color_style
from django.core.management.commands.runserver import Command as RunserverCommand
from optparse import make_option
from subprocess import Popen
class Command(RunserverCommand):
"""
A version of Django's runserver which bundles Sentry's development
tooling (such as static assets).
"""
help = "Starts a lightweight Web server for development"
option_list = RunserverCommand.option_list + (
make_option(
'--no-watchers', action='store_false', dest='use_watcher',
default=True,
help='Tells Sentry to NOT automatically recompile static distributions.'),
make_option(
'--watchers', action='store_true', dest='use_watcher',
default=True,
            help='Tells Sentry to automatically recompile static distributions.'),
)
cwd = os.path.realpath(os.path.join(settings.PROJECT_ROOT, os.pardir, os.pardir))
def get_env(self):
from sentry.app import env
result = os.environ.copy()
result.update({
'SENTRY_CONF': env.data['config'],
})
return result
def get_watchers(self):
return settings.SENTRY_WATCHERS
def run_watchers(self, verbosity, **options):
if self.verbosity:
stdout = None
else:
stdout = open('/dev/null', 'w')
env = self.get_env()
result = []
for watcher in self.get_watchers():
if self.verbosity:
self.stdout.write(self.style.HTTP_INFO('>> Running {0}'.format(watcher)))
try:
result.append(Popen(watcher, cwd=self.cwd, stdout=stdout, env=env))
except OSError:
raise CommandError('{0} not found.'.format(watcher[0]))
return result
def run_server(self, verbosity, **options):
if self.verbosity:
self.stdout.write(self.style.HTTP_INFO('>> Launching webserver..'))
return Popen(sys.argv + ['--no-watchers'], cwd=self.cwd,
env=self.get_env())
def run(self, *args, **options):
self.style = color_style()
self.verbosity = int(options['verbosity'])
if options['use_watcher']:
watcher_list = []
server = None
try:
watcher_list = self.run_watchers(**options)
server = self.run_server(**options)
server.wait()
finally:
if server and server.poll() is None:
server.kill()
for watcher in watcher_list:
if watcher.poll() is None:
watcher.kill()
else:
super(Command, self).run(*args, **options)
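
# Typical invocations (sketch; assumes 'sentry' is the installed CLI entry
# point for this management command):
#
#   sentry runserver                 # dev server plus static asset watchers
#   sentry runserver --no-watchers   # dev server only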
| bsd-3-clause | -4,056,635,716,688,804,400 | 32.827586 | 89 | 0.584777 | false | 4.277616 | false | false | false | 0.002039 |
yunxliu/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_ro_script-src_self_unsafe-inline.py | 30 | 3385 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy-Report-Only", _CSP)
response.headers.set("X-Content-Security-Policy-Report-Only", _CSP)
response.headers.set("X-WebKit-CSP-Report-Only", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_ro_script-src_self_unsafe-inline</title>
<link rel="author" title="Intel" href="http://www.intel.com/"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script src="support/csp.js"></script>
<script src='""" + url1 + """/tests/csp/support/test.js'></script>
<script>
test(function() {
var d = document.getElementById("log");
assert_true(typeof d == "object", "HTML div element is of type object");
            assert_true(d.toString() == "[object HTMLDivElement]", "HTML div element is of [object HTMLDivElement]");
}, document.title);
test(function() {
assert_equals(X, 10, "X is 10");
assert_equals(Y, 27, "Y is X+17");
}, document.title + "_internal");
test(function() {
assert_true(typeof getVideoURI == "function", "Function getVideoURI is undefined");
}, document.title + "_external");
</script>
</body>
</html> """
| bsd-3-clause | 7,977,293,502,638,320,000 | 43.539474 | 119 | 0.682422 | false | 3.872998 | true | false | false | 0.001477 |
tropp/acq4 | acq4/util/HelpfulException.py | 4 | 1453 | ## test to see if new branch is working
import sys
class HelpfulException(Exception):
"""Allows for stacked exceptions.
Initalization:
message: The error message to the user. ex: Device could not be found.
exc: The original exception object
reasons: Reasons why the exception may have occurred. ex: "a. Device initialization failed during startup. b. Device Gui was closed."
docs: Referral to documentation.
When you catch a HelpfulException:
-- add additional information to the original exception
-- use self.prependErr("Additional message, ex: Protocol initiation failed. ", exc, reasons="a. A device could not be found.", docs='')
"""
def __init__(self, message='', exc=None, reasons=None, docs=None, **kwargs):
Exception.__init__(self, message)
self.kwargs = kwargs
if exc is None:
exc = sys.exc_info()
self.oldExc = exc
#self.messages = [message]
if reasons is None:
self.reasons = []
else:
self.reasons = reasons
if docs is None:
self.docs = []
else:
self.docs = docs
#def prependErr(self, msg, exc, reasons='', docs=''):
#self.messages.insert(0, msg)
#self.excs.insert(0, exc)
#self.reasons.insert(0, reasons)
#self.reasons.insert(0, docs)
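
# Minimal usage sketch (illustrative only, not part of the original module):
# wrap a low-level error in a HelpfulException carrying a user-facing reason.
if __name__ == '__main__':
    try:
        try:
            raise ValueError("low-level failure")
        except ValueError:
            raise HelpfulException("Device could not be found.",
                                   reasons=["a. Device initialization failed during startup."])
    except HelpfulException as exc:
        print("%s (reasons: %s)" % (exc, exc.reasons))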
| mit | 3,204,513,224,228,436,500 | 37.263158 | 146 | 0.581555 | false | 4.350299 | false | false | false | 0.0117 |
noxora/flask-base | flask/lib/python3.4/site-packages/pip/_vendor/distlib/_backport/tarfile.py | 422 | 92628 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
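
# Round-trip sketch for the two encodings described above (values checked
# against the code; not part of the original module):
#
#   >>> itn(0o644)
#   b'0000644\x00'
#   >>> nti(b'0000644\x00')
#   420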
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
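
# For an all-zero 512-byte block both sums are 256, i.e. just the eight
# spaces assumed for the chksum field (8 * 0x20):
#
#   >>> calc_chksums(NUL * BLOCKSIZE)
#   (256, 256)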
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
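        # Example (sketch): size=1024 with blockinfo=[(0, 512)] yields
        # [(True, 0, 512, offset), (False, 512, 1024, None)]: real data
        # for the first half, virtual zero blocks for the rest.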
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
           with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
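            # Fixed-point example: keyword=b"path", value=b"foo" gives l=10,
            # so p settles at 12 and the record below is b"12 path=foo\n"
            # (exactly 12 bytes, length field included).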
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
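
# Quick sketch of building a member header by hand (illustrative only):
#
#   >>> t = TarInfo("hello.txt")
#   >>> t.size = 5
#   >>> len(t.tobuf(format=GNU_FORMAT))
#   512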
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
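    #
    # A hypothetical subclass registration could look like this (sketch, not
    # part of this module):
    #
    #   class XZTarFile(TarFile):
    #       @classmethod
    #       def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
    #           ...
    #       OPEN_METH = {"tar": "taropen", "xz": "xzopen"}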
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes, and
        # absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
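    # Typical gettarinfo()/addfile() pairing, as the docstring suggests
    # (file names hypothetical): build a header from a real file, adjust its
    # attributes, then append header and data together.
    #
    #   ti = tar.gettarinfo("report.txt", arcname="docs/report.txt")
    #   ti.uname = ti.gname = "root"
    #   with bltn_open("report.txt", "rb") as f:
    #       tar.addfile(ti, f)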
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
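    # Reading a single member without writing anything to disk (sketch):
    #
    #   f = tar.extractfile("logs/app.log")
    #   if f is not None:            # None for directories, devices, etc.
    #       first_line = f.readline()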
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            # The platform cannot create the link itself; fall back to
            # extracting a copy of the referenced file instead.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
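# For example, is_tarfile("archive.tar.gz") returns True for any archive the
# OPEN_METH table can handle (plain, gzip or bzip2 tar) and False otherwise;
# the file name here is hypothetical.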
bltn_open = open
open = TarFile.open
| mit | 5,432,366,884,000,980,000 | 34.530495 | 103 | 0.539059 | false | 4.285952 | false | false | false | 0.001684 |
mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v3-8192/lingvo/tasks/mt/params/wmt14_en_de.py | 3 | 6670 | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT Models on WMT'14 English-German machine translation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import model_registry
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model_params
from lingvo.tasks.mt import base_config
from lingvo.tasks.mt import input_generator
from lingvo.tasks.mt import model
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerBase(base_model_params.SingleTaskModelParams):
"""Params for WMT'14 En->De."""
DATADIR = '/usr/local/google/wmt14/wpm/'
VOCAB_SIZE = 32000
def _CommonInputParams(self, is_eval):
"""Input generator params for WMT'14 En->De."""
p = input_generator.NmtInput.Params()
p.tokenizer.vocab_size = self.VOCAB_SIZE
if is_eval:
p.file_random_seed = 27182818
p.file_parallelism = 1
p.file_buffer_size = 1
p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 137, 200]
p.bucket_batch_limit = [16] * 8 + [4] * 2
else:
p.file_random_seed = 0
p.file_parallelism = 16
p.file_buffer_size = 10000000
p.bucket_upper_bound = ([8, 10, 12, 14, 16, 20, 24, 28] +
[32, 40, 48, 56, 64, 80, 96])
p.bucket_batch_limit = ([512, 409, 341, 292, 256, 204, 170, 146] +
[128, 102, 85, 73, 64, 51, 42])
return p
def Train(self):
p = self._CommonInputParams(is_eval=False)
p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR,
'train.tfrecords-*')
p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
'wpm-ende.voc')
p.num_samples = 4492447
return p
def Dev(self):
p = self._CommonInputParams(is_eval=True)
p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR, 'dev.tfrecords')
p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
'wpm-ende.voc')
p.num_samples = 3000
return p
def Test(self):
p = self._CommonInputParams(is_eval=True)
p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR, 'test.tfrecords')
p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
'wpm-ende.voc')
p.num_samples = 2737
return p
def Task(self):
p = base_config.SetupTransformerParams(
model.TransformerModel.Params(),
name='wmt14_en_de_transformer_base',
vocab_size=self.VOCAB_SIZE,
model_dim=512,
hidden_dim=2048,
num_heads=8,
num_layers=6,
residual_dropout_prob=0.1,
input_dropout_prob=0.1,
learning_rate=3.0,
warmup_steps=40000)
p.eval.samples_per_summary = 7500
return p
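  # These settings (model_dim 512, hidden_dim 2048, 8 heads, 6 layers,
  # dropout 0.1) match the "base" configuration of the original Transformer
  # paper (Vaswani et al., 2017).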
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerSmall(WmtEnDeTransformerBase):
"""Small Transformer Params for WMT'14 En->De."""
def Task(self):
p = base_config.SetupTransformerParams(
model.TransformerModel.Params(),
name='wmt14_en_de_transformer_small',
vocab_size=self.VOCAB_SIZE,
model_dim=64,
hidden_dim=128,
num_heads=2,
num_layers=2,
residual_dropout_prob=0.1,
input_dropout_prob=0.1,
learning_rate=3.0,
warmup_steps=40000)
p.eval.samples_per_summary = 7500
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerSmallCloudTpu(WmtEnDeTransformerSmall):
"""Small Transformer Params for WMT'14 En->De on TPU."""
def _CommonInputParams(self, is_eval):
p = super(WmtEnDeTransformerSmallCloudTpu, self)._CommonInputParams(is_eval)
p.pad_to_max_seq_length = True
p.source_max_length = p.bucket_upper_bound[-1]
p.bucket_batch_limit = [64] * len(p.bucket_upper_bound)
return p
def Task(self):
p = super(WmtEnDeTransformerSmallCloudTpu, self).Task()
p.decoder.token_emb.max_num_shards = 1
p.encoder.token_emb.max_num_shards = 1
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeRNMT(WmtEnDeTransformerBase):
"""Params for WMT'14 En->De in sync training."""
def _CommonInputParams(self, is_eval):
p = super(WmtEnDeRNMT, self)._CommonInputParams(is_eval)
if is_eval:
p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 200]
p.bucket_batch_limit = [128] * 8 + [32]
else:
p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98]
p.bucket_batch_limit = [128] * 7 + [64]
return p
def Task(self):
p = base_config.SetupRNMTParams(
model.RNMTModel.Params(),
name='wmt14_en_de_rnmtplus_base',
vocab_size=self.VOCAB_SIZE,
embedding_dim=1024,
hidden_dim=1024,
num_heads=4,
num_encoder_layers=6,
num_decoder_layers=8,
learning_rate=1e-4,
l2_regularizer_weight=1e-5,
lr_warmup_steps=500,
lr_decay_start=400000,
lr_decay_end=1200000,
lr_min=0.5,
ls_uncertainty=0.1,
atten_dropout_prob=0.3,
residual_dropout_prob=0.3,
adam_beta2=0.98,
adam_epsilon=1e-6,
)
p.eval.samples_per_summary = 7500
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeRNMTCloudTpu(WmtEnDeRNMT):
"""Params for WMT'14 En->De in sync training on TPU."""
def _CommonInputParams(self, is_eval):
p = super(WmtEnDeRNMTCloudTpu, self)._CommonInputParams(is_eval)
p.pad_to_max_seq_length = True
p.source_max_length = p.bucket_upper_bound[-1]
p.bucket_batch_limit = [16] * len(p.bucket_upper_bound)
return p
def Task(self):
p = super(WmtEnDeRNMTCloudTpu, self).Task()
p.encoder.emb.max_num_shards = 1
p.decoder.emb.max_num_shards = 1
return p
| apache-2.0 | -923,244,760,926,141,700 | 31.378641 | 107 | 0.632534 | false | 3.225338 | false | false | false | 0.005397 |
wdzhou/mantid | Framework/PythonInterface/plugins/functions/HallRoss.py | 1 | 2107 | #pylint: disable=no-init,invalid-name
'''
@author Spencer Howells, ISIS
@date December 05, 2013
    Copyright © 2007-8 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge National Laboratory & European Spallation Source
This file is part of Mantid.
Mantid is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Mantid is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
File change history is stored at: <https://github.com/mantidproject/mantid>
Code Documentation is available at: <http://doxygen.mantidproject.org>
'''
from __future__ import (absolute_import, division, print_function)
import math
import numpy as np
from mantid.api import IFunction1D, FunctionFactory
class HallRoss(IFunction1D):
def category(self):
return "QuasiElastic"
def init(self):
# Active fitting parameters
self.declareParameter("Tau", 1.0, 'Residence time')
self.declareParameter("L", 0.2, 'Jump length')
def function1D(self, xvals):
tau = self.getParameterValue("Tau")
l = self.getParameterValue("L")
l = l**2 / 2
xvals = np.array(xvals)
hwhm = (1.0 - np.exp( -l * xvals * xvals )) / tau
return hwhm
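    # The expression above implements the Hall-Ross half-width at half-maximum
    #   HWHM(Q) = (1 - exp(-Q**2 * L**2 / 2)) / tau
    # with jump length L and residence time tau, i.e. the parameters declared
    # in init().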
def functionDeriv1D(self, xvals, jacobian):
tau = self.getParameterValue("Tau")
l = self.getParameterValue("L")
l = l**2 / 2
i = 0
for x in xvals:
ex = math.exp(-l*x*x)
h = (1.0-ex)/tau
jacobian.set(i,0,-h/tau)
jacobian.set(i,1,x*x*ex/tau)
i += 1
# Required to have Mantid recognise the new function
FunctionFactory.subscribe(HallRoss)
| gpl-3.0 | -1,202,712,966,458,093,300 | 30.924242 | 124 | 0.682487 | false | 3.535235 | false | false | false | 0.00617 |
ntddk/pemu | roms/seabios/scripts/checkrom.py | 17 | 3347 | #!/usr/bin/env python
# Script to check a bios image and report info on it.
#
# Copyright (C) 2008 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, struct
import layoutrom, buildrom
from python23compat import as_bytes
def subst(data, offset, new):
return data[:offset] + new + data[offset + len(new):]
def checksum(data, start, size, csum):
sumbyte = buildrom.checksum(data[start:start+size])
return subst(data, start+csum, sumbyte)
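# For example, the CSM fixup in main() calls
#   rawdata = checksum(rawdata, tableofs, tablesize, CSUM_FIELD_OFS)
# which recomputes the checksum byte over `tablesize' bytes of the table and
# patches it in at offset tableofs + CSUM_FIELD_OFS.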
def main():
# Get args
objinfo, finalsize, rawfile, outfile = sys.argv[1:]
# Read in symbols
objinfofile = open(objinfo, 'r')
symbols = layoutrom.parseObjDump(objinfofile, 'in')[1]
# Read in raw file
f = open(rawfile, 'rb')
rawdata = f.read()
f.close()
datasize = len(rawdata)
finalsize = int(finalsize) * 1024
if finalsize == 0:
finalsize = 64*1024
if datasize > 64*1024:
finalsize = 128*1024
if datasize > 128*1024:
finalsize = 256*1024
if datasize > finalsize:
print("Error! ROM doesn't fit (%d > %d)" % (datasize, finalsize))
print(" You have to either increate the size (CONFIG_ROM_SIZE)")
print(" or turn off some features (such as hardware support not")
print(" needed) to make it fit. Trying a more recent gcc version")
print(" might work too.")
sys.exit(1)
# Sanity checks
start = symbols['code32flat_start'].offset
end = symbols['code32flat_end'].offset
expend = layoutrom.BUILD_BIOS_ADDR + layoutrom.BUILD_BIOS_SIZE
if end != expend:
print("Error! Code does not end at 0x%x (got 0x%x)" % (
expend, end))
sys.exit(1)
if datasize > finalsize:
print("Error! Code is too big (0x%x vs 0x%x)" % (
datasize, finalsize))
sys.exit(1)
expdatasize = end - start
if datasize != expdatasize:
print("Error! Unknown extra data (0x%x vs 0x%x)" % (
datasize, expdatasize))
sys.exit(1)
# Fix up CSM Compatibility16 table
if 'csm_compat_table' in symbols and 'entry_csm' in symbols:
# Field offsets within EFI_COMPATIBILITY16_TABLE
ENTRY_FIELD_OFS = 14 # Compatibility16CallOffset (UINT16)
SIZE_FIELD_OFS = 5 # TableLength (UINT8)
CSUM_FIELD_OFS = 4 # TableChecksum (UINT8)
tableofs = symbols['csm_compat_table'].offset - symbols['code32flat_start'].offset
entry_addr = symbols['entry_csm'].offset - layoutrom.BUILD_BIOS_ADDR
entry_addr = struct.pack('<H', entry_addr)
rawdata = subst(rawdata, tableofs+ENTRY_FIELD_OFS, entry_addr)
tsfield = tableofs+SIZE_FIELD_OFS
tablesize = ord(rawdata[tsfield:tsfield+1])
rawdata = checksum(rawdata, tableofs, tablesize, CSUM_FIELD_OFS)
# Print statistics
runtimesize = end - symbols['code32init_end'].offset
print("Total size: %d Fixed: %d Free: %d (used %.1f%% of %dKiB rom)" % (
datasize, runtimesize, finalsize - datasize
, (datasize / float(finalsize)) * 100.0
, int(finalsize / 1024)))
# Write final file
f = open(outfile, 'wb')
f.write((as_bytes("\0") * (finalsize - datasize)) + rawdata)
f.close()
if __name__ == '__main__':
main()
| gpl-2.0 | 3,845,735,010,519,052,000 | 34.231579 | 90 | 0.617867 | false | 3.357071 | false | false | false | 0.002988 |
wangxiangyu/horizon | openstack_dashboard/dashboards/project/images/images/forms.py | 14 | 13351 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.forms import ValidationError # noqa
from django.forms.widgets import HiddenInput # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
class CreateImageForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(max_length=255, label=_("Description"),
required=False)
source_type = forms.ChoiceField(
label=_('Image Source'),
required=False,
choices=[('url', _('Image Location')),
('file', _('Image File'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
image_url = forms.CharField(max_length=255,
label=_("Image Location"),
help_text=_("An external (HTTP) URL to load "
"the image from."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-url': _('Image Location'),
'ng-model': 'copyFrom',
'ng-change':
'selectImageFormat(copyFrom)'}),
required=False)
image_file = forms.FileField(label=_("Image File"),
help_text=_("A local image to upload."),
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-file': _('Image File'),
'ng-model': 'imageFile',
'ng-change':
'selectImageFormat(imageFile.name)',
'image-file-on-change': None}),
required=False)
disk_format = forms.ChoiceField(label=_('Format'),
choices=[],
widget=forms.Select(attrs={
'class': 'switchable',
'ng-model': 'diskFormat'}))
architecture = forms.CharField(max_length=255, label=_("Architecture"),
required=False)
minimum_disk = forms.IntegerField(
label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(
label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
is_copying = forms.BooleanField(
label=_("Copy Data"), initial=True, required=False,
help_text=_('Specify this option to copy image data to the image '
'service. If unspecified, image data will be used in its '
'current location.'),
widget=forms.CheckboxInput(attrs={
'class': 'switched',
'data-source-url': _('Image Location'),
'data-switch-on': 'source'}))
is_public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(CreateImageForm, self).__init__(request, *args, **kwargs)
if (not settings.HORIZON_IMAGES_ALLOW_UPLOAD or
not policy.check((("image", "upload_image"),), request)):
self._hide_file_source_type()
if not policy.check((("image", "set_image_location"),), request):
self._hide_url_source_type()
if not policy.check((("image", "publicize_image"),), request):
self._hide_is_public()
self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
def _hide_file_source_type(self):
self.fields['image_file'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'file']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_url_source_type(self):
self.fields['image_url'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'url']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_is_public(self):
self.fields['is_public'].widget = HiddenInput()
self.fields['is_public'].initial = False
def clean(self):
data = super(CreateImageForm, self).clean()
# The image_file key can be missing based on particular upload
# conditions. Code defensively for it here...
image_file = data.get('image_file', None)
image_url = data.get('image_url', None)
if not image_url and not image_file:
raise ValidationError(
_("A image or external image location must be specified."))
elif image_url and image_file:
raise ValidationError(
_("Can not specify both image and external image location."))
else:
return data
def handle(self, request, data):
# Glance does not really do anything with container_format at the
# moment. It requires it is set to the same disk_format for the three
# Amazon image types, otherwise it just treats them as 'bare.' As such
# we will just set that to be that here instead of bothering the user
# with asking them for information we can already determine.
if data['disk_format'] in ('ami', 'aki', 'ari',):
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['is_public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'min_disk': (data['minimum_disk'] or 0),
'min_ram': (data['minimum_ram'] or 0),
'name': data['name'],
'properties': {}}
if data['description']:
meta['properties']['description'] = data['description']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
if (settings.HORIZON_IMAGES_ALLOW_UPLOAD and
policy.check((("image", "upload_image"),), request) and
data.get('image_file', None)):
meta['data'] = self.files['image_file']
elif data['is_copying']:
meta['copy_from'] = data['image_url']
else:
meta['location'] = data['image_url']
try:
image = api.glance.image_create(request, **meta)
messages.success(request,
_('Your image %s has been queued for creation.') %
data['name'])
return image
except Exception as e:
msg = _('Unable to create new image')
# TODO(nikunj2512): Fix this once it is fixed in glance client
if hasattr(e, 'code') and e.code == 400:
if "Invalid disk format" in e.details:
msg = _('Unable to create new image: Invalid disk format '
'%s for image.') % data['disk_format']
elif "Image name too long" in e.details:
msg = _('Unable to create new image: Image name too long.')
exceptions.handle(request, msg)
return False
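    # Effect of the container_format rule in handle(), for illustration:
    #   disk_format 'ami'   -> container_format 'ami'
    #   disk_format 'qcow2' -> container_format 'bare'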
class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(max_length=255, label=_("Description"),
required=False)
kernel = forms.CharField(
max_length=36,
label=_("Kernel ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
ramdisk = forms.CharField(
max_length=36,
label=_("Ramdisk ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
architecture = forms.CharField(
label=_("Architecture"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
disk_format = forms.ChoiceField(
label=_("Format"),
)
minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(UpdateImageForm, self).__init__(request, *args, **kwargs)
self.fields['disk_format'].choices = [(value, name) for value,
name in IMAGE_FORMAT_CHOICES
if value]
if not policy.check((("image", "publicize_image"),), request):
self.fields['public'].widget = forms.CheckboxInput(
attrs={'readonly': 'readonly'})
def handle(self, request, data):
image_id = data['image_id']
error_updating = _('Unable to update image "%s".')
if data['disk_format'] in ['aki', 'ari', 'ami']:
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'name': data['name'],
'min_ram': (data['minimum_ram'] or 0),
'min_disk': (data['minimum_disk'] or 0),
'properties': {'description': data['description']}}
if data['kernel']:
meta['properties']['kernel_id'] = data['kernel']
if data['ramdisk']:
meta['properties']['ramdisk_id'] = data['ramdisk']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
# Ensure we do not delete properties that have already been
# set on an image.
meta['purge_props'] = False
try:
image = api.glance.image_update(request, image_id, **meta)
messages.success(request, _('Image was successfully updated.'))
return image
except Exception:
exceptions.handle(request, error_updating % image_id)
| apache-2.0 | 6,945,701,932,557,604,000 | 44.411565 | 79 | 0.52483 | false | 4.773329 | false | false | false | 0 |
avitalyahel/autolite | autolite_system.py | 1 | 3993 | import sys
import yaml
import common
import consts
import db
import schema
from system import System
SELF_ABS_PATH, SELF_FULL_DIR, SELF_SUB_DIR = consts.get_self_path_dir(__file__)
PACKAGE_NAME = SELF_SUB_DIR
def menu(arguments):
if arguments['list']:
if arguments['--YAML'] or arguments['--JSON']:
where = dict(name=arguments['<name>']) if arguments['<name>'] else dict()
systems = System.list(**where)
common.dump(systems, toyaml=arguments['--YAML'], tojson=arguments['--JSON'],
entry=lambda item: item.__dict__)
else:
_system_list_table(arguments)
elif system_execute(arguments):
pass
else:
try:
if arguments['create']:
System.create(**_system_create_kwargs(arguments))
arguments['--long'] = True
_system_list_table(arguments)
elif arguments['read']:
system_read(arguments)
elif arguments['set']:
system_set(arguments)
elif arguments['delete']:
System(arguments['<name>']).delete()
except NameError as exc:
print(PACKAGE_NAME, 'Error!', exc)
sys.exit(1)
def system_read(arguments):
system = System(arguments['<name>'])
toyaml, tojson = arguments['--YAML'], arguments['--JSON']
if toyaml or tojson:
common.dump([system.__dict__], toyaml, tojson, squash=True)
else:
print(system)
def _system_create_kwargs(arguments):
kwargs = schema.TABLE_SCHEMAS.systems.new()
if arguments['--fields']:
with open(arguments['--fields']) as f:
kwargs.update(yaml.safe_load(f))
kwargs.update(dict((k, arguments['--' + k] or kwargs[k]) for k in kwargs.keys() if ('--' + k) in arguments))
kwargs.update(
name=arguments['<name>'],
ip=arguments['<ip>'],
)
return kwargs
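# Precedence in _system_create_kwargs(): schema defaults are overlaid by the
# optional --fields YAML file, then by explicit command-line options, and
# <name>/<ip> always win. A fields file might look like this (values are
# hypothetical; keys follow the systems schema):
#
#   user: alice
#   installer: /opt/installers/basic.sh
#   comment: lab bench 3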
def _system_list_table(arguments):
if arguments['--long']:
col_names = 'name ip user installer cleaner config monitor comment'.split(' ')
elif arguments['--fields']:
col_names = arguments['--fields'].lower().split(',')
else:
col_names = ['name']
if not arguments['--col-1']:
col_names += ['user', 'comment']
where = dict(name=arguments['<name>']) if arguments['<name>'] else dict()
systems = db.list_table('systems', **where)
rows = ([system[col] for col in col_names] for system in systems)
col_titles = [name.upper() for name in col_names]
common.print_table(col_titles, sorted(rows, key=lambda row: row[0]))
def system_set(arguments):
kwargs = dict(
(field, arguments['<exe>'])
for field in ['installer', 'monitor', 'cleaner', 'config']
if arguments[field]
)
if not kwargs:
if arguments['ip']:
kwargs.update(ip=arguments['<ip>'])
if arguments['comment']:
kwargs.update(comment=arguments['<text>'])
db.update('systems', name=arguments['<name>'], **kwargs)
arguments['--fields'] = 'name,' + ','.join(kwargs.keys())
_system_list_table(arguments)
def system_execute(arguments) -> bool:
if arguments['set']:
return False
for cmd in 'acquire | release | install | clean | monitor | config'.split(' | '):
if arguments[cmd]:
try:
system = System(arguments['<name>'])
cmd_method = getattr(system, cmd)
kwargs = dict(force=arguments['--force']) if cmd == 'release' else dict()
cmd_method(**kwargs)
except PermissionError as exc:
print(PACKAGE_NAME, 'Error!', exc)
sys.exit(1)
except Exception as exc:
if type(exc).__name__.endswith('Warning'):
print(PACKAGE_NAME, 'Warning:', exc)
else:
raise
return True
return False
| apache-2.0 | 8,568,755,214,493,942,000 | 27.119718 | 112 | 0.556975 | false | 4.066191 | false | false | false | 0.001503 |
mabotech/mabo.io | py/vision/vision25/dc2.py | 4 | 1288 |
"""
http://stackoverflow.com/questions/9860667/writing-robust-color-and-size-invariant-circle-detection-with-opencv-based-on
"""
import cv2
import math
import numpy as np
d_red = cv2.cv.RGB(150, 55, 65)
l_red = cv2.cv.RGB(250, 200, 200)
orig = cv2.imread("N16.png")
img = orig.copy()
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
detector = cv2.FeatureDetector_create('MSER')
fs = detector.detect(img2)
fs.sort(key = lambda x: -x.size)
def suppress(x):
    """Return True when keypoint x lies inside a larger MSER keypoint."""
    for f in fs:
        distx = f.pt[0] - x.pt[0]
        disty = f.pt[1] - x.pt[1]
        dist = math.sqrt(distx*distx + disty*disty)
        # Suppress x when a larger feature's centre is closer than half that
        # feature's size, i.e. x sits inside f (simple non-max suppression).
        if (f.size > x.size) and (dist < f.size/2):
            return True
sfs = [x for x in fs if not suppress(x)]
for f in sfs:
cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size/2), d_red, 2, cv2.CV_AA)
cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size/2), l_red, 1, cv2.CV_AA)
h, w = orig.shape[:2]
vis = np.zeros((h, w*2+5), np.uint8)
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
vis[:h, :w] = orig
vis[:h, w+5:w*2+5] = img
cv2.imshow("image", vis)
cv2.imwrite("c_o.jpg", vis)
cv2.waitKey()
cv2.destroyAllWindows() | mit | 4,597,357,482,822,205,400 | 25.854167 | 120 | 0.574534 | false | 2.421053 | false | false | false | 0.011646 |
sprockets/sprockets.http | examples.py | 1 | 1891 | from tornado import web
from sprockets.http import app, mixins
import sprockets.http
class StatusHandler(mixins.ErrorLogger, mixins.ErrorWriter,
web.RequestHandler):
"""Example that exercises the mix-ins in this library."""
def get(self, status_code):
"""
Returns the requested status.
:param int status_code: the status code to return
:queryparam str reason: optional reason phrase
"""
status_code = int(status_code)
if status_code >= 400:
kwargs = {'status_code': status_code}
if self.get_query_argument('reason', None):
kwargs['reason'] = self.get_query_argument('reason')
if self.get_query_argument('log_message', None):
kwargs['log_message'] = self.get_query_argument('log_message')
self.send_error(**kwargs)
else:
self.set_status(status_code)
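    # With the application below listening on port 8888, for example:
    #   GET /status/200                     -> empty 200 response
    #   GET /status/503?reason=Maintenance  -> 503 with a custom reason phrase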
class Application(app.Application):
def __init__(self, **kwargs):
kwargs['debug'] = True
super().__init__(
[web.url(r'/status/(?P<status_code>\d+)', StatusHandler)],
**kwargs)
if __name__ == '__main__':
sprockets.http.run(
Application,
settings={'port': 8888},
log_config={
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'readable': {
'format': '%(levelname)-13s %(name)s: %(message)s',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'readable',
'stream': 'ext://sys.stdout',
}
},
'root': {
'level': 'DEBUG',
'handlers': ['console'],
}
},
)
| bsd-3-clause | 898,375,526,712,694,900 | 28.546875 | 78 | 0.492861 | false | 4.347126 | false | false | false | 0 |
ptisserand/ansible | lib/ansible/plugins/lookup/_redis_kv.py | 22 | 3039 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: redis_kv
author: Jan-Piet Mens <jpmens(at)gmail.com>
version_added: "0.9"
short_description: fetch data from Redis
deprecated:
why: This lookup uses options intermingled with terms which blurs the interface between settings and data
version: '2.9'
alternative: new 'redis' lookup
description:
        - this lookup performs a Redis GET for each supplied key and returns the stored value (an empty string if the key is missing or the server is unreachable)
requirements:
- redis (python library https://github.com/andymccurdy/redis-py/)
options:
_terms:
description: Two element comma separated strings composed of url of the Redis server and key to query
options:
_url:
description: location of redis host in url format
default: 'redis://localhost:6379'
_key:
description: key to query
required: True
"""
EXAMPLES = """
- name: query redis for somekey
debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,somekey') }} is value in Redis for somekey"
"""
RETURN = """
_raw:
description: values stored in Redis
"""
import os
import re
HAVE_REDIS = False
try:
import redis
HAVE_REDIS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
ret = []
for term in terms:
(url, key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
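            # e.g. 'redis://localhost:6379' parses to scheme 'redis',
            # host 'localhost' and port '6379'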
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
            except Exception:
ret.append("") # connection failed or key not found
return ret
| gpl-3.0 | 3,015,296,233,512,909,300 | 31.677419 | 144 | 0.567292 | false | 4.052 | false | false | false | 0.00362 |
factorlibre/openerp-server-6.1 | openerp/report/render/rml2html/rml2html.py | 14 | 16210 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Copyright (C) 2005, Fabien Pinckaers, UCL, FSA
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import cStringIO
from lxml import etree
import copy
import utils
from openerp.report.render.rml2pdf import utils
class _flowable(object):
def __init__(self, template, doc, localcontext = None):
self._tags = {
'title': self._tag_title,
'spacer': self._tag_spacer,
'para': self._tag_para,
'section':self._section,
'nextFrame': self._tag_next_frame,
'blockTable': self._tag_table,
'pageBreak': self._tag_page_break,
'setNextTemplate': self._tag_next_template,
}
self.template = template
self.doc = doc
self.localcontext = localcontext
self._cache = {}
def _tag_page_break(self, node):
return '<br/>'*3
def _tag_next_template(self, node):
return ''
def _tag_next_frame(self, node):
result=self.template.frame_stop()
result+='<br/>'
result+=self.template.frame_start()
return result
def _tag_title(self, node):
node.tag='h1'
return etree.tostring(node)
def _tag_spacer(self, node):
length = 1+int(utils.unit_get(node.get('length')))/35
return "<br/>"*length
def _tag_table(self, node):
new_node = copy.deepcopy(node)
for child in new_node:
new_node.remove(child)
new_node.tag = 'table'
def process(node,new_node):
for child in utils._child_get(node,self):
new_child = copy.deepcopy(child)
new_node.append(new_child)
if len(child):
for n in new_child:
new_child.remove(n)
process(child, new_child)
else:
new_child.text = utils._process_text(self, child.text)
new_child.tag = 'p'
try:
if new_child.get('style').find('terp_tblheader')!= -1:
new_node.tag = 'th'
except:
pass
process(node,new_node)
if new_node.get('colWidths',False):
sizes = map(lambda x: utils.unit_get(x), new_node.get('colWidths').split(','))
tr = etree.SubElement(new_node, 'tr')
for s in sizes:
etree.SubElement(tr, 'td', width=str(s))
return etree.tostring(new_node)
def _tag_para(self, node):
new_node = copy.deepcopy(node)
new_node.tag = 'p'
if new_node.attrib.get('style',False):
new_node.set('class', new_node.get('style'))
new_node.text = utils._process_text(self, node.text)
return etree.tostring(new_node)
def _section(self, node):
result = ''
for child in utils._child_get(node, self):
if child.tag in self._tags:
result += self._tags[child.tag](child)
return result
def render(self, node):
result = self.template.start()
result += self.template.frame_start()
for n in utils._child_get(node, self):
if n.tag in self._tags:
result += self._tags[n.tag](n)
else:
pass
result += self.template.frame_stop()
result += self.template.end()
return result.encode('utf-8').replace('"',"\'").replace('°','°')
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
def __init__(self, posx, width):
self.width = width
self.posx = posx
def tag_start(self):
return "<table border=\'0\' width=\'%d\'><tr><td width=\'%d\'> </td><td>" % (self.width+self.posx,self.posx)
def tag_end(self):
return True
def tag_stop(self):
return '</td></tr></table><br/>'
def tag_mergeable(self):
return False
def merge(self, frame):
pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
def __init__(self, node, style,localcontext = {}):
self.localcontext = localcontext
self.posx = utils.unit_get(node.get('x'))
self.posy = utils.unit_get(node.get('y'))
aligns = {
'drawString': 'left',
'drawRightString': 'right',
'drawCentredString': 'center'
}
align = aligns[node.tag]
self.pos = [(self.posx, self.posy, align, utils._process_text(self, node.text), style.get('td'), style.font_size_get('td'))]
def tag_start(self):
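        # Lay the collected drawString items out left to right as table cells,
        # approximating their absolute x positions with spacer <td> widths.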
self.pos.sort()
res = "<table border='0' cellpadding='0' cellspacing='0'><tr>"
posx = 0
i = 0
for (x,y,align,txt, style, fs) in self.pos:
if align=="left":
pos2 = len(txt)*fs
res+="<td width=\'%d\'></td><td style=\'%s\' width=\'%d\'>%s</td>" % (x - posx, style, pos2, txt)
posx = x+pos2
if align=="right":
res+="<td width=\'%d\' align=\'right\' style=\'%s\'>%s</td>" % (x - posx, style, txt)
posx = x
if align=="center":
res+="<td width=\'%d\' align=\'center\' style=\'%s\'>%s</td>" % ((x - posx)*2, style, txt)
posx = 2*x-posx
i+=1
res+='</tr></table>'
return res
def merge(self, ds):
self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
def __init__(self, node, style, localcontext = {}):
self.localcontext = localcontext
coord = [utils.unit_get(x) for x in utils._process_text(self, node.text).split(' ')]
self.ok = False
self.posx = coord[0]
self.posy = coord[1]
self.width = coord[2]-coord[0]
self.ok = coord[1]==coord[3]
        self.style = style.get('hr')
def tag_start(self):
if self.ok:
return "<table border=\'0\' cellpadding=\'0\' cellspacing=\'0\' width=\'%d\'><tr><td width=\'%d\'></td><td><hr width=\'100%%\' style=\'margin:0px; %s\'></td></tr></table>" % (self.posx+self.width,self.posx,self.style)
else:
return ''
class _rml_stylesheet(object):
def __init__(self, localcontext, stylesheet, doc):
self.doc = doc
self.localcontext = localcontext
self.attrs = {}
self._tags = {
'fontSize': lambda x: ('font-size',str(utils.unit_get(x)+5.0)+'px'),
'alignment': lambda x: ('text-align',str(x))
}
result = ''
for ps in stylesheet.findall('paraStyle'):
attr = {}
attrs = ps.attrib
for key, val in attrs.items():
attr[key] = val
attrs = []
for a in attr:
if a in self._tags:
attrs.append('%s:%s' % self._tags[a](attr[a]))
if len(attrs):
result += 'p.'+attr['name']+' {'+'; '.join(attrs)+'}\n'
self.result = result
def render(self):
return self.result
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.tag in self._styles:
result = self._styles[node.tag](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
def __init__(self, template, localcontext=None):
self.frame_pos = -1
self.localcontext = localcontext
self.frames = []
self.template_order = []
self.page_template = {}
self.loop = 0
self._tags = {
'drawString': _rml_tmpl_draw_string,
'drawRightString': _rml_tmpl_draw_string,
'drawCentredString': _rml_tmpl_draw_string,
'lines': _rml_tmpl_draw_lines
}
self.style = _rml_draw_style()
rc = 'data:image/png;base64,'
self.data = ''
for pt in template.findall('pageTemplate'):
frames = {}
id = pt.get('id')
self.template_order.append(id)
for tmpl in pt.findall('frame'):
posy = int(utils.unit_get(tmpl.get('y1')))
posx = int(utils.unit_get(tmpl.get('x1')))
frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
for tmpl in pt.findall('pageGraphics'):
for n in tmpl:
if n.tag == 'image':
self.data = rc + utils._process_text(self, n.text)
if n.tag in self._tags:
t = self._tags[n.tag](n, self.style,self.localcontext)
frames[(t.posy,t.posx,n.tag)] = t
else:
self.style.update(n)
keys = frames.keys()
keys.sort()
keys.reverse()
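            # Entries that share a y position are merged below (when the tag
            # type allows it) so a single HTML row can carry several graphics.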
self.page_template[id] = []
for key in range(len(keys)):
if key>0 and keys[key-1][0] == keys[key][0]:
if type(self.page_template[id][-1]) == type(frames[keys[key]]):
if self.page_template[id][-1].tag_mergeable():
self.page_template[id][-1].merge(frames[keys[key]])
continue
self.page_template[id].append(frames[keys[key]])
self.template = self.template_order[0]
def _get_style(self):
return self.style
def set_next_template(self):
        self.template = self.template_order[(self.template_order.index(self.template)+1) % len(self.template_order)]
self.frame_pos = -1
def set_template(self, name):
self.template = name
self.frame_pos = -1
def frame_start(self):
result = ''
frames = self.page_template[self.template]
ok = True
while ok:
self.frame_pos += 1
if self.frame_pos>=len(frames):
self.frame_pos=0
self.loop=1
ok = False
continue
f = frames[self.frame_pos]
result+=f.tag_start()
ok = not f.tag_end()
if ok:
result+=f.tag_stop()
return result
def frame_stop(self):
frames = self.page_template[self.template]
f = frames[self.frame_pos]
result=f.tag_stop()
return result
def start(self):
return ''
def end(self):
result = ''
while not self.loop:
result += self.frame_start()
result += self.frame_stop()
return result
class _rml_doc(object):
def __init__(self, data, localcontext):
self.dom = etree.XML(data)
self.localcontext = localcontext
self.filename = self.dom.get('filename')
self.result = ''
def render(self, out):
self.result += '''<!DOCTYPE HTML PUBLIC "-//w3c//DTD HTML 4.0 Frameset//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style type="text/css">
p {margin:0px; font-size:12px;}
td {font-size:14px;}
'''
style = self.dom.findall('stylesheet')[0]
s = _rml_stylesheet(self.localcontext, style, self.dom)
self.result += s.render()
self.result+='''
</style>
'''
list_story =[]
for story in utils._child_get(self.dom, self, 'story'):
template = _rml_template(self.dom.findall('template')[0], self.localcontext)
f = _flowable(template, self.dom, localcontext = self.localcontext)
story_text = f.render(story)
list_story.append(story_text)
del f
if template.data:
tag = '''<img src = '%s' width=80 height=72/>'''%(template.data)
else:
tag = ''
self.result +='''
<script type="text/javascript">
var indexer = 0;
var aryTest = %s ;
function nextData()
{
if(indexer < aryTest.length -1)
{
indexer += 1;
document.getElementById("tiny_data").innerHTML=aryTest[indexer];
}
}
function prevData()
{
if (indexer > 0)
{
indexer -= 1;
document.getElementById("tiny_data").innerHTML=aryTest[indexer];
}
}
</script>
</head>
<body>
%s
<div id="tiny_data">
%s
</div>
<br>
<input type="button" value="next" onclick="nextData();">
<input type="button" value="prev" onclick="prevData();">
</body></html>'''%(list_story,tag,list_story[0])
out.write( self.result)
def parseString(data,localcontext = {}, fout=None):
r = _rml_doc(data, localcontext)
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = cStringIO.StringIO()
r.render(fp)
return fp.getvalue()
def rml2html_help():
print 'Usage: rml2html input.rml >output.html'
print 'Render the standard input (RML) and output an HTML file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
rml2html_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: rml2html input.rml >output.html'
print 'Try \'rml2html --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,473,923,017,847,851,000 | 34.39083 | 229 | 0.520513 | false | 3.779203 | false | false | false | 0.007773 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtscxml/tests/manual/testCpp/genTestSxcml.py | 1 | 2635 | # Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of the QtScxml module of the Qt Toolkit.
#
# $QT_BEGIN_LICENSE:GPL-EXCEPT$
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
# $QT_END_LICENSE$
import random
"""stupid generator of a large scxml state machine"""
nStatesMax=10
depth=5
breath=10
nTransitions=8
nEvents=100
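# Generation knobs: total state budget, maximum nesting depth, maximum
# children per state, maximum outgoing transitions, and event alphabet size.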
nStates = 0
tTotal = 0
depthMax = 0
f = open("out.scxml", "w")
f.write("""<scxml xmlns="http://www.w3.org/2005/07/scxml"
initial="s_1"
version="1.0">""")
knownStates=[]
sIndex=[1]
depthLevel=0
breathLevel=[nStatesMax]
while True:
sName=reduce(lambda x,y:x+"_"+y, map(str,sIndex), "s")
knownStates.append(sName)
f.write("<state id=\"%s\" >\n" % sName)
nStates += 1
if nStates < nStatesMax and depthLevel < depth and random.random() < 0.5:
# go deeper
sIndex.append(1)
breathLevel.append(random.randint(1,breath))
depthLevel += 1
if depthMax < depthLevel:
depthMax = depthLevel
continue
while True:
for iTransition in range(random.randint(1,nTransitions)):
tTotal += 1
target = random.choice(knownStates)
event = ("E%d" % random.randint(1,nEvents))
f.write("""<transition event="%s" target="%s" />\n""" % (event, target))
f.write("</state>\n")
sIndex[depthLevel] += 1
if (nStates < nStatesMax and breathLevel[depthLevel] > sIndex[depthLevel]):
break
depthLevel -= 1
if depthLevel < 0:
break
sIndex.pop()
breathLevel.pop()
if depthLevel < 0:
break
f.write("</scxml>\n")
f.close()
print "totalStates: ", nStates
print "totalTransitions: ", tTotal
print "depthMax: ", depthMax + 1
| gpl-3.0 | -6,041,010,979,763,975,000 | 32.782051 | 84 | 0.670588 | false | 3.467105 | false | false | false | 0.006831 |
bgxavier/nova | nova/tests/functional/v3/test_rescue.py | 25 | 2920 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class RescueJsonTest(test_servers.ServersSampleBase):
extension_name = "os-rescue"
def _rescue(self, uuid):
req_subs = {
'password': 'MySecretPass'
}
response = self._do_post('servers/%s/action' % uuid,
'server-rescue-req', req_subs)
self._verify_response('server-rescue', req_subs, response, 200)
def _unrescue(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'server-unrescue-req', {})
self.assertEqual(response.status_code, 202)
def test_server_rescue(self):
uuid = self._post_server()
self._rescue(uuid)
# Do a server get to make sure that the 'RESCUE' state is set
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'RESCUE'
self._verify_response('server-get-resp-rescue', subs, response, 200)
def test_server_rescue_with_image_ref_specified(self):
uuid = self._post_server()
req_subs = {
'password': 'MySecretPass',
'image_ref': '2341-Abc'
}
response = self._do_post('servers/%s/action' % uuid,
'server-rescue-req-with-image-ref', req_subs)
self._verify_response('server-rescue', req_subs, response, 200)
# Do a server get to make sure that the 'RESCUE' state is set
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'RESCUE'
self._verify_response('server-get-resp-rescue', subs, response, 200)
def test_server_unrescue(self):
uuid = self._post_server()
self._rescue(uuid)
self._unrescue(uuid)
# Do a server get to make sure that the 'ACTIVE' state is back
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'ACTIVE'
self._verify_response('server-get-resp-unrescue', subs, response, 200)
| apache-2.0 | -3,440,162,389,865,488,000 | 34.609756 | 78 | 0.592808 | false | 3.715013 | true | false | false | 0 |
dstftw/youtube-dl | youtube_dl/extractor/googledrive.py | 11 | 10616 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
lowercase_escape,
update_url_query,
)
class GoogleDriveIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
(?:docs|drive)\.google\.com/
(?:
(?:uc|open)\?.*?id=|
file/d/
)|
video\.google\.com/get_player\?.*?docid=
)
(?P<id>[a-zA-Z0-9_-]{28,})
'''
_TESTS = [{
'url': 'https://drive.google.com/file/d/0ByeS4oOUV-49Zzh4R1J6R09zazQ/edit?pli=1',
'md5': '5c602afbbf2c1db91831f5d82f678554',
'info_dict': {
'id': '0ByeS4oOUV-49Zzh4R1J6R09zazQ',
'ext': 'mp4',
'title': 'Big Buck Bunny.mp4',
'duration': 45,
}
}, {
# video can't be watched anonymously due to view count limit reached,
# but can be downloaded (see https://github.com/ytdl-org/youtube-dl/issues/14046)
'url': 'https://drive.google.com/file/d/0B-vUyvmDLdWDcEt4WjBqcmI2XzQ/view',
'md5': 'bfbd670d03a470bb1e6d4a257adec12e',
'info_dict': {
'id': '0B-vUyvmDLdWDcEt4WjBqcmI2XzQ',
'ext': 'mp4',
'title': 'Annabelle Creation (2017)- Z.V1 [TH].MP4',
}
}, {
# video id is longer than 28 characters
'url': 'https://drive.google.com/file/d/1ENcQ_jeCuj7y19s66_Ou9dRP4GKGsodiDQ/edit',
'info_dict': {
'id': '1ENcQ_jeCuj7y19s66_Ou9dRP4GKGsodiDQ',
'ext': 'mp4',
'title': 'Andreea Banica feat Smiley - Hooky Song (Official Video).mp4',
'duration': 189,
},
'only_matching': True,
}, {
'url': 'https://drive.google.com/open?id=0B2fjwgkl1A_CX083Tkowdmt6d28',
'only_matching': True,
}, {
'url': 'https://drive.google.com/uc?id=0B2fjwgkl1A_CX083Tkowdmt6d28',
'only_matching': True,
}]
_FORMATS_EXT = {
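        # Format codes (YouTube-style itags used by Google video streams)
        # mapped to their container extensions.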
'5': 'flv',
'6': 'flv',
'13': '3gp',
'17': '3gp',
'18': 'mp4',
'22': 'mp4',
'34': 'flv',
'35': 'flv',
'36': '3gp',
'37': 'mp4',
'38': 'mp4',
'43': 'webm',
'44': 'webm',
'45': 'webm',
'46': 'webm',
'59': 'mp4',
}
_BASE_URL_CAPTIONS = 'https://drive.google.com/timedtext'
_CAPTIONS_ENTRY_TAG = {
'subtitles': 'track',
'automatic_captions': 'target',
}
_caption_formats_ext = []
_captions_xml = None
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+src="https?://(?:video\.google\.com/get_player\?.*?docid=|(?:docs|drive)\.google\.com/file/d/)(?P<id>[a-zA-Z0-9_-]{28,})',
webpage)
if mobj:
return 'https://drive.google.com/file/d/%s' % mobj.group('id')
def _download_subtitles_xml(self, video_id, subtitles_id, hl):
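        # Fetch and cache the timedtext XML that lists the available subtitle
        # tracks, translation targets and caption formats for this video.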
if self._captions_xml:
return
self._captions_xml = self._download_xml(
self._BASE_URL_CAPTIONS, video_id, query={
'id': video_id,
'vid': subtitles_id,
'hl': hl,
'v': video_id,
'type': 'list',
'tlangs': '1',
'fmts': '1',
'vssids': '1',
}, note='Downloading subtitles XML',
errnote='Unable to download subtitles XML', fatal=False)
if self._captions_xml:
for f in self._captions_xml.findall('format'):
if f.attrib.get('fmt_code') and not f.attrib.get('default'):
self._caption_formats_ext.append(f.attrib['fmt_code'])
def _get_captions_by_type(self, video_id, subtitles_id, caption_type,
origin_lang_code=None):
if not subtitles_id or not caption_type:
return
captions = {}
for caption_entry in self._captions_xml.findall(
self._CAPTIONS_ENTRY_TAG[caption_type]):
caption_lang_code = caption_entry.attrib.get('lang_code')
if not caption_lang_code:
continue
caption_format_data = []
for caption_format in self._caption_formats_ext:
query = {
'vid': subtitles_id,
'v': video_id,
'fmt': caption_format,
'lang': (caption_lang_code if origin_lang_code is None
else origin_lang_code),
'type': 'track',
'name': '',
'kind': '',
}
if origin_lang_code is not None:
query.update({'tlang': caption_lang_code})
caption_format_data.append({
'url': update_url_query(self._BASE_URL_CAPTIONS, query),
'ext': caption_format,
})
captions[caption_lang_code] = caption_format_data
return captions
def _get_subtitles(self, video_id, subtitles_id, hl):
if not subtitles_id or not hl:
return
self._download_subtitles_xml(video_id, subtitles_id, hl)
if not self._captions_xml:
return
return self._get_captions_by_type(video_id, subtitles_id, 'subtitles')
def _get_automatic_captions(self, video_id, subtitles_id, hl):
if not subtitles_id or not hl:
return
self._download_subtitles_xml(video_id, subtitles_id, hl)
if not self._captions_xml:
return
track = self._captions_xml.find('track')
if track is None:
return
origin_lang_code = track.attrib.get('lang_code')
if not origin_lang_code:
return
return self._get_captions_by_type(
video_id, subtitles_id, 'automatic_captions', origin_lang_code)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://docs.google.com/file/d/%s' % video_id, video_id)
title = self._search_regex(
r'"title"\s*,\s*"([^"]+)', webpage, 'title',
default=None) or self._og_search_title(webpage)
duration = int_or_none(self._search_regex(
r'"length_seconds"\s*,\s*"([^"]+)', webpage, 'length seconds',
default=None))
formats = []
fmt_stream_map = self._search_regex(
r'"fmt_stream_map"\s*,\s*"([^"]+)', webpage,
'fmt stream map', default='').split(',')
fmt_list = self._search_regex(
r'"fmt_list"\s*,\s*"([^"]+)', webpage,
'fmt_list', default='').split(',')
if fmt_stream_map and fmt_list:
resolutions = {}
for fmt in fmt_list:
mobj = re.search(
r'^(?P<format_id>\d+)/(?P<width>\d+)[xX](?P<height>\d+)', fmt)
if mobj:
resolutions[mobj.group('format_id')] = (
int(mobj.group('width')), int(mobj.group('height')))
for fmt_stream in fmt_stream_map:
fmt_stream_split = fmt_stream.split('|')
if len(fmt_stream_split) < 2:
continue
format_id, format_url = fmt_stream_split[:2]
f = {
'url': lowercase_escape(format_url),
'format_id': format_id,
'ext': self._FORMATS_EXT[format_id],
}
resolution = resolutions.get(format_id)
if resolution:
f.update({
'width': resolution[0],
'height': resolution[1],
})
formats.append(f)
source_url = update_url_query(
'https://drive.google.com/uc', {
'id': video_id,
'export': 'download',
})
urlh = self._request_webpage(
source_url, video_id, note='Requesting source file',
errnote='Unable to request source file', fatal=False)
if urlh:
def add_source_format(src_url):
formats.append({
'url': src_url,
'ext': determine_ext(title, 'mp4').lower(),
'format_id': 'source',
'quality': 1,
})
if urlh.headers.get('Content-Disposition'):
add_source_format(source_url)
else:
confirmation_webpage = self._webpage_read_content(
urlh, url, video_id, note='Downloading confirmation page',
errnote='Unable to confirm download', fatal=False)
if confirmation_webpage:
confirm = self._search_regex(
r'confirm=([^&"\']+)', confirmation_webpage,
'confirmation code', fatal=False)
if confirm:
add_source_format(update_url_query(source_url, {
'confirm': confirm,
}))
if not formats:
reason = self._search_regex(
r'"reason"\s*,\s*"([^"]+)', webpage, 'reason', default=None)
if reason:
raise ExtractorError(reason, expected=True)
self._sort_formats(formats)
hl = self._search_regex(
r'"hl"\s*,\s*"([^"]+)', webpage, 'hl', default=None)
subtitles_id = None
ttsurl = self._search_regex(
r'"ttsurl"\s*,\s*"([^"]+)', webpage, 'ttsurl', default=None)
if ttsurl:
# the video Id for subtitles will be the last value in the ttsurl
# query string
subtitles_id = ttsurl.encode('utf-8').decode(
'unicode_escape').split('=')[-1]
return {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
'subtitles': self.extract_subtitles(video_id, subtitles_id, hl),
'automatic_captions': self.extract_automatic_captions(
video_id, subtitles_id, hl),
}
| unlicense | -6,601,634,043,978,700,000 | 37.32491 | 148 | 0.477958 | false | 3.798211 | false | false | false | 0.000659 |
aaltinisik/OCBAltinkaya | openerp/tools/yaml_import.py | 75 | 43470 | # -*- coding: utf-8 -*-
import threading
import types
import time # used to eval time.strftime expressions
from datetime import datetime, timedelta
import logging
import openerp
import openerp.sql_db as sql_db
import openerp.workflow
import misc
from config import config
import yaml_tag
import yaml
import re
from lxml import etree
from openerp import SUPERUSER_ID
# YAML import needs both safe and unsafe eval, but let's
# default to /safe/.
unsafe_eval = eval
from safe_eval import safe_eval as eval
import assertion_report
_logger = logging.getLogger(__name__)
class YamlImportException(Exception):
pass
class YamlImportAbortion(Exception):
pass
def _is_yaml_mapping(node, tag_constructor):
value = isinstance(node, types.DictionaryType) \
and len(node.keys()) == 1 \
and isinstance(node.keys()[0], tag_constructor)
return value
def is_comment(node):
return isinstance(node, types.StringTypes)
def is_assert(node):
return isinstance(node, yaml_tag.Assert) \
or _is_yaml_mapping(node, yaml_tag.Assert)
def is_record(node):
return _is_yaml_mapping(node, yaml_tag.Record)
def is_python(node):
return _is_yaml_mapping(node, yaml_tag.Python)
def is_menuitem(node):
return isinstance(node, yaml_tag.Menuitem) \
or _is_yaml_mapping(node, yaml_tag.Menuitem)
def is_function(node):
return isinstance(node, yaml_tag.Function) \
or _is_yaml_mapping(node, yaml_tag.Function)
def is_report(node):
return isinstance(node, yaml_tag.Report)
def is_workflow(node):
return isinstance(node, yaml_tag.Workflow)
def is_act_window(node):
return isinstance(node, yaml_tag.ActWindow)
def is_delete(node):
return isinstance(node, yaml_tag.Delete)
def is_context(node):
return isinstance(node, yaml_tag.Context)
def is_url(node):
return isinstance(node, yaml_tag.Url)
def is_eval(node):
return isinstance(node, yaml_tag.Eval)
def is_ref(node):
return isinstance(node, yaml_tag.Ref) \
or _is_yaml_mapping(node, yaml_tag.Ref)
def is_ir_set(node):
return _is_yaml_mapping(node, yaml_tag.IrSet)
def is_string(node):
return isinstance(node, basestring)
class RecordDictWrapper(dict):
"""
Used to pass a record as locals in eval:
records do not strictly behave like dict, so we force them to.
"""
def __init__(self, record):
self.record = record
def __getitem__(self, key):
if key in self.record:
return self.record[key]
return dict.__getitem__(self, key)
class YamlInterpreter(object):
def __init__(self, cr, module, id_map, mode, filename, report=None, noupdate=False, loglevel=logging.DEBUG):
self.cr = cr
self.module = module
self.id_map = id_map
self.mode = mode
self.filename = filename
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self.loglevel = loglevel
self.pool = openerp.registry(cr.dbname)
self.uid = 1
        self.context = {} # openerp context
self.eval_context = {'ref': self._ref(),
'_ref': self._ref(), # added '_ref' so that record['ref'] is possible
'time': time,
'datetime': datetime,
'timedelta': timedelta}
self.env = openerp.api.Environment(self.cr, self.uid, self.context)
def _log(self, *args, **kwargs):
_logger.log(self.loglevel, *args, **kwargs)
def _ref(self):
return lambda xml_id: self.get_id(xml_id)
def get_model(self, model_name):
return self.pool[model_name]
def validate_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, "The ID reference '%s' must contain at most one dot.\n" \
"It is used to refer to other modules ID, in the form: module.record_id" \
% (xml_id,)
if module != self.module:
module_count = self.pool['ir.module.module'].search_count(self.cr, self.uid, \
['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,)
        if len(id) > 64: # TODO where does 64 come from (DB is 128)? should be a constant or loaded from DB
            _logger.error('id: %s is too long (max: 64)', id)
def get_id(self, xml_id):
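        # Resolve a yaml reference to a database id: ints pass through, cached
        # ids come from id_map, and anything else is looked up in ir.model.data
        # (module-qualified ids such as 'base.main_company' are supported).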
if xml_id is False or xml_id is None:
return False
#if not xml_id:
# raise YamlImportException("The xml_id should be a non empty string.")
elif isinstance(xml_id, types.IntType):
id = xml_id
elif xml_id in self.id_map:
id = self.id_map[xml_id]
else:
if '.' in xml_id:
module, checked_xml_id = xml_id.split('.', 1)
else:
module = self.module
checked_xml_id = xml_id
try:
_, id = self.pool['ir.model.data'].get_object_reference(self.cr, self.uid, module, checked_xml_id)
self.id_map[xml_id] = id
except ValueError:
raise ValueError("""%r not found when processing %s.
This Yaml file appears to depend on missing data. This often happens for
tests that belong to a module's test suite and depend on each other.""" % (xml_id, self.filename))
return id
def get_record(self, xml_id):
if '.' not in xml_id:
xml_id = "%s.%s" % (self.module, xml_id)
return self.env.ref(xml_id)
def get_context(self, node, eval_dict):
context = self.context.copy()
if node.context:
context.update(eval(node.context, eval_dict))
return context
def isnoupdate(self, node):
return self.noupdate or node.noupdate or False
def _get_first_result(self, results, default=False):
if len(results):
value = results[0]
if isinstance(value, types.TupleType):
value = value[0]
else:
value = default
return value
def process_comment(self, node):
return node
def _log_assert_failure(self, msg, *args):
self.assertion_report.record_failure()
_logger.error(msg, *args)
def _get_assertion_id(self, assertion):
if assertion.id:
ids = [self.get_id(assertion.id)]
elif assertion.search:
q = eval(assertion.search, self.eval_context)
ids = self.pool[assertion.model].search(self.cr, self.uid, q, context=assertion.context)
else:
raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
return ids
def process_assert(self, node):
if isinstance(node, dict):
assertion, expressions = node.items()[0]
else:
assertion, expressions = node, []
if self.isnoupdate(assertion) and self.mode != 'init':
_logger.warning('This assertion was not evaluated ("%s").', assertion.string)
return
model = self.get_model(assertion.model)
ids = self._get_assertion_id(assertion)
if assertion.count is not None and len(ids) != assertion.count:
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n'
args = (assertion.string, assertion.count, len(ids))
self._log_assert_failure(msg, *args)
else:
context = self.get_context(assertion, self.eval_context)
for id in ids:
record = model.browse(self.cr, self.uid, id, context)
for test in expressions:
try:
success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record))
except Exception, e:
_logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
raise YamlImportAbortion(e)
if not success:
msg = 'Assertion "%s" FAILED\ntest: %s\n'
args = (assertion.string, test)
for aop in ('==', '!=', '<>', 'in', 'not in', '>=', '<=', '>', '<'):
if aop in test:
left, right = test.split(aop,1)
lmsg = ''
rmsg = ''
try:
lmsg = unsafe_eval(left, self.eval_context, RecordDictWrapper(record))
except Exception, e:
lmsg = '<exc>'
try:
rmsg = unsafe_eval(right, self.eval_context, RecordDictWrapper(record))
except Exception, e:
rmsg = '<exc>'
msg += 'values: ! %s %s %s'
args += ( lmsg, aop, rmsg )
break
self._log_assert_failure(msg, *args)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _coerce_bool(self, value, default=False):
if isinstance(value, types.BooleanType):
b = value
        elif isinstance(value, types.StringTypes):
b = value.strip().lower() not in ('0', 'false', 'off', 'no')
elif isinstance(value, types.IntType):
b = bool(value)
else:
b = default
return b
def create_osv_memory_record(self, record, fields):
model = self.get_model(record.model)
context = self.get_context(record, self.eval_context)
record_dict = self._create_record(model, fields)
id_new = model.create(self.cr, self.uid, record_dict, context=context)
self.id_map[record.id] = int(id_new)
return record_dict
def process_record(self, node):
record, fields = node.items()[0]
model = self.get_model(record.model)
view_id = record.view
if view_id and (view_id is not True) and isinstance(view_id, basestring):
module = self.module
if '.' in view_id:
module, view_id = view_id.split('.',1)
view_id = self.pool['ir.model.data'].get_object_reference(self.cr, SUPERUSER_ID, module, view_id)[1]
if model.is_transient():
record_dict=self.create_osv_memory_record(record, fields)
else:
self.validate_xml_id(record.id)
try:
self.pool['ir.model.data']._get_id(self.cr, SUPERUSER_ID, self.module, record.id)
default = False
except ValueError:
default = True
if self.isnoupdate(record) and self.mode != 'init':
id = self.pool['ir.model.data']._update_dummy(self.cr, SUPERUSER_ID, record.model, self.module, record.id)
# check if the resource already existed at the last update
if id:
                    self.id_map[record.id] = int(id)
return None
else:
if not self._coerce_bool(record.forcecreate):
return None
#context = self.get_context(record, self.eval_context)
# FIXME: record.context like {'withoutemployee':True} should pass from self.eval_context. example: test_project.yml in project module
# TODO: cleaner way to avoid resetting password in auth_signup (makes user creation costly)
context = dict(record.context or {}, no_reset_password=True)
view_info = False
if view_id:
varg = view_id
if view_id is True: varg = False
view_info = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context)
record_dict = self._create_record(model, fields, view_info, default=default)
id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, record.model, \
self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
self.id_map[record.id] = int(id)
if config.get('import_partial'):
self.cr.commit()
def _create_record(self, model, fields, view_info=None, parent={}, default=True):
"""This function processes the !record tag in yaml files. It simulates the record creation through an xml
view (either specified on the !record tag or the default one for this object), including the calls to
on_change() functions, and sending only values for fields that aren't set as readonly.
:param model: model instance
:param fields: dictonary mapping the field names and their values
:param view_info: result of fields_view_get() called on the object
:param parent: dictionary containing the values already computed for the parent, in case of one2many fields
:param default: if True, the default values must be processed too or not
:return: dictionary mapping the field names and their values, ready to use when calling the create() function
:rtype: dict
"""
def _get_right_one2many_view(fg, field_name, view_type):
one2many_view = fg[field_name]['views'].get(view_type)
# if the view is not defined inline, we call fields_view_get()
if not one2many_view:
one2many_view = self.pool[fg[field_name]['relation']].fields_view_get(self.cr, SUPERUSER_ID, False, view_type, self.context)
return one2many_view
def process_val(key, val):
if fg[key]['type'] == 'many2one':
if type(val) in (tuple,list):
val = val[0]
elif fg[key]['type'] == 'one2many':
if val and isinstance(val, (list,tuple)) and isinstance(val[0], dict):
# we want to return only the fields that aren't readonly
# For that, we need to first get the right tree view to consider for the field `key´
one2many_tree_view = _get_right_one2many_view(fg, key, 'tree')
arch = etree.fromstring(one2many_tree_view['arch'].encode('utf-8'))
for rec in val:
# make a copy for the iteration, as we will alter `rec´
rec_copy = rec.copy()
for field_key in rec_copy:
# if field is missing in view or has a readonly modifier, drop it
field_elem = arch.xpath("//field[@name='%s']" % field_key)
if field_elem and (field_elem[0].get('modifiers', '{}').find('"readonly": true') >= 0):
# TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
# order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
del rec[field_key]
# now that unwanted values have been removed from val, we can encapsulate it in a tuple as returned value
val = map(lambda x: (0,0,x), val)
elif fg[key]['type'] == 'many2many':
if val and isinstance(val,(list,tuple)) and isinstance(val[0], (int,long)):
val = [(6,0,val)]
# we want to return only the fields that aren't readonly
if el.get('modifiers', '{}').find('"readonly": true') >= 0:
# TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
# order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
return False
return val
if view_info:
arch = etree.fromstring(view_info['arch'].decode('utf-8'))
view = arch if len(arch) else False
else:
view = False
fields = fields or {}
if view is not False:
fg = view_info['fields']
onchange_spec = model._onchange_spec(self.cr, SUPERUSER_ID, view_info, context=self.context)
# gather the default values on the object. (Can't use `fields´ as parameter instead of {} because we may
# have references like `base.main_company´ in the yaml file and it's not compatible with the function)
defaults = default and model._add_missing_default_values(self.cr, self.uid, {}, context=self.context) or {}
# copy the default values in record_dict, only if they are in the view (because that's what the client does)
# the other default values will be added later on by the create().
record_dict = dict([(key, val) for key, val in defaults.items() if key in fg])
# Process all on_change calls
nodes = [view]
while nodes:
el = nodes.pop(0)
if el.tag=='field':
field_name = el.attrib['name']
assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name)
if field_name in fields:
one2many_form_view = None
if (view is not False) and (fg[field_name]['type']=='one2many'):
# for one2many fields, we want to eval them using the inline form view defined on the parent
one2many_form_view = _get_right_one2many_view(fg, field_name, 'form')
field_value = self._eval_field(model, field_name, fields[field_name], one2many_form_view or view_info, parent=record_dict, default=default)
#call process_val to not update record_dict if values were given for readonly fields
val = process_val(field_name, field_value)
if val:
record_dict[field_name] = val
#if (field_name in defaults) and defaults[field_name] == field_value:
# print '*** You can remove these lines:', field_name, field_value
#if field_name has a default value or a value is given in the yaml file, we must call its on_change()
elif field_name not in defaults:
continue
if not el.attrib.get('on_change', False):
continue
if el.attrib['on_change'] in ('1', 'true'):
# New-style on_change
recs = model.browse(self.cr, SUPERUSER_ID, [], self.context)
result = recs.onchange(record_dict, field_name, onchange_spec)
else:
match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change'], re.DOTALL)
assert match, "Unable to parse the on_change '%s'!" % (el.attrib['on_change'], )
# creating the context
class parent2(object):
def __init__(self, d):
self.d = d
def __getattr__(self, name):
return self.d.get(name, False)
ctx = record_dict.copy()
ctx['context'] = self.context
ctx['uid'] = SUPERUSER_ID
ctx['parent'] = parent2(parent)
for a in fg:
if a not in ctx:
ctx[a] = process_val(a, defaults.get(a, False))
# Evaluation args
args = map(lambda x: eval(x, ctx), match.group(2).split(','))
result = getattr(model, match.group(1))(self.cr, self.uid, [], *args)
for key, val in (result or {}).get('value', {}).items():
if key in fg:
if key not in fields:
# do not shadow values explicitly set in yaml.
record_dict[key] = process_val(key, val)
else:
_logger.debug("The returning field '%s' from your on_change call '%s'"
" does not exist either on the object '%s', either in"
" the view '%s'",
key, match.group(1), model._name, view_info['name'])
else:
nodes = list(el) + nodes
else:
record_dict = {}
for field_name, expression in fields.items():
if field_name in record_dict:
continue
field_value = self._eval_field(model, field_name, expression, default=False)
record_dict[field_name] = field_value
return record_dict
def process_ref(self, node, field=None):
assert node.search or node.id, '!ref node should have a `search` attribute or `id` attribute'
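        # Two resolution modes: a `search` domain evaluated against the given
        # model (or the field's comodel), or a plain xml id via get_id().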
if node.search:
if node.model:
model_name = node.model
elif field:
model_name = field.comodel_name
else:
raise YamlImportException('You need to give a model for the search, or a field to infer it.')
model = self.get_model(model_name)
q = eval(node.search, self.eval_context)
ids = model.search(self.cr, self.uid, q)
if node.use:
instances = model.browse(self.cr, self.uid, ids)
value = [inst[node.use] for inst in instances]
else:
value = ids
elif node.id:
if field and field.type == 'reference':
record = self.get_record(node.id)
value = "%s,%s" % (record._name, record.id)
else:
value = self.get_id(node.id)
else:
value = None
return value
def process_eval(self, node):
return eval(node.expression, self.eval_context)
def _eval_field(self, model, field_name, expression, view_info=False, parent={}, default=True):
# TODO this should be refactored as something like model.get_field() in bin/osv
if field_name not in model._fields:
raise KeyError("Object '%s' does not contain field '%s'" % (model, field_name))
field = model._fields[field_name]
if is_ref(expression):
elements = self.process_ref(expression, field)
if field.type in ("many2many", "one2many"):
value = [(6, 0, elements)]
else: # many2one or reference
if isinstance(elements, (list,tuple)):
value = self._get_first_result(elements)
else:
value = elements
elif field.type == "many2one":
value = self.get_id(expression)
elif field.type == "one2many":
other_model = self.get_model(field.comodel_name)
value = [(0, 0, self._create_record(other_model, fields, view_info, parent, default=default)) for fields in expression]
elif field.type == "many2many":
ids = [self.get_id(xml_id) for xml_id in expression]
value = [(6, 0, ids)]
elif field.type == "date" and is_string(expression):
# enforce ISO format for string date values, to be locale-agnostic during tests
time.strptime(expression, misc.DEFAULT_SERVER_DATE_FORMAT)
value = expression
elif field.type == "datetime" and is_string(expression):
# enforce ISO format for string datetime values, to be locale-agnostic during tests
time.strptime(expression, misc.DEFAULT_SERVER_DATETIME_FORMAT)
value = expression
elif field.type == "reference":
record = self.get_record(expression)
value = "%s,%s" % (record._name, record.id)
else: # scalar field
if is_eval(expression):
value = self.process_eval(expression)
else:
value = expression
# raise YamlImportException('Unsupported field "%s" or value %s:%s' % (field_name, type(expression), expression))
return value
def process_context(self, node):
self.context = node.__dict__
if node.uid:
self.uid = self.get_id(node.uid)
if node.noupdate:
self.noupdate = node.noupdate
self.env = openerp.api.Environment(self.cr, self.uid, self.context)
def process_python(self, node):
python, statements = node.items()[0]
assert python.model or python.id, "!python node must have attribute `model` or `id`"
if python.id is None:
record = self.pool[python.model]
elif isinstance(python.id, basestring):
record = self.get_record(python.id)
else:
record = self.env[python.model].browse(python.id)
if python.model:
assert record._name == python.model, "`id` is not consistent with `model`"
statements = "\n" * python.first_line + statements.replace("\r\n", "\n")
code_context = {
'self': record,
'model': record._model,
'cr': self.cr,
'uid': self.uid,
'log': self._log,
'context': self.context,
'openerp': openerp,
}
try:
code_obj = compile(statements, self.filename, 'exec')
unsafe_eval(code_obj, {'ref': self.get_id}, code_context)
except AssertionError, e:
self._log_assert_failure('AssertionError in Python code %s (line %d): %s',
python.name, python.first_line, e)
return
except Exception, e:
_logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
raise
else:
self.assertion_report.record_success()
def process_workflow(self, node):
workflow, values = node.items()[0]
if self.isnoupdate(workflow) and self.mode != 'init':
return
if workflow.ref:
id = self.get_id(workflow.ref)
else:
if not values:
raise YamlImportException('You must define a child node if you do not give a ref.')
if not len(values) == 1:
raise YamlImportException('Only one child node is accepted (%d given).' % len(values))
value = values[0]
if not 'model' in value and (not 'eval' in value or not 'search' in value):
raise YamlImportException('You must provide a "model" and an "eval" or "search" to evaluate.')
value_model = self.get_model(value['model'])
local_context = {'obj': lambda x: value_model.browse(self.cr, self.uid, x, context=self.context)}
local_context.update(self.id_map)
id = eval(value['eval'], self.eval_context, local_context)
if workflow.uid is not None:
uid = workflow.uid
else:
uid = self.uid
self.cr.execute('select distinct signal, sequence, id from wkf_transition ORDER BY sequence,id')
signals=[x['signal'] for x in self.cr.dictfetchall()]
if workflow.action not in signals:
raise YamlImportException('Incorrect action %s. No such action defined' % workflow.action)
openerp.workflow.trg_validate(uid, workflow.model, id, workflow.action, self.cr)
def _eval_params(self, model, params):
args = []
for i, param in enumerate(params):
if isinstance(param, types.ListType):
value = self._eval_params(model, param)
elif is_ref(param):
value = self.process_ref(param)
elif is_eval(param):
value = self.process_eval(param)
elif isinstance(param, types.DictionaryType): # supports XML syntax
param_model = self.get_model(param.get('model', model))
if 'search' in param:
q = eval(param['search'], self.eval_context)
ids = param_model.search(self.cr, self.uid, q)
value = self._get_first_result(ids)
elif 'eval' in param:
local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)}
local_context.update(self.id_map)
value = eval(param['eval'], self.eval_context, local_context)
else:
                    raise YamlImportException('You must provide either a !ref or at least an "eval" or a "search" to function parameter #%d.' % i)
else:
value = param # scalar value
args.append(value)
return args
def process_function(self, node):
function, params = node.items()[0]
if self.isnoupdate(function) and self.mode != 'init':
return
model = self.get_model(function.model)
if function.eval:
args = self.process_eval(function.eval)
else:
args = self._eval_params(function.model, params)
method = function.name
getattr(model, method)(self.cr, self.uid, *args)
def _set_group_values(self, node, values):
if node.groups:
group_names = node.groups.split(',')
groups_value = []
for group in group_names:
if group.startswith('-'):
group_id = self.get_id(group[1:])
groups_value.append((3, group_id))
else:
group_id = self.get_id(group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
def process_menuitem(self, node):
self.validate_xml_id(node.id)
if not node.parent:
parent_id = False
self.cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (node.name,))
res = self.cr.fetchone()
values = {'parent_id': parent_id, 'name': node.name}
else:
parent_id = self.get_id(node.parent)
values = {'parent_id': parent_id}
if node.name:
values['name'] = node.name
try:
res = [ self.get_id(node.id) ]
except: # which exception ?
res = None
if node.action:
action_type = node.type or 'act_window'
icons = {
"act_window": 'STOCK_NEW',
"report.xml": 'STOCK_PASTE',
"wizard": 'STOCK_EXECUTE',
"url": 'STOCK_JUMP_TO',
}
values['icon'] = icons.get(action_type, 'STOCK_NEW')
if action_type == 'act_window':
action_id = self.get_id(node.action)
self.cr.execute('select view_type,view_mode,name,view_id,target from ir_act_window where id=%s', (action_id,))
ir_act_window_result = self.cr.fetchone()
assert ir_act_window_result, "No window action defined for this id %s !\n" \
"Verify that this is a window action or add a type argument." % (node.action,)
action_type, action_mode, action_name, view_id, target = ir_act_window_result
if view_id:
self.cr.execute('SELECT type FROM ir_ui_view WHERE id=%s', (view_id,))
# TODO guess why action_mode is ir_act_window.view_mode above and ir_ui_view.type here
action_mode = self.cr.fetchone()
self.cr.execute('SELECT view_mode FROM ir_act_window_view WHERE act_window_id=%s ORDER BY sequence LIMIT 1', (action_id,))
if self.cr.rowcount:
action_mode = self.cr.fetchone()
if action_type == 'tree':
values['icon'] = 'STOCK_INDENT'
elif action_mode and action_mode.startswith('tree'):
values['icon'] = 'STOCK_JUSTIFY_FILL'
elif action_mode and action_mode.startswith('graph'):
values['icon'] = 'terp-graph'
elif action_mode and action_mode.startswith('calendar'):
values['icon'] = 'terp-calendar'
if target == 'new':
values['icon'] = 'STOCK_EXECUTE'
if not values.get('name', False):
values['name'] = action_name
elif action_type == 'wizard':
action_id = self.get_id(node.action)
self.cr.execute('select name from ir_act_wizard where id=%s', (action_id,))
ir_act_wizard_result = self.cr.fetchone()
if (not values.get('name', False)) and ir_act_wizard_result:
values['name'] = ir_act_wizard_result[0]
else:
raise YamlImportException("Unsupported type '%s' in menuitem tag." % action_type)
if node.sequence:
values['sequence'] = node.sequence
if node.icon:
values['icon'] = node.icon
self._set_group_values(node, values)
pid = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \
'ir.ui.menu', self.module, values, node.id, mode=self.mode, \
noupdate=self.isnoupdate(node), res_id=res and res[0] or False)
if node.id and parent_id:
self.id_map[node.id] = int(pid)
if node.action and pid:
action_type = node.type or 'act_window'
action_id = self.get_id(node.action)
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \
'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=node.id)
def process_act_window(self, node):
assert getattr(node, 'id'), "Attribute %s of act_window is empty !" % ('id',)
assert getattr(node, 'name'), "Attribute %s of act_window is empty !" % ('name',)
assert getattr(node, 'res_model'), "Attribute %s of act_window is empty !" % ('res_model',)
self.validate_xml_id(node.id)
view_id = False
if node.view:
view_id = self.get_id(node.view)
if not node.context:
node.context={}
context = eval(str(node.context), self.eval_context)
values = {
'name': node.name,
'type': node.type or 'ir.actions.act_window',
'view_id': view_id,
'domain': node.domain,
'context': context,
'res_model': node.res_model,
'src_model': node.src_model,
'view_type': node.view_type or 'form',
'view_mode': node.view_mode or 'tree,form',
'usage': node.usage,
'limit': node.limit,
'auto_refresh': node.auto_refresh,
'multi': getattr(node, 'multi', False),
}
self._set_group_values(node, values)
if node.target:
values['target'] = node.target
id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \
'ir.actions.act_window', self.module, values, node.id, mode=self.mode)
self.id_map[node.id] = int(id)
if node.src_model:
keyword = 'client_action_relate'
value = 'ir.actions.act_window,%s' % id
replace = node.replace or True
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', keyword, \
node.id, [node.src_model], value, replace=replace, noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
# TODO add remove ir.model.data
def process_delete(self, node):
assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',)
if node.model in self.pool:
if node.search:
ids = self.pool[node.model].search(self.cr, self.uid, eval(node.search, self.eval_context))
else:
ids = [self.get_id(node.id)]
if len(ids):
self.pool[node.model].unlink(self.cr, self.uid, ids)
else:
self._log("Record not deleted.")
def process_url(self, node):
self.validate_xml_id(node.id)
res = {'name': node.name, 'url': node.url, 'target': node.target}
id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \
"ir.actions.act_url", self.module, res, node.id, mode=self.mode)
self.id_map[node.id] = int(id)
# ir_set
if (not node.menu or eval(node.menu)) and id:
keyword = node.keyword or 'client_action_multi'
value = 'ir.actions.act_url,%s' % id
replace = node.replace or True
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \
keyword, node.url, ["ir.actions.act_url"], value, replace=replace, \
noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
def process_ir_set(self, node):
if not self.mode == 'init':
return False
_, fields = node.items()[0]
res = {}
for fieldname, expression in fields.items():
if is_eval(expression):
value = eval(expression.expression, self.eval_context)
else:
value = expression
res[fieldname] = value
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, res['key'], res['key2'], \
res['name'], res['models'], res['value'], replace=res.get('replace',True), \
isobject=res.get('isobject', False), meta=res.get('meta',None))
def process_report(self, node):
values = {}
for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
values[dest] = getattr(node, f)
assert values[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
if getattr(node, field):
values[dest] = getattr(node, field)
if node.auto:
values['auto'] = eval(node.auto)
if node.sxw:
sxw_file = misc.file_open(node.sxw)
try:
sxw_content = sxw_file.read()
values['report_sxw_content'] = sxw_content
finally:
sxw_file.close()
if node.header:
values['header'] = eval(node.header)
values['multi'] = node.multi and eval(node.multi)
xml_id = node.id
self.validate_xml_id(xml_id)
self._set_group_values(node, values)
id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, "ir.actions.report.xml", \
self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
self.id_map[xml_id] = int(id)
if not node.menu or eval(node.menu):
keyword = node.keyword or 'client_print_multi'
value = 'ir.actions.report.xml,%s' % id
replace = node.replace or True
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \
keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
def process_none(self):
"""
Empty node or commented node should not pass silently.
"""
self._log_assert_failure("You have an empty block in your tests.")
def process(self, yaml_string):
"""
Processes a Yaml string. Custom tags are interpreted by 'process_' instance methods.
"""
yaml_tag.add_constructors()
is_preceded_by_comment = False
for node in yaml.load(yaml_string):
is_preceded_by_comment = self._log_node(node, is_preceded_by_comment)
try:
self._process_node(node)
except Exception, e:
_logger.exception(e)
raise
def _process_node(self, node):
if is_comment(node):
self.process_comment(node)
elif is_assert(node):
self.process_assert(node)
elif is_record(node):
self.process_record(node)
elif is_python(node):
self.process_python(node)
elif is_menuitem(node):
self.process_menuitem(node)
elif is_delete(node):
self.process_delete(node)
elif is_url(node):
self.process_url(node)
elif is_context(node):
self.process_context(node)
elif is_ir_set(node):
self.process_ir_set(node)
elif is_act_window(node):
self.process_act_window(node)
elif is_report(node):
self.process_report(node)
elif is_workflow(node):
if isinstance(node, types.DictionaryType):
self.process_workflow(node)
else:
self.process_workflow({node: []})
elif is_function(node):
if isinstance(node, types.DictionaryType):
self.process_function(node)
else:
self.process_function({node: []})
elif node is None:
self.process_none()
else:
raise YamlImportException("Can not process YAML block: %s" % node)
def _log_node(self, node, is_preceded_by_comment):
if is_comment(node):
is_preceded_by_comment = True
self._log(node)
elif not is_preceded_by_comment:
if isinstance(node, types.DictionaryType):
msg = "Creating %s\n with %s"
args = node.items()[0]
self._log(msg, *args)
else:
self._log(node)
else:
is_preceded_by_comment = False
return is_preceded_by_comment
def yaml_import(cr, module, yamlfile, kind, idref=None, mode='init', noupdate=False, report=None):
if idref is None:
idref = {}
loglevel = logging.DEBUG
yaml_string = yamlfile.read()
yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, report=report, noupdate=noupdate, loglevel=loglevel)
yaml_interpreter.process(yaml_string)
# keeps convention of convert.py
convert_yaml_import = yaml_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,484,451,005,321,534,000 | 43.810309 | 177 | 0.544863 | false | 4.036965 | false | false | false | 0.005821 |
madjam/mxnet | python/mxnet/symbol/register.py | 11 | 7366 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Register backend ops in mxnet.symbol namespace."""
import os as _os
import ctypes
import numpy as np
from . import _internal
from ._internal import SymbolBase, _symbol_creator
from ..attribute import AttrScope
from ..base import mx_uint, check_call, _LIB, py_str
from ..symbol_doc import _build_doc
from ..base import _Null, _init_op_module
from ..name import NameManager
# pylint: enable=unused-import
def _generate_symbol_function_code(handle, name, func_name, signature_only=False):
"""Generate function for symbol op by handle and function name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
#signature.append('is_train=False')
signature.append('name=None')
signature.append('attr=None')
signature.append('out=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
if not signature_only:
code.append("""
sym_args = []
for i in {}:
assert isinstance(i, SymbolBase), \\
"Positional arguments must be Symbol instances, " \\
"but got %s"%str(i)
sym_args.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
attr = kwargs.pop('attr', None)
kwargs.update(AttrScope.current.get(attr))
name = kwargs.pop('name', None)
name = NameManager.current.get(name, '%s')
_ = kwargs.pop('out', None)
keys = []
vals = []
sym_kwargs = dict()
for k, v in kwargs.items():
if isinstance(v, SymbolBase):
sym_kwargs[k] = v
else:
keys.append(k)
vals.append(v)"""%(func_name.lower()))
if key_var_num_args:
code.append("""
if '%s' not in kwargs:
keys.append('%s')
vals.append(len(sym_args) + len(sym_kwargs))"""%(
key_var_num_args, key_var_num_args))
code.append("""
return _symbol_creator(%d, sym_args, sym_kwargs, keys, vals, name)"""%(
handle.value))
else:
code.append("""
def %s(%s):"""%(func_name, ', '.join(signature)))
if not signature_only:
code.append("""
kwargs.update(AttrScope.current.get(attr))
sym_kwargs = dict()
keys = []
vals = []
for k, v in kwargs.items():
if isinstance(v, SymbolBase):
sym_kwargs[k] = v
else:
keys.append(k)
vals.append(v)""")
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, SymbolBase), \\
"Argument {name} must be Symbol instances, but got %s"%str({name})
sym_kwargs['{name}'] = {name}""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
code.append("""
name = NameManager.current.get(name, '%s')
return _symbol_creator(%d, None, sym_kwargs, keys, vals, name)"""%(
func_name.lower(), handle.value))
if signature_only:
code.append("""
return (0,)""")
doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s
for s in 'r"""{doc_str}"""'.format(doc_str=doc_str)
.splitlines(True)])
code.insert(1, doc_str_lines)
return ''.join(code), doc_str
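# For illustration only: for an op such as 'FullyConnected', the generated
# source is roughly of the following shape (argument list abridged; the real
# signature is derived from MXSymbolGetAtomicSymbolInfo at runtime):
#
#   def FullyConnected(data=None, weight=None, bias=None, num_hidden=_Null,
#                      name=None, attr=None, out=None, **kwargs):
#       r"""<generated doc string>"""
#       # ...collect sym_kwargs / keys / vals from the arguments...
#       return _symbol_creator(<handle>, None, sym_kwargs, keys, vals, name)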
def _make_symbol_function(handle, name, func_name):
"""Create a symbol function by handle and function name."""
code, doc_str = _generate_symbol_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
symbol_function = local[func_name]
symbol_function.__name__ = func_name
symbol_function.__doc__ = doc_str
symbol_function.__module__ = 'mxnet.symbol'
return symbol_function
_init_op_module('mxnet', 'symbol', _make_symbol_function)
| apache-2.0 | 7,386,424,131,491,884,000 | 35.465347 | 92 | 0.577247 | false | 3.688533 | false | false | false | 0.003394 |
thijstriemstra/pyamf | doc/tutorials/examples/actionscript/ohloh/python/client.py | 8 | 1319 | #!/usr/bin/python
#
# Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
This is an example of using the Ohloh API from a Python client.
Detailed information can be found at the Ohloh website:
http://www.ohloh.net/api
This example uses the ElementTree library for XML parsing
(included in Python 2.5 and newer):
http://effbot.org/zone/element-index.htm
This example retrieves basic Ohloh account information
and outputs it as simple name: value pairs.
Pass your Ohloh API key as the first parameter to this script.
Ohloh API keys are free. If you do not have one, you can obtain
one at the Ohloh website:
http://www.ohloh.net/api_keys/new
Pass the email address of the account as the second parameter
to this script.
"""
import sys
import ohloh
if len(sys.argv) == 3:
api_key = sys.argv[1]
email = sys.argv[2]
else:
print "Usage: client.py <api-key> <email-address>"
sys.exit()
elem = ohloh.getAccount(email, api_key)
# Output all the immediate child properties of an Account
for node in elem.find("result/account"):
if node.tag == "kudo_score":
print "%s:" % node.tag
for score in elem.find("result/account/kudo_score"):
print "\t%s:\t%s" % (score.tag, score.text)
else:
print "%s:\t%s" % (node.tag, node.text)
| mit | -7,514,754,861,452,274,000 | 24.365385 | 63 | 0.685368 | false | 3.18599 | false | false | false | 0.000758 |
cszipper/PyAIML3 | build/lib.linux-x86_64-2.7/aiml/DefaultSubs.py | 14 | 3743 | """This file contains the default (English) substitutions for the
PyAIML kernel. These substitutions may be overridden by using the
Kernel.loadSubs(filename) method. The filename specified should refer
to a Windows-style INI file with the following format:
# lines that start with '#' are comments
# The 'gender' section contains the substitutions performed by the
# <gender> AIML tag, which swaps masculine and feminine pronouns.
[gender]
he = she
she = he
# and so on...
# The 'person' section contains the substitutions performed by the
# <person> AIML tag, which swaps 1st and 2nd person pronouns.
[person]
I = you
you = I
# and so on...
# The 'person2' section contains the substitutions performed by
# the <person2> AIML tag, which swaps 1st and 3nd person pronouns.
[person2]
I = he
he = I
# and so on...
# the 'normal' section contains subtitutions run on every input
# string passed into Kernel.respond(). It's mainly used to
# correct common misspellings, and to convert contractions
# ("WHAT'S") into a format that will match an AIML pattern ("WHAT
# IS").
[normal]
what's = what is
"""
defaultGender = {
# masculine -> feminine
"he": "she",
"him": "her",
"his": "her",
"himself": "herself",
# feminine -> masculine
"she": "he",
"her": "him",
"hers": "his",
"herself": "himself",
}
defaultPerson = {
# 1st->3rd (masculine)
"I": "he",
"me": "him",
"my": "his",
"mine": "his",
"myself": "himself",
# 3rd->1st (masculine)
"he":"I",
"him":"me",
"his":"my",
"himself":"myself",
# 3rd->1st (feminine)
"she":"I",
"her":"me",
"hers":"mine",
"herself":"myself",
}
defaultPerson2 = {
# 1st -> 2nd
"I": "you",
"me": "you",
"my": "your",
"mine": "yours",
"myself": "yourself",
# 2nd -> 1st
"you": "me",
"your": "my",
"yours": "mine",
"yourself": "myself",
}
# TODO: this list is far from complete
defaultNormal = {
"wanna": "want to",
"gonna": "going to",
"I'm": "I am",
"I'd": "I would",
"I'll": "I will",
"I've": "I have",
"you'd": "you would",
"you're": "you are",
"you've": "you have",
"you'll": "you will",
"he's": "he is",
"he'd": "he would",
"he'll": "he will",
"she's": "she is",
"she'd": "she would",
"she'll": "she will",
"we're": "we are",
"we'd": "we would",
"we'll": "we will",
"we've": "we have",
"they're": "they are",
"they'd": "they would",
"they'll": "they will",
"they've": "they have",
"y'all": "you all",
"can't": "can not",
"cannot": "can not",
"couldn't": "could not",
"wouldn't": "would not",
"shouldn't": "should not",
"isn't": "is not",
"ain't": "is not",
"don't": "do not",
"aren't": "are not",
"won't": "will not",
"weren't": "were not",
"wasn't": "was not",
"didn't": "did not",
"hasn't": "has not",
"hadn't": "had not",
"haven't": "have not",
"where's": "where is",
"where'd": "where did",
"where'll": "where will",
"who's": "who is",
"who'd": "who did",
"who'll": "who will",
"what's": "what is",
"what'd": "what did",
"what'll": "what will",
"when's": "when is",
"when'd": "when did",
"when'll": "when will",
"why's": "why is",
"why'd": "why did",
"why'll": "why will",
"it's": "it is",
"it'd": "it would",
"it'll": "it will",
}
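# Illustrative use (sketch): a Kernel seeds its substituters from these dicts,
# and Kernel.loadSubs("mysubs.ini") -- an INI file in the format documented in
# the module docstring above -- overrides them. The file name is hypothetical.
#
#   import aiml
#   k = aiml.Kernel()
#   k.loadSubs("mysubs.ini")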
| bsd-2-clause | -6,177,530,144,578,082,000 | 21.99359 | 70 | 0.500134 | false | 3.063011 | false | false | false | 0.003206 |
domenicosolazzo/Providentia | providentia/core/ml/nlp/keywords.py | 1 | 1068 | from tdidf import TfIdf
import operator
class Keywords(object):
def __init__(self, number_of_keywords=1):
self.number_of_keywords = number_of_keywords
def top_keywords_in_document(self, doc, corpus):
"""
Top n keywords for a document compared with a corpus
:param doc: The document
:param corpus: The corpus of documents
:return: The top n keywords for the document
"""
word_dictionary = {}
for word in set(doc):
word_dictionary[word] = TfIdf(word,doc,corpus)
sorted_d = sorted(word_dictionary.iteritems(), key=operator.itemgetter(1))
sorted_d.reverse()
return [w[0] for w in sorted_d[:self.number_of_keywords]]
def top_keywords_in_corpus(self, corpus):
"""
Top keywords in a corpus
:param corpus: The corpus
:return:Top keywords in a corpus
"""
keyword_list = set()
[[keyword_list.add(x) for x in self.top_keywords_in_document(doc,corpus)] for doc in corpus]
return keyword_list
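# Minimal usage sketch (the corpus below is a hypothetical list of token
# lists, matching what TfIdf expects):
#
#   corpus = [["cat", "sat", "mat"], ["dog", "sat", "log"]]
#   kw = Keywords(number_of_keywords=2)
#   kw.top_keywords_in_document(corpus[0], corpus)
#   kw.top_keywords_in_corpus(corpus)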
| mit | -1,245,000,862,953,338,000 | 33.451613 | 100 | 0.616105 | false | 3.89781 | false | false | false | 0.006554 |
sgerhart/ansible | lib/ansible/modules/windows/win_unzip.py | 28 | 3750 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_unzip
version_added: "2.0"
short_description: Unzips compressed files and archives on the Windows node
description:
- Unzips compressed files and archives.
- Supports .zip files natively.
- Supports other formats supported by the Powershell Community Extensions (PSCX) module (basically everything 7zip supports).
- For non-Windows targets, use the M(unarchive) module instead.
requirements:
- PSCX
options:
src:
description:
- File to be unzipped (provide absolute path).
required: yes
type: path
dest:
description:
- Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
required: yes
type: path
delete_archive:
description:
- Remove the zip file, after unzipping.
type: bool
default: 'no'
aliases: [ rm ]
recurse:
description:
- Recursively expand zipped files within the src file.
- Setting to a value of C(yes) requires the PSCX module to be installed.
type: bool
default: 'no'
creates:
description:
- If this file or directory exists the specified src will not be extracted.
type: path
notes:
- This module is not really idempotent, it will extract the archive every time, and report a change.
- For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX)
has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination
directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
- For non-Windows targets, use the M(unarchive) module instead.
author:
- Phil Schwartz (@schwartzmx)
'''
EXAMPLES = r'''
# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
# $ ansible -i hosts -m win_unzip -a "src=C:\LibraryToUnzip.zip dest=C:\Lib delete_archive=true" all
- name: Unzip a bz2 (BZip) file
win_unzip:
src: C:\Users\Phil\Logs.bz2
dest: C:\Users\Phil\OldLogs
creates: C:\Users\Phil\OldLogs
- name: Unzip gz log
win_unzip:
src: C:\Logs\application-error-logs.gz
dest: C:\ExtractedLogs\application-error-logs
# Unzip .zip file, recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
hosts: all
gather_facts: no
tasks:
- name: Recursively decompress GZ files in ApplicationLogs.zip
win_unzip:
src: C:\Downloads\ApplicationLogs.zip
dest: C:\Application\Logs
recurse: yes
delete_archive: yes
- name: Install PSCX
win_psmodule:
name: Pscx
state: present
'''
RETURN = r'''
dest:
description: The provided destination path
returned: always
type: string
sample: C:\ExtractedLogs\application-error-logs
removed:
description: Whether the module did remove any files during task run
returned: always
type: boolean
sample: True
src:
description: The provided source path
returned: always
type: string
sample: C:\Logs\application-error-logs.gz
'''
| mit | -5,894,211,942,401,742,000 | 32.482143 | 156 | 0.709867 | false | 3.82263 | false | false | false | 0.002933 |
ozamiatin/glance | glance/cmd/replicator.py | 7 | 25625 | #!/usr/bin/env python
# Copyright 2012 Michael Still and Canonical Inc
# Copyright 2014 SoftLayer Technologies, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from six.moves import http_client
import six.moves.urllib.parse as urlparse
from webob import exc
from glance.common import config
from glance.common import exception
from glance.common import utils
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
# NOTE: positional arguments <args> will be parsed before <command> until
# this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428
cli_opts = [
cfg.IntOpt('chunksize',
short='c',
default=65536,
help="Amount of data to transfer per HTTP write."),
cfg.StrOpt('dontreplicate',
short='D',
default=('created_at date deleted_at location updated_at'),
help="List of fields to not replicate."),
cfg.BoolOpt('metaonly',
short='m',
default=False,
help="Only replicate metadata, not images."),
cfg.StrOpt('token',
short='t',
default='',
help=("Pass in your authentication token if you have "
"one. If you use this option the same token is "
"used for both the master and the slave.")),
cfg.StrOpt('mastertoken',
short='M',
default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the master.")),
cfg.StrOpt('slavetoken',
short='S',
default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the slave.")),
cfg.StrOpt('command',
positional=True,
help="Command to be given to replicator"),
cfg.ListOpt('args',
positional=True,
help="Arguments for the command"),
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
logging.register_options(CONF)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
COMMANDS = """Commands:
help <command> Output help for one of the commands below
compare What is missing from the slave glance?
dump Dump the contents of a glance instance to local disk.
livecopy Load the contents of one glance instance into another.
load Load the contents of a local directory into glance.
size Determine the size of a glance instance if dumped to disk.
"""
IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on '
'the slave, but our check for it did '
'not find it. This indicates that we '
'do not have permissions to see all '
'the images on the slave server.')
class ImageService(object):
def __init__(self, conn, auth_token):
"""Initialize the ImageService.
conn: a http_client.HTTPConnection to the glance server
auth_token: authentication token to pass in the x-auth-token header
"""
self.auth_token = auth_token
self.conn = conn
def _http_request(self, method, url, headers, body,
ignore_result_body=False):
"""Perform an HTTP request against the server.
method: the HTTP method to use
url: the URL to request (not including server portion)
headers: headers for the request
body: body to send with the request
ignore_result_body: the body of the result will be ignored
Returns: a http_client response object
"""
if self.auth_token:
headers.setdefault('x-auth-token', self.auth_token)
LOG.debug('Request: %(method)s http://%(server)s:%(port)s'
'%(url)s with headers %(headers)s'
% {'method': method,
'server': self.conn.host,
'port': self.conn.port,
'url': url,
'headers': repr(headers)})
self.conn.request(method, url, body, headers)
response = self.conn.getresponse()
headers = self._header_list_to_dict(response.getheaders())
code = response.status
code_description = http_client.responses[code]
LOG.debug('Response: %(code)s %(status)s %(headers)s'
% {'code': code,
'status': code_description,
'headers': repr(headers)})
if code == 400:
raise exc.HTTPBadRequest(
explanation=response.read())
if code == 500:
raise exc.HTTPInternalServerError(
explanation=response.read())
if code == 401:
raise exc.HTTPUnauthorized(
explanation=response.read())
if code == 403:
raise exc.HTTPForbidden(
explanation=response.read())
if code == 409:
raise exc.HTTPConflict(
explanation=response.read())
if ignore_result_body:
# NOTE: because we are pipelining requests through a single HTTP
# connection, http_client requires that we read the response body
# before we can make another request. If the caller knows they
# don't care about the body, they can ask us to do that for them.
response.read()
return response
def get_images(self):
"""Return a detailed list of images.
Yields a series of images as dicts containing metadata.
"""
params = {'is_public': None}
while True:
url = '/v1/images/detail'
query = urlparse.urlencode(params)
if query:
url += '?%s' % query
response = self._http_request('GET', url, {}, '')
result = jsonutils.loads(response.read())
if not result or 'images' not in result or not result['images']:
return
for image in result.get('images', []):
params['marker'] = image['id']
yield image
def get_image(self, image_uuid):
"""Fetch image data from glance.
image_uuid: the id of an image
Returns: a http_client Response object where the body is the image.
"""
url = '/v1/images/%s' % image_uuid
return self._http_request('GET', url, {}, '')
@staticmethod
def _header_list_to_dict(headers):
"""Expand a list of headers into a dictionary.
headers: a list of [(key, value), (key, value), (key, value)]
Returns: a dictionary representation of the list
"""
d = {}
for (header, value) in headers:
if header.startswith('x-image-meta-property-'):
prop = header.replace('x-image-meta-property-', '')
d.setdefault('properties', {})
d['properties'][prop] = value
else:
d[header.replace('x-image-meta-', '')] = value
return d
def get_image_meta(self, image_uuid):
"""Return the metadata for a single image.
image_uuid: the id of an image
Returns: image metadata as a dictionary
"""
url = '/v1/images/%s' % image_uuid
response = self._http_request('HEAD', url, {}, '',
ignore_result_body=True)
return self._header_list_to_dict(response.getheaders())
@staticmethod
def _dict_to_headers(d):
"""Convert a dictionary into one suitable for a HTTP request.
d: a dictionary
Returns: the same dictionary, with x-image-meta added to every key
"""
h = {}
for key in d:
if key == 'properties':
for subkey in d[key]:
if d[key][subkey] is None:
h['x-image-meta-property-%s' % subkey] = ''
else:
h['x-image-meta-property-%s' % subkey] = d[key][subkey]
else:
h['x-image-meta-%s' % key] = d[key]
return h
def add_image(self, image_meta, image_data):
"""Upload an image.
image_meta: image metadata as a dictionary
image_data: image data as a object with a read() method
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images'
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
headers['Content-Length'] = int(image_meta['size'])
response = self._http_request('POST', url, headers, image_data)
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def add_image_meta(self, image_meta):
"""Update image metadata.
image_meta: image metadata as a dictionary
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images/%s' % image_meta['id']
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
response = self._http_request('PUT', url, headers, '')
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def get_image_service():
"""Get a copy of the image service.
This is done like this to make it easier to mock out ImageService.
"""
return ImageService
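# Tests can swap in a stub by rebinding this factory (sketch only;
# FakeImageService is a hypothetical stand-in class):
#
#   replicator.get_image_service = lambda: FakeImageService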
def replication_size(options, args):
"""%(prog)s size <server:port>
Determine the size of a glance instance if dumped to disk.
server:port: the location of the glance instance.
"""
# Make sure server info is provided
if len(args) < 1:
raise TypeError(_("Too few arguments."))
server, port = utils.parse_valid_host_port(args.pop())
total_size = 0
count = 0
imageservice = get_image_service()
client = imageservice(http_client.HTTPConnection(server, port),
options.slavetoken)
for image in client.get_images():
LOG.debug('Considering image: %(image)s' % {'image': image})
if image['status'] == 'active':
total_size += int(image['size'])
count += 1
print(_('Total size is %(size)d bytes across %(img_count)d images') %
{'size': total_size,
'img_count': count})
def replication_dump(options, args):
"""%(prog)s dump <server:port> <path>
Dump the contents of a glance instance to local disk.
server:port: the location of the glance instance.
path: a directory on disk to contain the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(http_client.HTTPConnection(server, port),
options.mastertoken)
for image in client.get_images():
LOG.debug('Considering: %s' % image['id'])
data_path = os.path.join(path, image['id'])
if not os.path.exists(data_path):
LOG.info(_LI('Storing: %s') % image['id'])
# Dump glance information
with open(data_path, 'w') as f:
f.write(jsonutils.dumps(image))
if image['status'] == 'active' and not options.metaonly:
# Now fetch the image. The metadata returned in headers here
# is the same as that which we got from the detailed images
# request earlier, so we can ignore it here. Note that we also
# only dump active images.
LOG.debug('Image %s is active' % image['id'])
image_response = client.get_image(image['id'])
with open(data_path + '.img', 'wb') as f:
while True:
chunk = image_response.read(options.chunksize)
if not chunk:
break
f.write(chunk)
def _dict_diff(a, b):
"""A one way dictionary diff.
a: a dictionary
b: a dictionary
Returns: True if the dictionaries are different
"""
# Only things the master has which the slave lacks matter
if set(a.keys()) - set(b.keys()):
LOG.debug('metadata diff -- master has extra keys: %(keys)s'
% {'keys': ' '.join(set(a.keys()) - set(b.keys()))})
return True
for key in a:
if str(a[key]) != str(b[key]):
LOG.debug('metadata diff -- value differs for key '
'%(key)s: master "%(master_value)s" vs '
'slave "%(slave_value)s"' %
{'key': key,
'master_value': a[key],
'slave_value': b[key]})
return True
return False
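# For example (illustrative): _dict_diff({'size': 1}, {}) is True because the
# master has a key the slave lacks, while _dict_diff({}, {'size': 1}) is False
# since only master-side keys and differing values are considered.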
def replication_load(options, args):
"""%(prog)s load <server:port> <path>
Load the contents of a local directory into glance.
server:port: the location of the glance instance.
path: a directory on disk containing the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(http_client.HTTPConnection(server, port),
options.slavetoken)
updated = []
for ent in os.listdir(path):
if utils.is_uuid_like(ent):
image_uuid = ent
LOG.info(_LI('Considering: %s') % image_uuid)
meta_file_name = os.path.join(path, image_uuid)
with open(meta_file_name) as meta_file:
meta = jsonutils.loads(meta_file.read())
# Remove keys which don't make sense for replication
for key in options.dontreplicate.split(' '):
if key in meta:
LOG.debug('Stripping %(header)s from saved '
'metadata', {'header': key})
del meta[key]
if _image_present(client, image_uuid):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
LOG.debug('Image %s already present', image_uuid)
headers = client.get_image_meta(image_uuid)
for key in options.dontreplicate.split(' '):
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(meta, headers):
LOG.info(_LI('Image %s metadata has changed') %
image_uuid)
headers, body = client.add_image_meta(meta)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
else:
if not os.path.exists(os.path.join(path, image_uuid + '.img')):
LOG.debug('%s dump is missing image data, skipping' %
image_uuid)
continue
# Upload the image itself
with open(os.path.join(path, image_uuid + '.img')) as img_file:
try:
headers, body = client.add_image(meta, img_file)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
except exc.HTTPConflict:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
% image_uuid) # noqa
return updated
def replication_livecopy(options, args):
"""%(prog)s livecopy <fromserver:port> <toserver:port>
Load the contents of one glance instance into another.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = http_client.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = http_client.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
updated = []
for image in master_client.get_images():
LOG.debug('Considering %(id)s' % {'id': image['id']})
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if _image_present(slave_client, image['id']):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
headers = slave_client.get_image_meta(image['id'])
if headers['status'] == 'active':
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master '
'metadata', {'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(image, headers):
LOG.info(_LI('Image %s metadata has changed') %
image['id'])
headers, body = slave_client.add_image_meta(image)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
elif image['status'] == 'active':
LOG.info(_LI('Image %s is being synced') % image['id'])
if not options.metaonly:
image_response = master_client.get_image(image['id'])
try:
headers, body = slave_client.add_image(image,
image_response)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
except exc.HTTPConflict:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa
return updated
def replication_compare(options, args):
"""%(prog)s compare <fromserver:port> <toserver:port>
Compare the contents of fromserver with those of toserver.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = http_client.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = http_client.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
differences = {}
for image in master_client.get_images():
if _image_present(slave_client, image['id']):
headers = slave_client.get_image_meta(image['id'])
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave metadata',
{'header': key})
del headers[key]
for key in image:
if image[key] != headers.get(key, None):
LOG.warn(_LW('%(image_id)s: field %(key)s differs '
'(source is %(master_value)s, destination '
'is %(slave_value)s)')
% {'image_id': image['id'],
'key': key,
'master_value': image[key],
'slave_value': headers.get(key, 'undefined')})
differences[image['id']] = 'diff'
else:
LOG.debug('%(image_id)s is identical'
% {'image_id': image['id']})
elif image['status'] == 'active':
LOG.warn(_LW('Image %s entirely missing from the destination')
% image['id'])
differences[image['id']] = 'missing'
return differences
def _check_upload_response_headers(headers, body):
"""Check that the headers of an upload are reasonable.
headers: the headers from the upload
body: the body from the upload
"""
if 'status' not in headers:
try:
d = jsonutils.loads(body)
if 'image' in d and 'status' in d['image']:
return
except Exception:
raise exception.UploadException(body)
def _image_present(client, image_uuid):
"""Check if an image is present in glance.
client: the ImageService
image_uuid: the image uuid to check
Returns: True if the image is present
"""
headers = client.get_image_meta(image_uuid)
return 'status' in headers
def print_help(options, args):
"""Print help specific to a command.
options: the parsed command line options
args: the command line
"""
if len(args) != 1:
print(COMMANDS)
sys.exit(1)
command_name = args.pop()
command = lookup_command(command_name)
print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(command_name):
"""Lookup a command.
command_name: the command name
Returns: a method which implements that command
"""
BASE_COMMANDS = {'help': print_help}
REPLICATION_COMMANDS = {'compare': replication_compare,
'dump': replication_dump,
'livecopy': replication_livecopy,
'load': replication_load,
'size': replication_size}
commands = {}
for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS):
commands.update(command_set)
try:
command = commands[command_name]
except KeyError:
sys.exit(_("Unknown command: %s") % command_name)
return command
def main():
"""The main function."""
try:
config.parse_args()
except RuntimeError as e:
sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
# Setup logging
logging.setup('glance')
if CONF.token:
CONF.slavetoken = CONF.token
CONF.mastertoken = CONF.token
command = lookup_command(CONF.command)
try:
command(CONF, CONF.args)
except TypeError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa
sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
except ValueError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa
sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
if __name__ == '__main__':
main()
| apache-2.0 | 7,169,373,453,539,626,000 | 34.296143 | 87 | 0.55481 | false | 4.342484 | false | false | false | 0 |
t-animal/sigal | tests/test_settings.py | 2 | 1689 | import os
from sigal.settings import read_settings, get_thumb
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
def test_read_settings(settings):
"""Test that the settings are correctly read."""
assert settings['img_size'] == (640, 480)
assert settings['thumb_size'] == (200, 150)
assert settings['thumb_suffix'] == '.tn'
assert settings['source'] == os.path.join(CURRENT_DIR, 'sample',
'pictures')
def test_get_thumb(settings):
"""Test the get_thumb function."""
tests = [('example.jpg', 'thumbnails/example.tn.jpg'),
('test/example.jpg', 'test/thumbnails/example.tn.jpg'),
('test/t/example.jpg', 'test/t/thumbnails/example.tn.jpg')]
for src, ref in tests:
assert get_thumb(settings, src) == ref
tests = [('example.webm', 'thumbnails/example.tn.jpg'),
('test/example.mp4', 'test/thumbnails/example.tn.jpg'),
('test/t/example.avi', 'test/t/thumbnails/example.tn.jpg')]
for src, ref in tests:
assert get_thumb(settings, src) == ref
def test_img_sizes(tmpdir):
"""Test that image size is swaped if needed."""
conf = tmpdir.join('sigal.conf.py')
conf.write("thumb_size = (150, 200)")
settings = read_settings(str(conf))
assert settings['thumb_size'] == (200, 150)
def test_theme_path(tmpdir):
"""Test that image size is swaped if needed."""
tmpdir.join('theme').mkdir()
tmpdir.join('theme').join('templates').mkdir()
conf = tmpdir.join('sigal.conf.py')
conf.write("theme = 'theme'")
settings = read_settings(str(conf))
assert settings['theme'] == tmpdir.join('theme')
| mit | 9,055,541,111,521,047,000 | 32.117647 | 72 | 0.612789 | false | 3.51875 | true | false | false | 0 |
tudyzhb/yichui | django/core/handlers/modpython.py | 78 | 7047 | import os
import sys
from warnings import warn
from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.log import getLogger
logger = getLogger('django.request')
class ModPythonRequest(http.HttpRequest):
def __init__(self, req):
self._req = req
# FIXME: This isn't ideal. The request URI may be encoded (it's
# non-normalized) slightly differently to the "real" SCRIPT_NAME
# and PATH_INFO values. This causes problems when we compute path_info,
# below. For now, don't use script names that will be subject to
# encoding/decoding.
self.path = force_unicode(req.uri)
root = req.get_options().get('django.root', '')
self.django_root = root
# req.path_info isn't necessarily computed correctly in all
# circumstances (it's out of mod_python's control a bit), so we use
# req.uri and some string manipulations to get the right value.
if root and req.uri.startswith(root):
self.path_info = force_unicode(req.uri[len(root):])
else:
self.path_info = self.path
if not self.path_info:
# Django prefers empty paths to be '/', rather than '', to give us
# a common start character for URL patterns. So this is a little
# naughty, but also pretty harmless.
self.path_info = u'/'
self._post_parse_error = False
self._stream = self._req
self._read_started = False
def get_full_path(self):
# RFC 3986 requires self._req.args to be in the ASCII range, but this
# doesn't always happen, so rather than crash, we defensively encode it.
return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '')
def _is_secure(self):
try:
return self._req.is_https()
except AttributeError:
# mod_python < 3.2.10 doesn't have req.is_https().
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
self._get = http.QueryDict(self._req.args, encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_meta(self):
"Lazy loader that returns self.META dictionary"
if not hasattr(self, '_meta'):
self._meta = {
'AUTH_TYPE': self._req.ap_auth_type,
'CONTENT_LENGTH': self._req.headers_in.get('content-length', 0),
'CONTENT_TYPE': self._req.headers_in.get('content-type'),
'GATEWAY_INTERFACE': 'CGI/1.1',
'PATH_INFO': self.path_info,
'PATH_TRANSLATED': None, # Not supported
'QUERY_STRING': self._req.args,
'REMOTE_ADDR': self._req.connection.remote_ip,
'REMOTE_HOST': None, # DNS lookups not supported
'REMOTE_IDENT': self._req.connection.remote_logname,
'REMOTE_USER': self._req.user,
'REQUEST_METHOD': self._req.method,
'SCRIPT_NAME': self.django_root,
'SERVER_NAME': self._req.server.server_hostname,
'SERVER_PORT': self._req.connection.local_addr[1],
'SERVER_PROTOCOL': self._req.protocol,
'SERVER_SOFTWARE': 'mod_python'
}
for key, value in self._req.headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
self._meta[key] = value
return self._meta
def _get_method(self):
return self.META['REQUEST_METHOD'].upper()
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
method = property(_get_method)
class ModPythonHandler(BaseHandler):
request_class = ModPythonRequest
def __call__(self, req):
warn(('The mod_python handler is deprecated; use a WSGI or FastCGI server instead.'),
DeprecationWarning)
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
os.environ.update(req.subprocess_env)
# now that the environ works we can see the correct settings, so imports
# that use settings now can work
from django.conf import settings
# if we need to set up middleware, now that settings works we can do it now.
if self._request_middleware is None:
self.load_middleware()
set_script_prefix(req.get_options().get('django.root', ''))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(req)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
finally:
signals.request_finished.send(sender=self.__class__)
# Convert our custom HttpResponse object back into the mod_python req.
req.content_type = response['Content-Type']
for key, value in response.items():
if key != 'content-type':
req.headers_out[str(key)] = str(value)
for c in response.cookies.values():
req.headers_out.add('Set-Cookie', c.output(header=''))
req.status = response.status_code
try:
for chunk in response:
req.write(chunk)
finally:
response.close()
return 0 # mod_python.apache.OK
def handler(req):
# mod_python hooks into this function.
return ModPythonHandler()(req)
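# Illustrative Apache configuration for this handler (location, settings
# module and root are example values):
#
#   <Location "/mysite">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonOption django.root /mysite
#   </Location>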
| bsd-3-clause | -7,566,448,638,201,583,000 | 38.15 | 96 | 0.577976 | false | 4.087587 | false | false | false | 0.002412 |
Vvkmnn/books | TensorFlowForMachineIntelligence/chapters/06_recurrent_neural_networks_and_natural_language_processing/02_imdb/SequenceClassificationModel.py | 1 | 2502 | import tensorflow as tf
from helpers import lazy_property
class SequenceClassificationModel:
def __init__(self, data, target, params):
self.data = data
self.target = target
self.params = params
self.prediction
self.cost
self.error
self.optimize
@lazy_property
def length(self):
used = tf.sign(tf.reduce_max(tf.abs(self.data), reduction_indices=2))
length = tf.reduce_sum(used, reduction_indices=1)
length = tf.cast(length, tf.int32)
return length
@lazy_property
def prediction(self):
# Recurrent network.
output, _ = tf.nn.dynamic_rnn(
self.params.rnn_cell(self.params.rnn_hidden),
self.data,
dtype=tf.float32,
sequence_length=self.length,
)
last = self._last_relevant(output, self.length)
# Softmax layer.
num_classes = int(self.target.get_shape()[1])
weight = tf.Variable(tf.truncated_normal(
[self.params.rnn_hidden, num_classes], stddev=0.01))
bias = tf.Variable(tf.constant(0.1, shape=[num_classes]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
return prediction
@lazy_property
def cost(self):
cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
return cross_entropy
@lazy_property
def error(self):
mistakes = tf.not_equal(
tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
return tf.reduce_mean(tf.cast(mistakes, tf.float32))
@lazy_property
def optimize(self):
gradient = self.params.optimizer.compute_gradients(self.cost)
try:
limit = self.params.gradient_clipping
gradient = [
(tf.clip_by_value(g, -limit, limit), v)
if g is not None else (None, v)
for g, v in gradient]
except AttributeError:
print('No gradient clipping parameter specified.')
optimize = self.params.optimizer.apply_gradients(gradient)
return optimize
@staticmethod
def _last_relevant(output, length):
batch_size = tf.shape(output)[0]
max_length = int(output.get_shape()[1])
output_size = int(output.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, output_size])
relevant = tf.gather(flat, index)
return relevant
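# Minimal construction sketch. AttrDict is a hypothetical stand-in for the
# params object; the data/target placeholders are assumed to be built by the
# caller. Depending on the TF 1.x release, GRUCell lives in tf.contrib.rnn
# or tf.nn.rnn_cell.
#
#   params = AttrDict(rnn_cell=tf.contrib.rnn.GRUCell, rnn_hidden=300,
#                     optimizer=tf.train.RMSPropOptimizer(0.002),
#                     gradient_clipping=100)
#   model = SequenceClassificationModel(data, target, params)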
| gpl-3.0 | 8,482,397,022,878,103,000 | 32.36 | 77 | 0.595923 | false | 3.819847 | false | false | false | 0 |
huazhisong/race_code | kaggle_ws/dog_cat_ws/codes/new_version/train_and_val.py | 1 | 7333 | #By @Kevin Xu
#[email protected]
#Youtube: https://www.youtube.com/channel/UCVCSn4qQXTDAtGWpWAe4Plw
#
#The aim of this project is to use TensorFlow to process our own data.
# - input_data.py: read in data and generate batches
# - model: build the model architecture
# - training: train
# I used Ubuntu with Python 3.5 and TensorFlow 1.0*; other OSes should work as well.
# With current settings, 10000 training steps took about 50 minutes on my laptop.
# data: cats vs. dogs from Kaggle
# Download link: https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/data
# data size: ~540M
# How to run?
# 1. run the training.py once
# 2. call the run_training() in the console to train the model.
# Note:
# it is suggested to restart your kernel to train the model multiple times
# (in order to clear all the variables in memory)
# Otherwise errors may occur: conv1/weights/biases already exist......
#%%
import os
import numpy as np
import tensorflow as tf
import input_train_val_split
import model
#%%
N_CLASSES = 2
IMG_W = 208 # resize the image; if the input image is too large, training will be very slow.
IMG_H = 208
RATIO = 0.2 # take 20% of dataset as validation data
BATCH_SIZE = 64
CAPACITY = 2000
MAX_STEP = 6000 # with current parameters, it is suggested to use MAX_STEP>10k
learning_rate = 0.0001 # with current parameters, it is suggested to use learning rate<0.0001
#%%
def run_training():
# you need to change the directories to yours.
train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
logs_val_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/val/'
train, train_label, val, val_label = input_train_val_split.get_files(train_dir, RATIO)
train_batch, train_label_batch = input_train_val_split.get_batch(train,
train_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY)
val_batch, val_label_batch = input_train_val_split.get_batch(val,
val_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY)
logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
loss = model.losses(logits, train_label_batch)
train_op = model.trainning(loss, learning_rate)
acc = model.evaluation(logits, train_label_batch)
x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess= sess, coord=coord)
summary_op = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)
try:
for step in np.arange(MAX_STEP):
if coord.should_stop():
break
tra_images,tra_labels = sess.run([train_batch, train_label_batch])
_, tra_loss, tra_acc = sess.run([train_op, loss, acc],
feed_dict={x:tra_images, y_:tra_labels})
if step % 50 == 0:
print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
summary_str = sess.run(summary_op)
train_writer.add_summary(summary_str, step)
if step % 200 == 0 or (step + 1) == MAX_STEP:
val_images, val_labels = sess.run([val_batch, val_label_batch])
val_loss, val_acc = sess.run([loss, acc],
feed_dict={x:val_images, y_:val_labels})
print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **' %(step, val_loss, val_acc*100.0))
summary_str = sess.run(summary_op)
val_writer.add_summary(summary_str, step)
if step % 2000 == 0 or (step + 1) == MAX_STEP:
checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
coord.request_stop()
coord.join(threads)
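# Illustrative invocation from a Python console (the directories configured
# above must exist):
#
#   >>> import train_and_val
#   >>> train_and_val.run_training()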
#%% Evaluate one image
# when training, comment the following codes.
#from PIL import Image
#import matplotlib.pyplot as plt
#
#def get_one_image(train):
# '''Randomly pick one image from training data
# Return: ndarray
# '''
# n = len(train)
# ind = np.random.randint(0, n)
# img_dir = train[ind]
#
# image = Image.open(img_dir)
# plt.imshow(image)
# image = image.resize([208, 208])
# image = np.array(image)
# return image
#
#def evaluate_one_image():
# '''Test one image against the saved models and parameters
# '''
#
# # you need to change the directories to yours.
# train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
# train, train_label = input_data.get_files(train_dir)
# image_array = get_one_image(train)
#
# with tf.Graph().as_default():
# BATCH_SIZE = 1
# N_CLASSES = 2
#
# image = tf.cast(image_array, tf.float32)
# image = tf.image.per_image_standardization(image)
# image = tf.reshape(image, [1, 208, 208, 3])
#
# logit = model.inference(image, BATCH_SIZE, N_CLASSES)
#
# logit = tf.nn.softmax(logit)
#
# x = tf.placeholder(tf.float32, shape=[208, 208, 3])
#
# # you need to change the directories to yours.
# logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
#
# saver = tf.train.Saver()
#
# with tf.Session() as sess:
#
# print("Reading checkpoints...")
# ckpt = tf.train.get_checkpoint_state(logs_train_dir)
# if ckpt and ckpt.model_checkpoint_path:
# global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
# saver.restore(sess, ckpt.model_checkpoint_path)
# print('Loading success, global_step is %s' % global_step)
# else:
# print('No checkpoint file found')
#
# prediction = sess.run(logit, feed_dict={x: image_array})
# max_index = np.argmax(prediction)
# if max_index==0:
# print('This is a cat with possibility %.6f' %prediction[:, 0])
# else:
# print('This is a dog with possibility %.6f' %prediction[:, 1])
#%%
| gpl-3.0 | 4,241,528,122,895,725,000 | 36.994819 | 117 | 0.542752 | false | 3.598135 | false | false | false | 0.011455 |
jbtule/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Sig.py | 19 | 2341 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Sig.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Place-holder for the old SCons.Sig module hierarchy
This is no longer used, but code out there (such as the NSIS module on
the SCons wiki) may try to import SCons.Sig. If so, we generate a warning
that points them to the line that caused the import, and don't die.
If someone actually tried to use the sub-modules or functions within
the package (for example, SCons.Sig.MD5.signature()), then they'll still
get an AttributeError, but at least they'll know where to start looking.
"""
import SCons.Util
import SCons.Warnings
msg = 'The SCons.Sig module no longer exists.\n' \
' Remove the following "import SCons.Sig" line to eliminate this warning:'
SCons.Warnings.warn(SCons.Warnings.DeprecatedWarning, msg)
default_calc = None
default_module = None
class MD5Null(SCons.Util.Null):
def __repr__(self):
return "MD5Null()"
class TimeStampNull(SCons.Util.Null):
def __repr__(self):
return "TimeStampNull()"
MD5 = MD5Null()
TimeStamp = TimeStampNull()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | -2,733,129,527,998,132,700 | 36.15873 | 89 | 0.745835 | false | 3.869421 | false | false | false | 0.002136 |
bhavani33/iiit_project | labourwages/admin.py | 4 | 3844 | from django.contrib import admin
from labourwages.models import *
# Register your models here.
class AgoperationAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Agoperation,AgoperationAdmin)
class WagetypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Wagetype,WagetypeAdmin)
class WorkprogrammeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Workprogramme,WorkprogrammeAdmin)
class ObligationtypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Obligationtype,ObligationtypeAdmin)
class TaskdescriptionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Taskdescription,TaskdescriptionAdmin)
class CollecteditemsAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Collecteditems,CollecteditemsAdmin)
class MagencyAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Magency,MagencyAdmin)
class CheatingfacedAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Cheatingfaced,CheatingfacedAdmin)
class WorkdescriptionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Workdescription,WorkdescriptionAdmin)
class AnimaltypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Animaltype,AnimaltypeAdmin)
class AnimalproductionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Animalproduction,AnimalproductionAdmin)
class LabourdaysAdmin(admin.ModelAdmin):
fields=('household','household_number','labour_deployed','s_no','crop','extent','agricultural_operation','family_labour_days_m','family_labour_days_w','family_labour_days_c','family_labour_hours_m','family_labour_hours_w','family_labour_hours_c','daily_labour_days_m','daily_labour_days_w','daily_labour_days_c','daily_labour_hours_m','daily_labour_hours_w','daily_labour_hours_c','daily_labour_wages_m','daily_labour_wages_w','daily_labour_wages_c','exchange_labour_days_m','exchange_labour_days_w','exchange_labour_days_c','exchange_labour_hours_m','exchange_labour_hours_w','exchange_labour_hours_c','piece_rated_cash','piece_rated_kind','machine_labour_workhours','machine_labourpayment','comments',)
admin.site.register(Labourdays,LabourdaysAdmin)
class WagesAdmin(admin.ModelAdmin):
fields=('household','household_number','is_agricultural_labour','worker_name','crop','operation','type_wage','place_work','labour_days','work_hours','earnings_cash','income','piece_rate_kind','contract_number_acres','contract_remuniration','contract_howmany_workers','contract_total_wage','wagerates_increased','migrations_declined','isthere_change_peasants','has_baragaining_power_increased','comments',)
admin.site.register(Wages,WagesAdmin)
class NonaglabourAdmin(admin.ModelAdmin):
fields=('household','household_number','workedin_nonag_operation','worker_name','description_specify_programme', 'type_wage_contract','place_work','number_days','work_hours','wage_rate' ,'totalearnings_cash','comments')
admin.site.register(Nonaglabour,NonaglabourAdmin)
class EmpfreedomAdmin(admin.ModelAdmin):
fields=('household' , 'household_number', 'comments',)
admin.site.register(Empfreedom,EmpfreedomAdmin)
class IncomeotherAdmin(admin.ModelAdmin):
fields=('household','household_number','worker_name','work_description','work_place','totalnet_earnings','earlier_income_kind','comments',)
admin.site.register(Incomeother,IncomeotherAdmin)
class AnimalsourceAdmin(admin.ModelAdmin):
fields=('household','household_number','animal_owned','type','s_no','nu','age','feed_home_grown','feed_purchased','total_present_value','veternary_charges','maintanence_buildings','insurance','interest_loans_livestock','labour_charges','others','income_production_one','production_work_qty_one','production_work_price_one','income_production_two','production_work_qty_two','production_work_price_two','comments')
admin.site.register(Animalsource,AnimalsourceAdmin)
| gpl-3.0 | 251,688,444,045,934,460 | 52.388889 | 705 | 0.788502 | false | 3.214047 | false | false | false | 0.048127 |
shabda/pychart | pychart/axis_x_doc.py | 13 | 2720 | #
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
doc="""Attributes supported by this class are:
tic_label_offset(type:(x,y)):
        The location where the tick labels are drawn. Relative to the
tip of the tick mark.
default value=(0, 0).
format(type:printf format string):
The format string for tick labels. It can be a `printf' style
format string, or a single-parameter function that returns a
string. See also font.
default value=%s.
minor_tic_len(type:number):
The length of minor tick marks.
default value=3.
label_offset(type:(x,y) or None):
The location where the axis label is drawn. Relative to the
left-bottom corner of the axis.
default value=(None, None).
grid_interval(type:Number or function):
When the value is a number, it specifies the interval with which
grid lines are drawn. Otherwise, the value must be a function.
It must take no argument and return the list of numbers, which
specifies the X or Y points where grid lines are drawn.
default value=None.
label(type:str):
The description of the axis. See also font.
default value=axis label.
grid_style(type:line_style.T):
The style of grid lines.
default value=None.
tic_interval(type:Number or function):
When the value is a number, it specifies the interval with which
tick marks are drawn. Otherwise, the value must be a function.
It must take no argument and return the list of numbers, which
specifies the X or Y points where tick marks are drawn.
default value=None.
line_style(type:line_style.T):
The style of tick lines.
default value=default.
tic_len(type:number):
The length of tick lines
default value=6.
minor_tic_interval(type:Number or function):
When the value is a number, it specifies the interval with which
minor tick marks are drawn. Otherwise, the value must be a function.
It must take no argument and return the list of numbers, which
specifies the X or Y points where minor tick marks are drawn.
default value=None.
first_tic_value(type:number):
The location of the first tick mark. Defaults to the x_range[0]
(or y_range[0]) of the area.
default value=None.
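
Example (an illustrative sketch; it assumes pychart's standard axis
module and uses only attribute names documented above):

    from pychart import axis
    xaxis = axis.X(label='Year', format='%d', tic_interval=10)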
""" | gpl-2.0 | 1,675,746,578,737,732,600 | 40.861538 | 73 | 0.720956 | false | 3.959243 | false | false | false | 0.011029 |
PlayOnLinux/PlayOnLinux_3 | python/readfile.py | 1 | 4028 | #!/usr/bin/python
# -*- coding:Utf-8 -*-
# The UTF-8 encoding declaration must be included in every file, otherwise accented characters break...
# Copyright (C) 2007 Pâris Quentin
# Cassarin-Grand Arthur
# Copyright (C) 2007-2010 PlayOnLinux Team
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import wxversion, os, getopt, sys, urllib, signal
wxversion.select("2.8")
import wx
ID_CANCEL = 101
ID_NEXT = 102
class Lng:
step = os.popen("printf $LNG_STEP", "r").read()
sur = os.popen("printf $LNG_SUR", "r").read()
cancel = os.popen("printf $LNG_CANCEL", "r").read()
class Variables: # class that holds the various variables (no globals)
	if len(sys.argv) > 6:
		titre = sys.argv[1] # the title shown in the window
		texte = sys.argv[2] # the body of the window's message
		numeroEtape = sys.argv[3] # the current step number
		nombreEtape = sys.argv[4] # the total number of steps
		cancel_present = sys.argv[5] # should the cancel button be displayed?
		image = sys.argv[6] # nice if we can choose the image ;)
		playonlinux_env = os.popen("printf $PLAYONLINUX", "r").read() # get the PlayOnLinux directory
		theme_env = os.popen("printf $POL_THEME", "r").read() # get the theme in use
image_use = playonlinux_env+"/themes/"+theme_env+"/"+image
etape_txt = Lng.step+" "+numeroEtape+" "+Lng.sur+" "+nombreEtape
next = sys.argv[7]
else:
print "Il manque des arguments"
exit(255)
class Ok_frame(wx.Frame): # main window
def __init__(self, titre):
wx.Frame.__init__(self, None, -1, title = titre, style = wx.CLOSE_BOX | wx.MINIMIZE_BOX, size = (520, 290))
self.SetIcon(wx.Icon(Variables.playonlinux_env+"/etc/playonlinux.png", wx.BITMAP_TYPE_ANY))
self.panelFenp = wx.Panel(self, -1)
self.fontTexte = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, "", wx.FONTENCODING_DEFAULT)
self.txtTitre = wx.StaticText(self.panelFenp, -1, Variables.titre, (20,25), wx.DefaultSize)
self.txtTitre.SetFont(self.fontTexte)
self.txtTexte = wx.TextCtrl(self.panelFenp, -1, Variables.texte, (155,65), size=(330,150), style = wx.TE_MULTILINE | wx.TE_RICH2 | wx.CB_READONLY)
#self.txtTexte.Wrap(330)
if Variables.nombreEtape != "0":
self.txtEtape = wx.StaticText(self.panelFenp, -1, Variables.etape_txt, (20, 265), wx.DefaultSize)
self.buttonSuivant = wx.Button(self.panelFenp, ID_NEXT, Variables.next, (425, 250), wx.DefaultSize)
if Variables.cancel_present == "1":
self.buttonAnnuler = wx.Button(self.panelFenp, ID_CANCEL, Lng.cancel, (330, 250), wx.DefaultSize)
self.imageLogo = wx.Bitmap(Variables.image_use)
self.canvasLogo = wx.StaticBitmap(self.panelFenp, -1, self.imageLogo, (30,65), wx.DefaultSize)
wx.EVT_BUTTON(self, ID_CANCEL, self.Cancel)
wx.EVT_BUTTON(self, ID_NEXT, self.Next)
def Cancel(self, event):
print("Canceled") #Indiquera à PlayOnLinux bash qu'il faut arreter l'instalaltion
self.Close()
def Next(self, event):
self.Close()
class Ok_message(wx.App): # main application class instance
def OnInit(self):
ok_boite = Ok_frame("PlayOnLinux")
ok_boite.Center(wx.BOTH)
ok_boite.Show(True)
self.SetTopWindow(ok_boite)
return True
ok_message = Ok_message() # create the application
ok_message.MainLoop()
| gpl-3.0 | 3,124,211,852,931,196,400 | 41.734043 | 149 | 0.698531 | false | 2.805168 | false | false | false | 0.029624 |
squidsoup/snapcraft | snapcraft/file_utils.py | 3 | 7787 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
import os
import shutil
import subprocess
import logging
from snapcraft.internal.errors import (
RequiredCommandFailure,
RequiredCommandNotFound,
RequiredPathDoesNotExist,
)
logger = logging.getLogger(__name__)
def replace_in_file(directory, file_pattern, search_pattern, replacement):
"""Searches and replaces patterns that match a file pattern.
:param str directory: The directory to look for files.
:param str file_pattern: The file pattern to match inside directory.
:param search_pattern: A re.compile'd pattern to search for within
matching files.
:param str replacement: The string to replace the matching search_pattern
with.
"""
for root, directories, files in os.walk(directory):
for file_name in files:
if file_pattern.match(file_name):
_search_and_replace_contents(os.path.join(root, file_name),
search_pattern, replacement)
def link_or_copy(source, destination, follow_symlinks=False):
"""Hard-link source and destination files. Copy if it fails to link.
Hard-linking may fail (e.g. a cross-device link, or permission denied), so
as a backup plan we just copy it.
:param str source: The source to which destination will be linked.
:param str destination: The destination to be linked to source.
:param bool follow_symlinks: Whether or not symlinks should be followed.
"""
try:
# Note that follow_symlinks doesn't seem to work for os.link, so we'll
# implement this logic ourselves using realpath.
source_path = source
if follow_symlinks:
source_path = os.path.realpath(source)
if not os.path.exists(os.path.dirname(destination)):
create_similar_directory(
os.path.dirname(source_path),
os.path.dirname(destination))
# Setting follow_symlinks=False in case this bug is ever fixed
# upstream-- we want this function to continue supporting NOT following
# symlinks.
os.link(source_path, destination, follow_symlinks=False)
except OSError:
shutil.copy2(source, destination, follow_symlinks=follow_symlinks)
uid = os.stat(source, follow_symlinks=follow_symlinks).st_uid
gid = os.stat(source, follow_symlinks=follow_symlinks).st_gid
try:
os.chown(destination, uid, gid, follow_symlinks=follow_symlinks)
except PermissionError as e:
logger.debug('Unable to chown {destination}: {error}'.format(
destination=destination, error=e))
def link_or_copy_tree(source_tree, destination_tree,
copy_function=link_or_copy):
"""Copy a source tree into a destination, hard-linking if possile.
:param str source_tree: Source directory to be copied.
:param str destination_tree: Destination directory. If this directory
already exists, the files in `source_tree`
will take precedence.
    :param callable copy_function: Function used to link or copy individual
                                   files (defaults to link_or_copy).
"""
if not os.path.isdir(source_tree):
raise NotADirectoryError('{!r} is not a directory'.format(source_tree))
if (not os.path.isdir(destination_tree) and
os.path.exists(destination_tree)):
raise NotADirectoryError(
'Cannot overwrite non-directory {!r} with directory '
'{!r}'.format(destination_tree, source_tree))
create_similar_directory(source_tree, destination_tree)
for root, directories, files in os.walk(source_tree):
for directory in directories:
source = os.path.join(root, directory)
destination = os.path.join(
destination_tree, os.path.relpath(source, source_tree))
create_similar_directory(source, destination)
for file_name in files:
source = os.path.join(root, file_name)
destination = os.path.join(
destination_tree, os.path.relpath(source, source_tree))
copy_function(source, destination)
def create_similar_directory(source, destination, follow_symlinks=False):
"""Create a directory with the same permission bits and owner information.
:param str source: Directory from which to copy name, permission bits, and
owner information.
    :param str destination: Directory to create and to which the `source`
information will be copied.
:param bool follow_symlinks: Whether or not symlinks should be followed.
"""
stat = os.stat(source, follow_symlinks=follow_symlinks)
uid = stat.st_uid
gid = stat.st_gid
os.makedirs(destination, exist_ok=True)
try:
os.chown(destination, uid, gid, follow_symlinks=follow_symlinks)
except PermissionError as exception:
logger.debug('Unable to chown {}: {}'.format(destination, exception))
shutil.copystat(source, destination, follow_symlinks=follow_symlinks)
def _search_and_replace_contents(file_path, search_pattern, replacement):
# Don't bother trying to rewrite a symlink. It's either invalid or the
# linked file will be rewritten on its own.
if os.path.islink(file_path):
return
try:
with open(file_path, 'r+') as f:
try:
original = f.read()
except UnicodeDecodeError:
# This was probably a binary file. Skip it.
return
replaced = search_pattern.sub(replacement, original)
if replaced != original:
f.seek(0)
f.truncate()
f.write(replaced)
except PermissionError as e:
logger.warning('Unable to open {path} for writing: {error}'.format(
path=file_path, error=e))
def executable_exists(path):
"""Return True if 'path' exists and is readable and executable."""
return os.path.exists(path) and os.access(path, os.R_OK | os.X_OK)
@contextmanager
def requires_command_success(command, not_found_fmt=None, failure_fmt=None):
if isinstance(command, str):
cmd_list = command.split()
else:
raise TypeError('command must be a string.')
kwargs = dict(command=command, cmd_list=cmd_list)
try:
subprocess.check_call(
cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError:
if not_found_fmt is not None:
kwargs['fmt'] = not_found_fmt
raise RequiredCommandNotFound(**kwargs)
except subprocess.CalledProcessError:
if failure_fmt is not None:
kwargs['fmt'] = failure_fmt
raise RequiredCommandFailure(**kwargs)
yield
@contextmanager
def requires_path_exists(path, error_fmt=None):
if not os.path.exists(path):
kwargs = dict(path=path)
if error_fmt is not None:
kwargs['fmt'] = error_fmt
raise RequiredPathDoesNotExist(**kwargs)
yield
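

# Illustrative usage of the context managers above (a sketch; the command
# and format strings below are assumptions, not part of this module):
#
#     with requires_command_success(
#             'mksquashfs -version',
#             not_found_fmt='{cmd_list[0]} is required but not installed.'):
#         pass  # mksquashfs is known to exist and run here
#
#     with requires_path_exists('parts/', error_fmt='{path} is missing.'):
#         pass  # parts/ is known to exist here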
| gpl-3.0 | 5,294,693,437,406,654,000 | 37.171569 | 79 | 0.653525 | false | 4.30221 | false | false | false | 0 |
istresearch/readthedocs.org | readthedocs/comments/backend.py | 34 | 1098 | import json
from django.core import serializers
from sphinx.websupport.storage import StorageBackend
from .models import DocumentNode
from readthedocs.comments.models import NodeSnapshot
class DjangoStorage(StorageBackend):
"""
A Sphinx StorageBackend using Django.
"""
def get_metadata(self, docname, moderator=None):
ret_dict = {}
for node in DocumentNode.objects.filter(page=docname):
ret_dict[node.latest_hash()] = node.comments.count()
return ret_dict
def get_data(self, node_id, username, moderator=None):
try:
node = DocumentNode.objects.get(snapshots__hash=node_id)
except DocumentNode.DoesNotExist:
return None
ret_comments = []
for comment in node.comments.all():
json_data = json.loads(serializers.serialize("json", [comment]))[0]
fields = json_data['fields']
fields['pk'] = json_data['pk']
ret_comments.append(
fields
)
return {'source': '',
'comments': ret_comments}
| mit | -2,446,256,679,193,222,000 | 30.371429 | 79 | 0.615665 | false | 4.289063 | false | false | false | 0 |
FroggedTV/grenouilleAPI | backend/routes/stats.py | 1 | 15657 | import logging
import csv
import os
from io import StringIO
from flask import request, jsonify, send_file
from models import db, CSVData, DynamicConfiguration
from helpers.general import safe_json_loads
from helpers.endpoint import secure
from helpers.image_gen import ImageGenerator
def build_api_stats(app):
"""Factory to setup the routes for the stats api."""
ig = ImageGenerator(app)
@app.route('/api/stats/csv/get', methods=['GET'])
@secure(app, ['key', 'user'], ['stats_manage'])
def get_stats_csv(auth_token):
"""
@api {get} /api/stats/csv/get StatsCSVGet
@apiVersion 1.1.0
@apiName StatsCSVGet
@apiGroup Stats
@apiDescription Get CSV saved for stats.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiParam {String} key CSV key to get.
@apiError (Errors){String} KeyInvalid key is not a valid string.
@apiError (Errors){String} KeyDataDoesntExist key has no data associated.
@apiSuccess {Number} csv CSVData associated to the key.
"""
data = safe_json_loads(request.args.get('data', '{}'))
# key check
key = data.get('key', 10)
if not isinstance(key, str) or len(key) <= 0:
return jsonify({'success': 'no',
'error': 'KeyInvalid',
'payload': {}
}), 200
csv_data = db.session.query(CSVData).filter(CSVData.key==key).one_or_none()
if csv_data is None:
return jsonify({'success': 'no',
'error': 'KeyDataDoesntExist',
'payload': {}
}), 200
else:
return jsonify({'success': 'yes',
'error': '',
'payload': {
'csv': csv_data.value
}
}), 200
@app.route('/api/stats/csv/update', methods=['POST'])
@secure(app, ['key', 'user'], ['stats_manage'])
def post_stats_csv(auth_token):
"""
        @api {post} /api/stats/csv/update StatsCSVUpdate
@apiVersion 1.1.0
@apiName StatsCSVUpdate
@apiGroup Stats
@apiDescription Update CSV saved for stats.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiParam {String} key CSV key to get.
@apiError (Errors){String} KeyInvalid key is not a valid string.
@apiParam {String} value CSV data.
@apiError (Errors){String} ValueInvalid value is not a valid string.
        @apiError (Errors){String} ValueCSVInvalid value is not a valid CSV with rows of equal length.
"""
data = request.get_json(force=True)
# key check
key = data.get('key', 10)
if not isinstance(key, str) or len(key) <= 0:
return jsonify({'success': 'no',
'error': 'KeyInvalid',
'payload': {}
}), 200
# value check
value = data.get('value', 10)
if not isinstance(value, str) or len(value) <= 0:
return jsonify({'success': 'no',
'error': 'ValueInvalid',
'payload': {}
}), 200
columns = None
for row in csv.reader(StringIO(value), delimiter=','):
if columns is None:
columns = len(row)
else:
if len(row) != columns:
return jsonify({'success': 'no',
'error': 'ValueCSVInvalid',
'payload': {}
}), 200
CSVData.upsert(key, value)
return jsonify({'success': 'yes',
'error': '',
'payload': {}
}), 200
@app.route('/api/stats/img/<name>', methods=['GET'])
def get_stats_img(name):
path_file = os.path.join(app.config['IMG_GENERATE_PATH'], name + '.png')
if not os.path.isfile(path_file):
return send_file('static/img/stats_default.jpg')
else:
return send_file(path_file)
@app.route('/api/stats/csv/img/generate', methods=['POST'])
@secure(app, ['key', 'user'], ['stats_manage'])
def post_stats_img_generate(auth_token):
"""
        @api {post} /api/stats/csv/img/generate StatsCSVGenerateIMG
        @apiVersion 1.1.0
        @apiName StatsCSVGenerateIMG
        @apiGroup Stats
        @apiDescription Start generating the CSV image.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiParam {String} key CSV key to generate.
@apiError (Errors){String} KeyInvalid key is not a valid string.
@apiParam {Number} [payload] Optional payload to refine the generation with.
@apiError (Errors){String} OpenDotaNotReady Data is not ready on OpenDota.
"""
data = request.get_json(force=True)
# key check
key = data.get('key', 10)
if not isinstance(key, str) or len(key) <= 0:
return jsonify({'success': 'no',
'error': 'KeyInvalid',
'payload': {}
}), 200
payload = data.get('payload', {})
# Generate
result = ig.generate_image(key, payload)
if result:
return jsonify({'success': 'yes',
'error': '',
'payload': {}
}), 200
else:
return jsonify({'success': 'no',
'error': 'OpenDotaNotReady',
'payload': {}
}), 200
@app.route('/api/stats/scene/status/get', methods=['GET'])
@secure(app, ['key', 'user'], ['stats_manage_scene'])
def get_stats_scene_status(auth_token):
"""
@api {get} /api/stats/scene/status/get StatsSceneStatusGet
@apiVersion 1.1.0
@apiName StatsSceneStatusGet
@apiGroup Stats
@apiDescription Get the status of the stat scene.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiSuccess {Boolean} activated Boolean to show if the stat scene is activated or disabled.
"""
stats_scene_status_dc = db.session.query(DynamicConfiguration).filter(DynamicConfiguration.key=='stats_scene_status').one_or_none()
if stats_scene_status_dc is None:
stats_scene_status = 'False'
else:
stats_scene_status = stats_scene_status_dc.value
return jsonify({'success': 'yes',
'error': '',
'payload': {
'activated': stats_scene_status == 'True'
}
}), 200
@app.route('/api/stats/scene/status/update', methods=['POST'])
@secure(app, ['key', 'user'], ['stats_manage_scene'])
def post_stats_scene_status(auth_token):
"""
        @api {post} /api/stats/scene/status/update StatsSceneStatusUpdate
@apiVersion 1.1.0
@apiName StatsSceneStatusUpdate
@apiGroup Stats
@apiDescription Update the status of the stat scene.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiParam {Boolean} activated New value of the stat scene status.
@apiError (Errors){String} ActivatedInvalid activated is not a valid boolean.
@apiSuccess {Boolean} activated Boolean to show if the stat scene is activated or disabled.
"""
data = request.get_json(force=True)
# activated check
activated = data.get('activated', False)
if not isinstance(activated, bool):
return jsonify({'success': 'no',
'error': 'ActivatedInvalid',
'payload': {}
}), 200
# change scene status
stats_scene_status_dc = db.session.query(DynamicConfiguration).filter(DynamicConfiguration.key=='stats_scene_status').one_or_none()
if stats_scene_status_dc is None:
stats_scene_status_dc = DynamicConfiguration('stats_scene_status', str(activated))
db.session.add(stats_scene_status_dc)
else:
stats_scene_status_dc.value = str(activated)
db.session.commit()
return jsonify({'success': 'yes',
'error': '',
'payload': {
'activated': stats_scene_status_dc.value == 'True'
}
}), 200
@app.route('/api/stats/scene/get', methods=['GET'])
def get_stats_scene():
"""
@api {get} /api/stats/scene/get StatsSceneGet
@apiVersion 1.1.0
@apiName StatsSceneGet
@apiGroup Stats
@apiDescription Get the stat image.
@apiSuccess {String} img Image to use in the stat scene.
@apiSuccess {String} last_modified Last time the file was modified.
@apiSuccess {Boolean} continue Should the stat scene user continue.
"""
stats_scene_dc = db.session.query(DynamicConfiguration).filter(DynamicConfiguration.key=='stats_scene').one_or_none()
if stats_scene_dc is None:
stats_scene = 'empty'
else:
stats_scene = stats_scene_dc.value
db.session.commit()
path_file = os.path.join(app.config['IMG_GENERATE_PATH'], stats_scene + '.png')
if not os.path.isfile(path_file):
last_modified = ''
else:
last_modified = os.path.getmtime(path_file)
# Give status inside
stats_scene_status_dc = db.session.query(DynamicConfiguration).filter(DynamicConfiguration.key=='stats_scene_status').one_or_none()
if stats_scene_status_dc is None:
stats_scene_status = 'False'
else:
stats_scene_status = stats_scene_status_dc.value
return jsonify({'success': 'yes',
'error': '',
'payload': {
'continue': stats_scene_status == 'True',
'img': stats_scene,
'last_modified': last_modified
}
}), 200
@app.route('/api/stats/scene/update', methods=['POST'])
@secure(app, ['key', 'user'], ['stats_manage_scene'])
def post_stats_scene(auth_token):
"""
        @api {post} /api/stats/scene/update StatsSceneUpdate
@apiVersion 1.1.0
@apiName StatsSceneUpdate
@apiGroup Stats
@apiDescription Update the stat scene.
@apiHeader {String} Authorization 'Bearer <Auth_Token>'
@apiError (Errors){String} AuthorizationHeaderInvalid Authorization Header is Invalid.
@apiError (Errors){String} AuthTokenExpired Token has expired, must be refreshed by client.
@apiError (Errors){String} AuthTokenInvalid Token is invalid, decode is impossible.
@apiError (Errors){String} ClientAccessImpossible This type of client can't access target endpoint.
@apiError (Errors){String} ClientAccessRefused Client has no scope access to target endpoint.
@apiParam {String} img New scene.
@apiError (Errors){String} ImgInvalid img is not a valid string.
@apiError (Errors){String} ImgNoFile img is not a valid file image.
@apiSuccess {String} img Image to show appended with a cache bang.
@apiSuccess {String} last_modified Last time the file was modified.
"""
data = request.get_json(force=True)
# activated check
img = data.get('img', '')
if not isinstance(img, str) or len(img) <= 0:
return jsonify({'success': 'no',
'error': 'ImgInvalid',
'payload': {}
}), 200
# change scene
stats_scene_status_dc = db.session.query(DynamicConfiguration).filter(DynamicConfiguration.key=='stats_scene').one_or_none()
if stats_scene_status_dc is None:
stats_scene_status_dc = DynamicConfiguration('stats_scene', img)
db.session.add(stats_scene_status_dc)
# File look on disk
path_file = os.path.join(app.config['IMG_GENERATE_PATH'], img + '.png')
if not os.path.isfile(path_file):
return jsonify({'success': 'no',
'error': 'ImgNoFile',
'payload': {}
}), 200
last_modified = os.path.getmtime(path_file)
stats_scene_status_dc.value = img
db.session.commit()
return jsonify({'success': 'yes',
'error': '',
'payload': {
'img': stats_scene_status_dc.value,
'last_modified': last_modified
}
}), 200
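
# Illustrative client sketch (not part of this module; the host, key, value
# and token below are assumptions):
#
#     import requests
#     r = requests.post('https://example.org/api/stats/csv/update',
#                       headers={'Authorization': 'Bearer <Auth_Token>'},
#                       json={'key': 'my_stats', 'value': 'a,b\n1,2'})
#     assert r.json()['success'] == 'yes'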
| gpl-3.0 | 1,828,865,874,087,294,500 | 42.371191 | 139 | 0.568308 | false | 4.431644 | true | false | false | 0.003385 |
petecummings/django-blog-zinnia | zinnia/tests/utils.py | 7 | 2729 | """Utils for Zinnia's tests"""
try:
from urllib.parse import parse_qs
from urllib.parse import urlparse
from xmlrpc.client import Transport
except ImportError: # Python 2
from urlparse import parse_qs
from urlparse import urlparse
from xmlrpclib import Transport
from datetime import datetime as original_datetime
from django.utils import six
from django.conf import settings
from django.utils import timezone
from django.test.client import Client
from django.template.loaders.base import Loader
class TestTransport(Transport):
"""
    Handles connections to the XML-RPC server through the Django test client.
"""
def __init__(self, *args, **kwargs):
Transport.__init__(self, *args, **kwargs)
self.client = Client()
def request(self, host, handler, request_body, verbose=0):
self.verbose = verbose
response = self.client.post(handler,
request_body,
content_type="text/xml")
res = six.BytesIO(response.content)
setattr(res, 'getheader', lambda *args: '') # For Python >= 2.7
res.seek(0)
return self.parse_response(res)
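
# Illustrative usage (a sketch; the XML-RPC endpoint URL is an assumption):
#
#     from xmlrpc.client import ServerProxy  # xmlrpclib.ServerProxy on Python 2
#     server = ServerProxy('http://localhost/xmlrpc/',
#                          transport=TestTransport())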
def omniscient_datetime(*args):
"""
Generating a datetime aware or naive depending of USE_TZ.
"""
d = original_datetime(*args)
if settings.USE_TZ:
d = timezone.make_aware(d, timezone.utc)
return d
datetime = omniscient_datetime
def is_lib_available(library):
"""
Check if a Python library is available.
"""
try:
__import__(library)
return True
except ImportError:
return False
def urlEqual(url_1, url_2):
"""
Compare two URLs with query string where
ordering does not matter.
"""
parse_result_1 = urlparse(url_1)
parse_result_2 = urlparse(url_2)
return (parse_result_1[:4] == parse_result_2[:4] and
parse_qs(parse_result_1[5]) == parse_qs(parse_result_2[5]))
class VoidLoader(Loader):
"""
Template loader which is always returning
an empty template.
"""
is_usable = True
_accepts_engine_in_init = True
def load_template_source(self, template_name, template_dirs=None):
return ('', 'voidloader:%s' % template_name)
class EntryDetailLoader(Loader):
"""
Template loader which only return the content
of an entry detail template.
"""
is_usable = True
_accepts_engine_in_init = True
def load_template_source(self, template_name, template_dirs=None):
return ('<html><head><title>{{ object.title }}</title></head>'
'<body>{{ object.html_content|safe }}</body></html>',
'entrydetailloader:%s' % template_name)
| bsd-3-clause | 8,142,980,033,508,062,000 | 27.134021 | 72 | 0.632833 | false | 4.019146 | false | false | false | 0.000366 |
dirn/ansible | lib/ansible/plugins/strategies/linear.py | 3 | 12455 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
from ansible.utils.debug import debug
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
for (k, v) in host_tasks.iteritems():
if v is None:
continue
(s, t) = v
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
for host in hosts:
(s, t) = host_tasks[host.name]
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
#if new_t != t:
# raise AnsibleError("iterator error, wtf?")
rvals.append((host, t))
else:
rvals.append((host, noop_task))
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
        # iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run():
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
continue
elif meta_action == 'flush_handlers':
self.run_handlers(iterator, play_context)
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
else:
debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
debug("done getting variables")
if not callback_sent:
temp_task = task.copy()
temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False)
self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
callback_sent = True
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError, e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
# FIXME: callback here?
print(e)
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
debug("results queue empty")
except (IOError, EOFError), e:
debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 | -4,817,692,257,264,442,000 | 45.823308 | 179 | 0.548936 | false | 4.663048 | false | false | false | 0.003292 |
redhatrises/freeipa | ipaclient/remote_plugins/2_114/sudocmd.py | 8 | 10873 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Sudo Commands
Commands used as building blocks for sudo
EXAMPLES:
Create a new command
ipa sudocmd-add --desc='For reading log files' /usr/bin/less
Remove a command
ipa sudocmd-del /usr/bin/less
""")
register = Registry()
@register()
class sudocmd(Object):
takes_params = (
parameters.Str(
'sudocmd',
primary_key=True,
label=_(u'Sudo Command'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'A description of this command'),
),
parameters.Str(
'memberof_sudocmdgroup',
required=False,
label=_(u'Sudo Command Groups'),
),
)
@register()
class sudocmd_add(Method):
__doc__ = _("Create new Sudo Command.")
takes_args = (
parameters.Str(
'sudocmd',
cli_name='command',
label=_(u'Sudo Command'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this command'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class sudocmd_del(Method):
__doc__ = _("Delete Sudo Command.")
takes_args = (
parameters.Str(
'sudocmd',
multivalue=True,
cli_name='command',
label=_(u'Sudo Command'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class sudocmd_find(Method):
__doc__ = _("Search for Sudo Commands.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'sudocmd',
required=False,
cli_name='command',
label=_(u'Sudo Command'),
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this command'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("command")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class sudocmd_mod(Method):
__doc__ = _("Modify Sudo Command.")
takes_args = (
parameters.Str(
'sudocmd',
cli_name='command',
label=_(u'Sudo Command'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this command'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class sudocmd_show(Method):
__doc__ = _("Display Sudo Command.")
takes_args = (
parameters.Str(
'sudocmd',
cli_name='command',
label=_(u'Sudo Command'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| gpl-3.0 | 8,469,646,051,195,053,000 | 26.596447 | 162 | 0.493792 | false | 4.522879 | false | false | false | 0.001747 |
GeoNode/geonode | geonode/people/signals.py | 3 | 5613 | #########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""Signal handlers pertaining to the people app
Some of these signals deal with authentication related workflows.
"""
import logging
import traceback
from uuid import uuid1
from allauth.account.models import EmailAddress
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.db.models import Q
from geonode.base.auth import (
get_or_create_token,
delete_old_tokens,
set_session_token,
remove_session_token)
from geonode.groups.models import GroupProfile
from geonode.groups.conf import settings as groups_settings
from geonode.notifications_helper import send_notification
from .adapters import get_data_extractor
logger = logging.getLogger(__name__)
def _add_user_to_registered_members(user):
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
groupprofile = GroupProfile.objects.filter(slug=group_name).first()
if groupprofile:
groupprofile.join(user)
def _remove_user_from_registered_members(user):
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
groupprofile = GroupProfile.objects.filter(slug=group_name).first()
if groupprofile:
groupprofile.leave(user)
def do_login(sender, user, request, **kwargs):
"""
Take action on user login. Generate a new user access_token to be shared
with GeoServer, and store it into the request.session
"""
if user and user.is_authenticated:
token = None
try:
token = get_or_create_token(user)
except Exception:
u = uuid1()
token = u.hex
tb = traceback.format_exc()
logger.debug(tb)
set_session_token(request.session, token)
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'login':
_add_user_to_registered_members(user)
def do_logout(sender, user, request, **kwargs):
if 'access_token' in request.session:
try:
delete_old_tokens(user)
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
remove_session_token(request.session)
request.session.modified = True
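
# Illustrative signal wiring (a sketch; registration normally lives in the
# app's AppConfig, and the signal names below come from
# django.contrib.auth.signals):
#
#     from django.contrib.auth.signals import user_logged_in, user_logged_out
#     user_logged_in.connect(do_login)
#     user_logged_out.connect(do_logout)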
def update_user_email_addresses(sender, **kwargs):
sociallogin = kwargs["sociallogin"]
user = sociallogin.user
extractor = get_data_extractor(sociallogin.account.provider)
try:
sociallogin_email = extractor.extract_email(
sociallogin.account.extra_data)
except NotImplementedError:
sociallogin_email = None
if sociallogin_email is not None:
try:
EmailAddress.objects.add_email(
request=None, user=user, email=sociallogin_email, confirm=False)
except IntegrityError:
logging.exception(msg=f"Could not add email address {sociallogin_email} to user {user}")
def notify_admins_new_signup(sender, **kwargs):
staff = get_user_model().objects.filter(Q(is_active=True) & (Q(is_staff=True) | Q(is_superuser=True)))
send_notification(
users=staff,
label="account_approve",
extra_context={"from_user": kwargs["user"]}
)
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'registration':
_add_user_to_registered_members(kwargs["user"])
def profile_post_save(instance, sender, **kwargs):
"""
Make sure the user belongs by default to the anonymous and contributors groups.
This will make sure that anonymous and contributors permissions will be granted to the new users.
"""
from django.contrib.auth.models import Group
created = kwargs.get('created', False)
if created:
anon_group, _ = Group.objects.get_or_create(name='anonymous')
instance.groups.add(anon_group)
is_anonymous = instance.username == 'AnonymousUser'
if Group.objects.filter(name='contributors').count() and not (instance.is_staff or instance.is_superuser or is_anonymous):
cont_group = Group.objects.get(name='contributors')
instance.groups.add(cont_group)
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'activation':
became_active = instance.is_active and (not instance._previous_active_state or created)
if became_active:
_add_user_to_registered_members(instance)
elif not instance.is_active:
_remove_user_from_registered_members(instance)
# do not create email, when user-account signup code is in use
if getattr(instance, '_disable_account_creation', False):
return
| gpl-3.0 | -5,892,325,385,097,922,000 | 35.448052 | 130 | 0.676109 | false | 3.992176 | false | false | false | 0.002138 |
boustrophedon/honey-badger-attack | honeybadger/Systems/InputSystem.py | 1 | 2447 | import pygame
from honeybadger.ecs.System import System
from honeybadger.Components import *
from syst_util import remove_dead
class InputSystem(System):
def __init__(self, world):
super(InputSystem, self).__init__(world)
#pygame.key.set_repeat(5,5)
self.pressed = dict()
		self.lastup = dict() # double-tap detection
self.world.subscribe_event('KeyDown', self.onKeyDown)
self.world.subscribe_event('KeyUp', self.onKeyUp)
def update(self, dt):
		# This essentially reimplements pygame.key.set_repeat: rather than
		# re-sending the KeyDown event, we re-run the handler for each key
		# still held, which distinguishes a fresh physical key press from a
		# key that is simply being held down.
for key in self.pressed.keys(): #haha keys
self.press_key(key)
def press_key(self, key):
		# TODO: these handlers belong in a keyhandlers.py, hooked in
		# automatically or via a KeyHandler class (one instance for KeyDown,
		# one for KeyUp). Longer term, a ControlScheme class should map keys
		# to actions: essentially two dicts, one "function description (e.g.
		# 'jump') --> key" for remapping purposes, and one "key --> callback".
self.pressed[key] = True
if (key == pygame.K_ESCAPE):
#self.world.send_event("Quit")
self.world.stop = True # above would probably be better, but I don't know where to handle it
elif (key == pygame.K_UP):
if (pygame.time.get_ticks() - self.lastup.get(key, -100) < 100):
self.world.send_event("PlayerAttack")
self.lastup.pop(key)
else:
self.world.send_event("MovePlayer", dir=(0,-1))
elif (key == pygame.K_DOWN):
self.world.send_event("MovePlayer", dir=(0,1))
elif (key == pygame.K_LEFT):
self.world.send_event("MovePlayer", dir=(-1,0))
elif (key == pygame.K_RIGHT):
self.world.send_event("MovePlayer", dir=(1,0))
elif (key == pygame.K_SPACE):
self.world.send_event("MobFireLaser", mob=None)
self.pressed.pop(key)
elif (key == pygame.K_y):
print(self.world.clock.get_fps())
self.pressed.pop(key)
def lift_key(self, key):
if self.pressed.get(key, False):
self.pressed.pop(key)
self.lastup[key] = pygame.time.get_ticks()
def onKeyDown(self, event_type, event):
self.press_key(event.key)
def onKeyUp(self, event_type, event):
self.lift_key(event.key)
| mit | 592,583,236,834,326,800 | 31.626667 | 119 | 0.70045 | false | 3.070263 | false | false | false | 0.028606 |
uwcirg/true_nth_usa_portal | portal/models/qbd.py | 1 | 2760 | """QBD (Questionnaire Bank Details) Module"""
from ..date_tools import FHIR_datetime
class QBD(object):
"""Details needed to define a QB"""
def __init__(
self, relative_start, iteration, recur=None, recur_id=None,
questionnaire_bank=None, qb_id=None):
"""Hold details needed to uniquely define a QB visit
For db objects ``questionnaire_bank`` and ``recur``, provide either
        the id or object version of each, not both. If the other is requested,
it'll be looked up and cached.
:param relative_start: UTC datetime value marking start point for QBD
:param iteration: None w/o a recurrence, otherwise a zero indexed int
:param recur: If the qb has one or more recurrences, set to the correct
recurrence, or alternatively pass a ``recur_id`` value.
:param recur_id: foreign key value for recur, if object not at hand
:param questionnaire_bank: The QB for the QBD, or alternatively pass
a ``qb_id`` value.
:param qb_id: foreign key value for the questionnaire_bank
"""
if recur and recur_id:
raise ValueError("expect *either* recur itself or id, not both")
if questionnaire_bank and qb_id:
raise ValueError("expect *either* QB itself or id, not both")
self.relative_start = relative_start
self.iteration = iteration
self._recur = recur
self.recur_id = recur.id if recur else recur_id
self._questionnaire_bank = questionnaire_bank
self.qb_id = questionnaire_bank.id if questionnaire_bank else qb_id
@property
def recur(self):
from .recur import Recur
if not self._recur and self.recur_id is not None:
self._recur = Recur.query.get(self.recur_id)
return self._recur
@property
def questionnaire_bank(self):
from .questionnaire_bank import QuestionnaireBank
if not self._questionnaire_bank and self.qb_id is not None:
self._questionnaire_bank = QuestionnaireBank.query.get(self.qb_id)
return self._questionnaire_bank
@questionnaire_bank.setter
def questionnaire_bank(self, qb):
self.qb_id = qb.id
self._questionnaire_bank = qb
def as_json(self):
from ..models.questionnaire_bank import visit_name
results = {}
results['questionnaire_bank'] = (
self.questionnaire_bank.as_json()
if self.questionnaire_bank else None)
results['relative_start'] = (
FHIR_datetime.as_fhir(self.relative_start)
if self.relative_start else None)
results['iteration'] = self.iteration
results['visit'] = visit_name(self)
return results
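
# Illustrative construction (a sketch; the id and datetime below are made up):
#
#     from datetime import datetime
#     qbd = QBD(relative_start=datetime(2020, 1, 1), iteration=None, qb_id=1)
#     details = qbd.as_json()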
| bsd-3-clause | -3,123,563,303,063,146,000 | 39 | 79 | 0.638043 | false | 3.887324 | false | false | false | 0 |
lukius/wifi-deauth | attack/impl.py | 1 | 6834 | import logging
import random
import time
from itertools import chain
from scapy.all import sendp, RadioTap, Dot11, Dot11ProbeResp,\
Dot11Deauth, Dot11Disas
from sniffer import WiFiSniffer
from utils import ChannelFinder, WiFiInterface
class WiFiDeauthAttack(object):
INITIAL_SEQ_NUMBER = 0
PACKETS_PER_PROBE = 1
NUM_PROBES = 50
DEFAULT_DEAUTH_REASON = 3
WIFI_BROADCAST_ADDRESS = 'ff:ff:ff:ff:ff:ff'
def __init__(self, interface, bssid):
self.interface = WiFiInterface(interface)
self.bssid = bssid.lower()
def run(self, executions, persistence_times):
# First, retrieve the channel used by the target AP in order to
# configure the wireless interface so it can inject deauth packets.
self._log('Finding channel in use by AP %s...' % self.bssid)
channel = ChannelFinder(self.interface, self.bssid).find()
self._log('Done. Using channel %d.' % channel)
self.interface.set_channel(channel)
# Finally, run the attack as many times as requested.
message = 'Running attack: iteration %d.'
self._log(message % 1)
self._do_run()
for i in range(executions-1):
idle_time = random.randint(*persistence_times)
self._log('Retrying again in %d seconds.' % idle_time)
time.sleep(idle_time)
self._log(message % (i+2))
self._do_run()
self._log('Done!')
def _log(self, message):
logging.log(logging.INFO, message)
def _build_packet(self, seq, source, dest, body):
encoded_seq = seq << 4
return RadioTap()\
/ Dot11(SC=encoded_seq, addr1=dest, addr2=source,
addr3=self.bssid)\
/ body
def _build_deauth_packet(self, seq, source, dest):
body = Dot11Deauth(reason=self.DEFAULT_DEAUTH_REASON)
return self._build_packet(seq, source, dest, body)
def _build_disas_packet(self, seq, source, dest):
body = Dot11Disas(reason=self.DEFAULT_DEAUTH_REASON)
return self._build_packet(seq, source, dest, body)
def _replicate_and_send(self, packet1, packet2):
packets = [(packet1, packet2)
for _ in range(self.PACKETS_PER_PROBE)]
packets = list(chain.from_iterable(packets))
self._send(packets)
def _send(self, packets):
sendp(packets, iface=self.interface.get_name(), verbose=0)
def _do_run(self):
raise NotImplementedError
class FixedClientDeauthAttack(WiFiDeauthAttack):
'''This attack injects two deauthentication packets coming from and sent to
each of the client MAC addresses provided as argument. This is then retried
as many times as NUM_PROBES specifies.'''
def __init__(self, interface, bssid, clients):
super(FixedClientDeauthAttack, self).__init__(interface, bssid)
self.clients = clients
def _deauth_client(self, client, seq):
client_packet = self._build_deauth_packet(seq,
source=client,
dest=self.bssid)
ap_packet = self._build_deauth_packet(seq,
source=self.bssid,
dest=client)
self._replicate_and_send(client_packet, ap_packet)
def _do_run(self):
msg = 'Injecting deauth packets for client %s using SN=%d...'
for seq in xrange(self.INITIAL_SEQ_NUMBER,
self.INITIAL_SEQ_NUMBER+self.NUM_PROBES):
for client in self.clients:
self._log(msg % (client, seq))
self._deauth_client(client, seq)
class GlobalDisassociationAttack(WiFiDeauthAttack):
'''This attack will inject broadcast disassociation and deauthentication
packets having as source the BSSID provided. However, it is not as
effective as the standard deauth attack against a single target.'''
def _do_run(self):
msg = 'Injecting disassociation and deauth packets sent to broadcast address %s...' %\
self.WIFI_BROADCAST_ADDRESS
self._log(msg)
for seq in xrange(self.INITIAL_SEQ_NUMBER,
self.INITIAL_SEQ_NUMBER+self.NUM_PROBES):
deauth_packet = self._build_deauth_packet(seq, source=self.bssid,
dest=self.WIFI_BROADCAST_ADDRESS)
disas_packet = self._build_disas_packet(seq, source=self.bssid,
dest=self.WIFI_BROADCAST_ADDRESS)
self._replicate_and_send(deauth_packet, disas_packet)
class SniffedClientDeauthAttack(WiFiDeauthAttack):
'''This attack consists in sniffing the network so as to retrieve MAC
addresses of potential clients of the given access point. Once this is
done, a standard fixed deauth attack against every client will be run.'''
def __init__(self, interface, bssid, timeout):
super(SniffedClientDeauthAttack, self).__init__(interface, bssid)
self.timeout = timeout
self.clients = set()
def _is_valid(self, address):
# Filter client addresses by discarding broadcast addresses as well as
# AP address occurrences (also discarding null entries since Scapy
# fills with None those missing addresses).
address = address.lower()
return address is not None and\
address != self.WIFI_BROADCAST_ADDRESS and\
address != self.bssid
def _get_client_addresses(self):
sniffer = WiFiSniffer(self.interface)
packets = sniffer.sniff(timeout=self.timeout,
lfilter=lambda pkt: not pkt.haslayer(Dot11ProbeResp) and\
pkt.addr3 == self.bssid)
clients = set()
for packet in packets:
if self._is_valid(packet.addr1):
clients.add(packet.addr1)
if self._is_valid(packet.addr2):
clients.add(packet.addr2)
return clients
def _do_run(self):
# First get client addresses by sniffing the network. Avoid computing
# this if it was already done in previous executions.
if not self.clients:
self._log('Sniffing network...')
self.clients = self._get_client_addresses()
self._log('Done. Found %d clients.' % len(self.clients))
# Now launch the attack against these clients.
attack = FixedClientDeauthAttack(self.interface, self.bssid, self.clients)
attack._do_run() | mit | 5,340,904,551,404,415,000 | 38.508671 | 97 | 0.592186 | false | 4.089767 | false | false | false | 0.005121 |
DraXus/andaluciapeople | atom/core.py | 26 | 15218 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: handle UTF-8 and unicode as done in src/atom/__init__.py
__author__ = '[email protected] (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
class XmlElement(object):
_qname = None
_other_elements = None
_other_attributes = None
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
attribute with the qname 'att1' but verion 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
def _get_rules(cls, version):
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
# runs a bit slower then hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
# This member points to a single occurance element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1]))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1]))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version)
return new_tree
def _attach_members(self, tree, version=1):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
other_attributes and other_elements are also added a children
of this tree.
version: int Ingnored in this method but used by VersionedElement.
"""
qname, elements, attributes = self.__class__._get_rules(version)
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
tree.attrib[key] = value
if self.text:
tree.text = self.text
def to_string(self, version=1):
"""Converts this object to XML."""
return ElementTree.tostring(self._to_tree(version))
ToString = to_string
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
expected_tag: string
expected_namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
def xml_element_from_string(xml_string, target_class,
version=1, encoding='UTF-8'):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
"""
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == target_class._qname:
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
| agpl-3.0 | 7,917,675,708,619,230,000 | 37.526582 | 80 | 0.658234 | false | 4.169315 | false | false | false | 0.01058 |
chemelnucfin/tensorflow | tensorflow/contrib/session_bundle/example/export_half_plus_two.py | 75 | 6029 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
FLAGS = None
def Export(export_dir, use_checkpoint_v2):
with tf.Session() as sess:
# Make model parameters a&b variables instead of constants to
# exercise the variable reloading mechanisms.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
# Calculate, y = a*x + b
y = tf.add(tf.multiply(a, x), b, name="y")
# Setup a standard Saver for our variables.
save = tf.train.Saver(
{
"a": a,
"b": b
},
sharded=True,
write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
tf.train.SaverDef.V1)
# asset_path contains the base directory of assets used in training (e.g.
# vocabulary files).
original_asset_path = tf.constant("/tmp/original/export/assets")
# Ops reading asset files should reference the asset_path tensor
# which stores the original asset path at training time and the
# overridden assets directory at restore time.
asset_path = tf.Variable(original_asset_path,
name="asset_path",
trainable=False,
collections=[])
assign_asset_path = asset_path.assign(original_asset_path)
# Use a fixed global step number.
global_step_tensor = tf.Variable(123, name="global_step")
# Create a RegressionSignature for our input and output.
regression_signature = exporter.regression_signature(
input_tensor=serialized_tf_example,
# Use tf.identity here because we export two signatures here.
# Otherwise only graph for one of the signatures will be loaded
# (whichever is created first) during serving.
output_tensor=tf.identity(y))
named_graph_signature = {
"inputs": exporter.generic_signature({"x": x}),
"outputs": exporter.generic_signature({"y": y})
}
# Create two filename assets and corresponding tensors.
# TODO(b/26254158) Consider adding validation of file existence as well as
# hashes (e.g. sha1) for consistency.
original_filename1 = tf.constant("hello1.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
filename1 = tf.Variable(original_filename1,
name="filename1",
trainable=False,
collections=[])
assign_filename1 = filename1.assign(original_filename1)
original_filename2 = tf.constant("hello2.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
filename2 = tf.Variable(original_filename2,
name="filename2",
trainable=False,
collections=[])
assign_filename2 = filename2.assign(original_filename2)
# Init op contains a group of all variables that we assign.
init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)
# CopyAssets is used as a callback during export to copy files to the
# given export directory.
def CopyAssets(filepaths, export_path):
print("copying asset files to: %s" % export_path)
for filepath in filepaths:
print("copying asset file: %s" % filepath)
# Run an export.
tf.global_variables_initializer().run()
export = exporter.Exporter(save)
export.init(
sess.graph.as_graph_def(),
init_op=init_op,
default_graph_signature=regression_signature,
named_graph_signatures=named_graph_signature,
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
assets_callback=CopyAssets)
export.export(export_dir, global_step_tensor, sess)
def main(_):
Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--export_dir",
type=str,
default="/tmp/half_plus_two",
help="Directory where to export inference model."
)
parser.add_argument(
"--use_checkpoint_v2",
type="bool",
nargs="?",
const=True,
default=False,
help="If true, write v2 checkpoint files.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -3,706,325,346,188,387,300 | 36.447205 | 80 | 0.655664 | false | 3.969059 | false | false | false | 0.002156 |
joshuajan/odoo | openerp/osv/osv.py | 43 | 1371 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import except_orm, Model, TransientModel, AbstractModel
# Deprecated, kept for backward compatibility.
# openerp.exceptions.Warning should be used instead.
except_osv = except_orm
# Deprecated, kept for backward compatibility.
osv = Model
osv_memory = TransientModel
osv_abstract = AbstractModel # ;-)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,774,787,961,518,786,000 | 40.545455 | 78 | 0.652808 | false | 4.366242 | false | false | false | 0.000729 |
MKRoughDiamond/graduate-adventure | backend/graduate/settings.py | 5 | 4554 | # Copyright (c) 2016, Shin DongJin. See the LICENSE file
# at the top-level directory of this distribution and at
# https://github.com/LastOne817/graduate-adventure/blob/master/LICENSE
#
# Licensed under the MIT license <http://opensource.org/licenses/MIT>.
# This file may not be copied, modified, or distributed except according
# to those terms.
"""
Django settings for graduate project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from .secret import SECRET_KEY
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'graduate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'graduate.wsgi.application'
# REST framework settings
REST_FRAMEWORK = {
'PAGE_SIZE': 10
}
# Using file-based session
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': os.path.join(BASE_DIR, 'test_sample', 'db.sqlite3'),
},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Logging
# LOG_DIR = os.path.join(BASE_DIR, 'log')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
},
},
'handlers': {
'backend': {
'class': 'logging.FileHandler',
'level': 'INFO',
'formatter': 'simple',
'filename': os.path.join(BASE_DIR, 'backend.log'),
},
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
},
'loggers': {
'backend': {
'level': 'INFO',
'handlers': ['backend', 'console'],
},
},
'root': {
'level': 'INFO',
'handler': ['backend', 'console'],
},
}
| mit | -4,321,784,457,970,073,000 | 24.160221 | 91 | 0.633729 | false | 3.597156 | false | false | false | 0.000878 |
JorgeDeLosSantos/NanchiPlot | nanchi/uimpl.py | 1 | 3881 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
import matplotlib.lines as lines
import numpy as np
import wx
class ZoomRectangle(object):
def __init__(self,figure,axes,canvas):
self.canvas = canvas
self.figure = figure
self.axes = axes
self.cline = lines.Line2D([],[], color="#00ff00", ls="--")
def connect(self):
print "connect: ",self.canvas,self.figure,self.axes
self.btpress = self.canvas.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.canvas.mpl_connect("button_release_event", self.on_release)
print self.btpress, self.btrelease
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.canvas.draw()
def on_press(self,event):
#~ print "Press"
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.canvas.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
"Release"
self.canvas.mpl_disconnect(self.motion)
self.canvas.mpl_disconnect(self.btpress)
self.canvas.mpl_disconnect(self.btrelease)
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.canvas.draw()
class FigureCanvas(FigureCanvasWxAgg):
def __init__(self,parent,id,figure,**kwargs):
FigureCanvasWxAgg.__init__(self,parent=parent, id=id, figure=figure,**kwargs)
self.figure = figure
self.axes = self.figure.get_axes()[0]
def disconnect_all(self):
try:
self.mpl_disconnect(self.motion)
self.mpl_disconnect(self.btpress)
self.mpl_disconnect(self.btrelease)
except:
pass
def zoomit(self):
self.cline = lines.Line2D([],[], color="#ff00ff", ls="--", lw=2.0)
self.btpress = self.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.mpl_connect("button_release_event", self.on_release)
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.draw()
def on_press(self,event):
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
self.disconnect_all()
try:
self.cline.remove() # Delete box
except:
self.stop_event_loop()
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.draw()
if __name__ == '__main__':
plt.plot([1,2,3,12,1,3])
fig = plt.gcf()
ax = plt.gca()
zr = ZoomRectangle(fig,ax,fig.canvas)
zr.connect()
plt.show()
| mit | 5,342,352,163,705,054,000 | 31.07438 | 89 | 0.558361 | false | 3.188989 | false | false | false | 0.013141 |
siliconsmiley/QGIS | python/plugins/db_manager/info_viewer.py | 6 | 5477 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QTextBrowser, QApplication
from .db_plugins.plugin import BaseError, DbError, DBPlugin, Schema, Table
from .dlg_db_error import DlgDbError
class InfoViewer(QTextBrowser):
def __init__(self, parent=None):
QTextBrowser.__init__(self, parent)
self.setOpenLinks(False)
self.item = None
self.dirty = False
self._clear()
self._showPluginInfo()
self.connect(self, SIGNAL("anchorClicked(const QUrl&)"), self._linkClicked)
def _linkClicked(self, url):
if self.item is None:
return
if url.scheme() == "action":
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
if self.item.runAction(url.path()):
self.refresh()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def refresh(self):
self.setDirty(True)
self.showInfo(self.item)
def showInfo(self, item):
if item == self.item and not self.dirty:
return
self._clear()
if item is None:
return
if isinstance(item, DBPlugin):
self._showDatabaseInfo(item)
elif isinstance(item, Schema):
self._showSchemaInfo(item)
elif isinstance(item, Table):
self._showTableInfo(item)
else:
return
self.item = item
self.connect(self.item, SIGNAL('aboutToChange'), self.setDirty)
def setDirty(self, val=True):
self.dirty = val
def _clear(self):
if self.item is not None:
self.disconnect(self.item, SIGNAL('aboutToChange'), self.setDirty)
self.item = None
self.dirty = False
self.item = None
self.setHtml("")
def _showPluginInfo(self):
from .db_plugins import getDbPluginErrors
html = u'<div style="background-color:#ffffcc;"><h1> ' + self.tr("DB Manager") + '</h1></div>'
html += '<div style="margin-left:8px;">'
for msg in getDbPluginErrors():
html += u"<p>%s" % msg
self.setHtml(html)
def _showDatabaseInfo(self, connection):
html = u'<div style="background-color:#ccffcc;"><h1> %s</h1></div>' % connection.connectionName()
html += '<div style="margin-left:8px;">'
try:
if connection.database() is None:
html += connection.info().toHtml()
else:
html += connection.database().info().toHtml()
except DbError, e:
html += u'<p style="color:red">%s</p>' % unicode(e).replace('\n', '<br>')
html += '</div>'
self.setHtml(html)
def _showSchemaInfo(self, schema):
html = u'<div style="background-color:#ffcccc;"><h1> %s</h1></div>' % schema.name
html += '<div style="margin-left:8px;">'
try:
html += schema.info().toHtml()
except DbError, e:
html += u'<p style="color:red">%s</p>' % unicode(e).replace('\n', '<br>')
html += "</div>"
self.setHtml(html)
def _showTableInfo(self, table):
html = u'<div style="background-color:#ccccff"><h1> %s</h1></div>' % table.name
html += '<div style="margin-left:8px;">'
try:
html += table.info().toHtml()
except DbError, e:
html += u'<p style="color:red">%s</p>' % unicode(e).replace('\n', '<br>')
html += '</div>'
self.setHtml(html)
return True
def setHtml(self, html):
# convert special tags :)
html = unicode(html).replace('<warning>', '<img src=":/db_manager/warning"> ')
# add default style
html = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<style type="text/css">
.section { margin-top: 25px; }
table.header th { background-color: #dddddd; }
table.header td { background-color: #f5f5f5; }
table.header th, table.header td { padding: 0px 10px; }
table td { padding-right: 20px; }
.underline { text-decoration:underline; }
</style>
</head>
<body>
%s <br>
</body>
</html>
""" % html
# print ">>>>>\n", html, "\n<<<<<<"
return QTextBrowser.setHtml(self, html)
| gpl-2.0 | 3,801,272,692,163,495,000 | 31.993976 | 110 | 0.507942 | false | 4.012454 | false | false | false | 0.002739 |
c2corg/v6_api | c2corg_api/scripts/redis-flushdb.py | 1 | 1456 | """ A script to call `flushdb` on the Redis database that is used as cache.
Note that all keys from the Redis database are deleted. If the database is
shared with other instances or applications, the keys of these will also be
removed.
"""
import logging
import os
import sys
from pyramid.paster import get_appsettings, setup_logging
from pyramid.scripts.common import parse_vars
from redis.client import Redis
from redis.connection import ConnectionPool
log = logging.getLogger('c2corg_api.redis_flushdb')
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
logging.getLogger('c2corg_api').setLevel(logging.INFO)
redis_url = '{0}?db={1}'.format(
settings['redis.url'], settings['redis.db_cache'])
log.info('Cache Redis: {0}'.format(redis_url))
# we don't really need a connection pool here, but the `from_url`
# function is convenient
redis_pool = ConnectionPool.from_url(redis_url, max_connections=1)
# remove all keys from the database
r = Redis(connection_pool=redis_pool)
r.flushdb()
log.info('Flushed cache')
if __name__ == "__main__":
main()
| agpl-3.0 | 4,112,885,119,490,071,000 | 27 | 75 | 0.682005 | false | 3.450237 | false | false | false | 0 |
adamtiger/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/inline.py | 85 | 1138 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inline bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.inline_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["Inline"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 556,204,707,703,441,860 | 38.241379 | 80 | 0.714411 | false | 4.262172 | false | false | false | 0 |
bitmovin/bitmovin-python | bitmovin/resources/models/manifests/dash/webm_representation.py | 1 | 2082 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources.models import AbstractModel
from bitmovin.resources.enums import WebMRepresentationType
from bitmovin.utils import Serializable
class WebMRepresentation(AbstractModel, Serializable):
def __init__(self, type, encoding_id, muxing_id, segment_path, start_segment_number=None,
id_=None, custom_data=None):
super().__init__(id_=id_, custom_data=custom_data)
self._type = None
self.type = type
self.encodingId = encoding_id
self.muxingId = muxing_id
self.segmentPath = segment_path
self.startSegmentNumber = start_segment_number
@property
def type(self):
if self._type is not None:
return self._type
else:
return WebMRepresentationType.default().value
@type.setter
def type(self, new_type):
if new_type is None:
return
if isinstance(new_type, str):
self._type = new_type
elif isinstance(new_type, WebMRepresentationType):
self._type = new_type.value
else:
raise InvalidTypeError(
'Invalid type {} for \'type\': must be either str or WebMRepresentationType!'.format(type(new_type)))
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
custom_data = json_object.get('customData')
type_ = json_object['type']
encoding_id = json_object['encodingId']
muxing_id = json_object['muxingId']
segment_path = json_object['segmentPath']
start_segment_number = json_object.get('startSegmentNumber')
webm_representation = WebMRepresentation(
id_=id_, custom_data=custom_data, type=type_, encoding_id=encoding_id, muxing_id=muxing_id,
segment_path=segment_path, start_segment_number=start_segment_number)
return webm_representation
def serialize(self):
serialized = super().serialize()
serialized['type'] = self.type
return serialized
| unlicense | -4,330,746,042,180,526,000 | 36.178571 | 117 | 0.639769 | false | 4.050584 | false | false | false | 0.002402 |
kmoocdev/edx-platform | lms/djangoapps/mobile_api/users/serializers.py | 13 | 2758 | """
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from courseware.courses import course_image_url
from student.models import CourseEnrollment, User
class CourseField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_native(self, course):
course_id = unicode(course.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
course_about_url = reverse(
'course-about-detail',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
course_about_url = None
return {
"id": course_id,
"name": course.display_name,
"number": course.display_number_with_default,
"org": course.display_org_with_default,
"start": course.start,
"end": course.end,
"course_image": course_image_url(course),
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"course_about": course_about_url,
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseField()
class Meta: # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.Field(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta: # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
| agpl-3.0 | -2,718,227,543,846,317,600 | 30.701149 | 74 | 0.567078 | false | 4.521311 | false | false | false | 0 |
ctogle/nnets | src/nnets/nnetworks/nmlp.py | 1 | 2565 | import nnets.nnetworks.nnet as nn
import nnets.nlayers.nlayer_logistic as nll
import nnets.nlayers.nlayer_hidden as nlh
import theano
import theano.tensor as T
import numpy,timeit
import pdb
#################################################################################
#################################################################################
class multilayer_perceptron_network(nn.network):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
#############################################
def grow(self,n_in,n_out,hidden = [(100,None)],**kws):
rng = numpy.random.RandomState(1234)
self.input = T.matrix('x')
n_hiddenlayers = len(hidden)
assert n_hiddenlayers > 0
n_hidden,n_activator = hidden[0]
ilayer = nlh.hidden_layer(
self.input,n_in,n_hidden,rng,activation = n_activator)
self.params = ilayer.params
last_n_hidden = n_hidden
lhlayer = ilayer
for j in range(1,n_hiddenlayers):
n_hidden,n_activator = hidden[j]
hlayer = nlh.hidden_layer(
lhlayer.output,last_n_hidden,n_hidden,
rng,activation = n_activator)
self.params += hlayer.params
last_n_hidden = n_hidden
lhlayer = hlayer
self.llayer = nll.logreg_layer(lhlayer.output,last_n_hidden,n_out)
# L1 norm ; one regularization option is to enforce L1 norm to be small
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L1 = (abs(ilayer.W).sum()+abs(self.llayer.W).sum())
self.L2_sqr = ((ilayer.W**2).sum()+(self.llayer.W**2).sum())
self.negative_log_likelihood = self.llayer.negative_log_likelihood
self.errors = self.llayer.errors
self.params += self.llayer.params
self.predictor = theano.function(
inputs = [ilayer.input],outputs = self.llayer.y_pred)
#############################################
#################################################################################
#################################################################################
| mit | 2,948,486,047,406,725,600 | 33.662162 | 81 | 0.536452 | false | 4.064976 | false | false | false | 0.014815 |
sslavic/kafka | tests/kafkatest/sanity_checks/test_kafka_version.py | 23 | 2662 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.mark.resource import cluster
from kafkatest.services.kafka import KafkaService, config_property
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.utils import is_version
from kafkatest.version import LATEST_0_8_2, DEV_BRANCH
class KafkaVersionTest(Test):
"""Sanity checks on kafka versioning."""
def __init__(self, test_context):
super(KafkaVersionTest, self).__init__(test_context)
self.topic = "topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
def setUp(self):
self.zk.start()
@cluster(num_nodes=2)
def test_0_8_2(self):
"""Test kafka service node-versioning api - verify that we can bring up a single-node 0.8.2.X cluster."""
self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
topics={self.topic: {"partitions": 1, "replication-factor": 1}})
node = self.kafka.nodes[0]
node.version = LATEST_0_8_2
self.kafka.start()
assert is_version(node, [LATEST_0_8_2], logger=self.logger)
@cluster(num_nodes=3)
def test_multi_version(self):
"""Test kafka service node-versioning api - ensure we can bring up a 2-node cluster, one on version 0.8.2.X,
the other on the current development branch."""
self.kafka = KafkaService(self.test_context, num_nodes=2, zk=self.zk,
topics={self.topic: {"partitions": 1, "replication-factor": 2}})
self.kafka.nodes[1].version = LATEST_0_8_2
self.kafka.nodes[1].config[config_property.INTER_BROKER_PROTOCOL_VERSION] = "0.8.2.X"
self.kafka.start()
assert is_version(self.kafka.nodes[0], [DEV_BRANCH.vstring], logger=self.logger)
assert is_version(self.kafka.nodes[1], [LATEST_0_8_2], logger=self.logger)
| apache-2.0 | 8,580,142,756,036,014,000 | 44.896552 | 116 | 0.690458 | false | 3.607046 | true | false | false | 0.00263 |
tx137884746/IzayoiMiku | toughradius/tools/livecd.py | 4 | 4933 | #!/usr/bin/env python
#coding:utf-8
from toughradius.tools.secret import gen_secret
def echo_radiusd_cnf():
return '''[DEFAULT]
debug = 0
tz = CST-8
secret = %s
ssl = 1
privatekey = /var/toughradius/privkey.pem
certificate = /var/toughradius/cacert.pem
[database]
dbtype = mysql
dburl = mysql://radiusd:[email protected]/toughradius?charset=utf8
echo = false
pool_size = 120
pool_recycle = 300
[radiusd]
acctport = 1813
adminport = 1815
authport = 1812
cache_timeout = 600
logfile = /var/toughradius/log/radiusd.log
[admin]
port = 1816
logfile = /var/toughradius/log/admin.log
[customer]
port = 1817
logfile = /var/toughradius/log/customer.log
'''%gen_secret(32)
def echo_privkey_pem():
return '''-----BEGIN RSA PRIVATE KEY-----
MIIBPAIBAAJBAK+a5EAeEZFJdpwmMdgexCvE/x5HpsSvkyx+CFt9MDI8Gx9sXTsQ
hn+Satm4bNKq9+0yarGL1MoVoXCmzMkv++0CAwEAAQJBAJel139XeCxTmM54XYsZ
5qc11Gs9zVMFnL9Lh8QadEisGBoLNVGRKspVuR21pf9yWK1APJYtxeY+ElxTeN6v
frECIQDlXCN0ZLF2IBOUbOAEBnBEzYA19cnpktaD1EyeD1bpOwIhAMQAY3R+suNO
JE1MvE/g6ICAQVCDeiSW0JBUHbpXT5z3AiBakZqygHyPD7WLm76N+Fjm4lspc6hK
oqAwqGmk1JvWNwIhAJicyNPLV1S/4mpB5pq3v7FWrASZ6wAUYh8PL/qIw1evAiEA
sS5pdElUCN0d7/EdoOPBmEAJL7RHs6SjYEihK5ds4TQ=
-----END RSA PRIVATE KEY-----'''
def echo_cacert_pem():
return '''-----BEGIN CERTIFICATE-----
MIIDTDCCAvagAwIBAgIJAMZsf8cd/CUeMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD
VQQGEwJDTjEOMAwGA1UECBMFSHVuYW4xETAPBgNVBAcTCENoYW5nc2hhMRgwFgYD
VQQKEw90b3VnaHJhZGl1cy5uZXQxFDASBgNVBAsTC3RvdWdocmFkaXVzMRgwFgYD
VQQDEw90b3VnaHJhZGl1cy5uZXQxJjAkBgkqhkiG9w0BCQEWF3N1cHBvcnRAdG91
Z2hyYWRpdXMubmV0MB4XDTE1MDMxODE2MTg1N1oXDTIwMTAyNTE2MTg1N1owgaIx
CzAJBgNVBAYTAkNOMQ4wDAYDVQQIEwVIdW5hbjERMA8GA1UEBxMIQ2hhbmdzaGEx
GDAWBgNVBAoTD3RvdWdocmFkaXVzLm5ldDEUMBIGA1UECxMLdG91Z2hyYWRpdXMx
GDAWBgNVBAMTD3RvdWdocmFkaXVzLm5ldDEmMCQGCSqGSIb3DQEJARYXc3VwcG9y
dEB0b3VnaHJhZGl1cy5uZXQwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAr5rkQB4R
kUl2nCYx2B7EK8T/HkemxK+TLH4IW30wMjwbH2xdOxCGf5Jq2bhs0qr37TJqsYvU
yhWhcKbMyS/77QIDAQABo4IBCzCCAQcwHQYDVR0OBBYEFK9UjaxgsGyDZqfLEGUl
zYUhZqyzMIHXBgNVHSMEgc8wgcyAFK9UjaxgsGyDZqfLEGUlzYUhZqyzoYGopIGl
MIGiMQswCQYDVQQGEwJDTjEOMAwGA1UECBMFSHVuYW4xETAPBgNVBAcTCENoYW5n
c2hhMRgwFgYDVQQKEw90b3VnaHJhZGl1cy5uZXQxFDASBgNVBAsTC3RvdWdocmFk
aXVzMRgwFgYDVQQDEw90b3VnaHJhZGl1cy5uZXQxJjAkBgkqhkiG9w0BCQEWF3N1
cHBvcnRAdG91Z2hyYWRpdXMubmV0ggkAxmx/xx38JR4wDAYDVR0TBAUwAwEB/zAN
BgkqhkiG9w0BAQUFAANBAF2J27T8NnXptROTUx7IKU3MIBGvRqj6imtwjsus6fQU
GOLwDVfVEaqmv6YE6jg5ummEfeIcwUfkD5fLgrfRQ9s=
-----END CERTIFICATE-----'''
def echo_radiusd_script():
return '''#!/bin/sh
### BEGIN INIT INFO
# Provides: radiusd
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: starts the radiusd daemon
# Description: starts toughradius using start-stop-daemon
### END INIT INFO
export PATH=$PATH:/usr/local/bin
set -e
set -u
usage ()
{
cat <<EOF
Usage: $0 [OPTIONS]
start start toughradius
stop stop toughradius
restart restart toughradius,
upgrade update toughradius version and restart
All other options are passed to the toughrad program.
EOF
exit 1
}
start()
{
toughctl --start all
}
stop()
{
toughctl --stop all
}
restart()
{
toughctl --restart all
}
upgrade()
{
echo 'starting upgrade...'
pip install -U https://github.com/talkincode/ToughRADIUS/archive/stable.zip
echo 'upgrade done'
}
case "$1" in
help)
usage
;;
start)
start
;;
stop)
stop
;;
restart)
restart
;;
upgrade)
upgrade
;;
*)
usage
;;
esac
exit 0
'''
def echo_mysql_cnf():
return '''[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
bind-address = 127.0.0.1
key_buffer = 16M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
myisam-recover = BACKUP
max_connections = 1000
table_cache = 512
#thread_concurrency = 8
#
# * Query Cache Configuration
#
query_cache_limit = 4M
query_cache_size = 64M
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 100M
#
# * InnoDB
#
innodb_buffer_pool_size = 256M
innodb_data_file_path = ibdata1:16M:autoextend
innodb_additional_mem_pool_size = 16M
innodb_thread_concurrency = 8
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 8M
innodb_log_file_size = 128M
log-error=/var/log/mysqld.log
[mysqldump]
quick
quote-names
max_allowed_packet = 64M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completition
[isamchk]
key_buffer = 16M
!includedir /etc/mysql/conf.d/
''' | agpl-3.0 | 7,071,285,846,787,794,000 | 20.928889 | 79 | 0.750659 | false | 2.135498 | false | false | false | 0.004662 |
mutilx9/trackma | trackma/accounts.py | 3 | 4266 | import pickle
from trackma import utils
class AccountManager():
"""
This is the account manager.
It provides a generic way for the user interface to query for the
available registered accounts, and add or delete accounts.
This class returns an Account Dictionary used by
the :class:`Engine` to start.
"""
accounts = {'default': None, 'next': 1, 'accounts': dict()}
def __init__(self):
utils.make_dir('')
self.filename = utils.get_root_filename('accounts.dict')
self._load()
def _load(self):
if utils.file_exists(self.filename):
with open(self.filename, 'rb') as f:
self.accounts = pickle.load(f)
def _save(self):
is_new = not utils.file_exists(self.filename)
with open(self.filename, 'wb') as f:
if is_new:
utils.change_permissions(self.filename, 0o600)
pickle.dump(self.accounts, f, protocol=2)
def add_account(self, username, password, api):
"""
Registers a new account with the specified
*username*, *password*, and *api*.
The *api* must be one of the available APIs
found in the utils.available_libs dict.
"""
available_libs = utils.available_libs.keys()
if not username:
raise utils.AccountError('Empty username.')
if not password:
raise utils.AccountError('Empty password.')
if api not in available_libs:
raise utils.AccountError('That API doesn\'t exist.')
account = {'username': username,
'password': password,
'api': api,
}
nextnum = self.accounts['next']
self.accounts['accounts'][nextnum] = account
self.accounts['next'] += 1
self._save()
def edit_account(self, num, username, password, api, friends=[]):
"""
Updates data for account *num* with the specified
*username*, *password*, and *api*.
"""
available_libs = utils.available_libs.keys()
if not username:
raise utils.AccountError('Empty username.')
if not password:
raise utils.AccountError('Empty password.')
if api not in available_libs:
raise utils.AccountError('That API doesn\'t exist.')
account = {'username': username,
'password': password,
'api': api,
}
self.accounts['accounts'][num].update(account)
self._save()
def delete_account(self, num):
"""
Deletes the account number **num**.
"""
self.accounts['default'] = None
del self.accounts['accounts'][num]
# Reset index if there are no accounts left
if not self.accounts['accounts']:
self.accounts['next'] = 1
self._save()
def purge_account(self, num):
"""
Renames stale cache files for account number **num**.
"""
account = self.accounts['accounts'][num]
userfolder = "%s.%s" % (account['username'], account['api'])
utils.make_dir(userfolder + '.old')
utils.regex_rename_files('(.*.queue)|(.*.info)|(.*.list)|(.*.meta)', userfolder, userfolder + '.old')
def get_account(self, num):
"""
Returns the account dict **num**.
"""
return self.accounts['accounts'][num]
def get_accounts(self):
"""
Returns an iterator of available accounts.
"""
return self.accounts['accounts'].items()
def get_default(self):
"""
Returns the default account number, if set.
Otherwise returns None.
"""
num = self.accounts['default']
if num is not None:
try:
return self.accounts['accounts'][num]
except KeyError:
return None
else:
return None
def set_default(self, val):
"""
Sets a new default account number.
"""
self.accounts['default'] = val
self._save()
def unset_default(self):
"""
Unsets the default account number.
"""
self.accounts['default'] = None
| gpl-3.0 | -3,770,392,888,226,030,000 | 28.42069 | 109 | 0.549226 | false | 4.481092 | false | false | false | 0.001172 |
gabrielStanovsky/props | props/time_annotator/timex.py | 1 | 14008 | # Code for tagging temporal expressions in text
# For details of the TIMEX format, see http://timex2.mitre.org/
import re
import string
import os
import sys
# Requires eGenix.com mx Base Distribution
# http://www.egenix.com/products/python/mxBase/
try:
from mx.DateTime import *
except ImportError:
print """
Requires eGenix.com mx Base Distribution
http://www.egenix.com/products/python/mxBase/"""
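# Note (assumed reading of the import guard): only the date-grounding code
# further below needs mx.DateTime; tag() itself runs without it.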
# Predefined strings.
numbers = ("(^a(?=\s)|one|two|three|four|five|six|seven|eight|nine|ten|"
           "eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|"
           "eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|"
           "ninety|hundred|thousand)")
day = "(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
week_day = "(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
month = "(january|february|march|april|may|june|july|august|september| \
october|november|december)"
dmy = "(year|day|week|month)"
rel_day = "(today|yesterday|tomorrow|tonight|tonite)"
exp1 = "(before|after|earlier|later|ago)"
exp2 = "(this|next|last)"
iso = "\d+[/-]\d+[/-]\d+ \d+:\d+:\d+\.\d+"
year = "((?<=\s)\d{4}|^\d{4})"
regxp1 = "((\d+|(" + numbers + "[-\s]?)+) " + dmy + "s? " + exp1 + ")"
regxp2 = "((?:" + exp2 + " )?(" + dmy + "|" + week_day + "|" + month + "))"
reg1 = re.compile(regxp1, re.IGNORECASE)
reg2 = re.compile(regxp2, re.IGNORECASE)
reg3 = re.compile(rel_day, re.IGNORECASE)
reg4 = re.compile(iso)
reg5 = re.compile(year)
def tag(text):
# Initialization
timex_found = []
# re.findall() finds all the substring matches, keep only the full
# matching string. Captures expressions such as 'number of days' ago, etc.
found = reg1.findall(text)
found = [a[0] for a in found if len(a) > 1]
for timex in found:
timex_found.append(timex)
# Variations of this thursday, next year, etc
found = reg2.findall(text)
found = [a[0] for a in found if len(a) > 1]
for timex in found:
timex_found.append(timex)
# today, tomorrow, etc
found = reg3.findall(text)
for timex in found:
timex_found.append(timex)
# ISO
found = reg4.findall(text)
for timex in found:
timex_found.append(timex)
# Year
found = reg5.findall(text)
for timex in found:
timex_found.append(timex)
# Tag only temporal expressions which haven't been tagged.
for timex in timex_found:
text = re.sub(timex + '(?!</TIMEX2>)', '<TIMEX2>' + timex + '</TIMEX2>', text)
return text
# Hash function for week days to simplify the grounding task.
# [Mon..Sun] -> [0..6]
hashweekdays = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6}
# Hash function for months to simplify the grounding task.
# [Jan..Dec] -> [1..12]
hashmonths = {
'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12}
# Hash number in words into the corresponding integer value
def hashnum(number):
if re.match(r'one|^a\b', number, re.IGNORECASE):
return 1
if re.match(r'two', number, re.IGNORECASE):
return 2
if re.match(r'three', number, re.IGNORECASE):
return 3
if re.match(r'four', number, re.IGNORECASE):
return 4
if re.match(r'five', number, re.IGNORECASE):
return 5
if re.match(r'six', number, re.IGNORECASE):
return 6
if re.match(r'seven', number, re.IGNORECASE):
return 7
if re.match(r'eight', number, re.IGNORECASE):
return 8
if re.match(r'nine', number, re.IGNORECASE):
return 9
if re.match(r'ten', number, re.IGNORECASE):
return 10
if re.match(r'eleven', number, re.IGNORECASE):
return 11
if re.match(r'twelve', number, re.IGNORECASE):
return 12
if re.match(r'thirteen', number, re.IGNORECASE):
return 13
if re.match(r'fourteen', number, re.IGNORECASE):
return 14
if re.match(r'fifteen', number, re.IGNORECASE):
return 15
if re.match(r'sixteen', number, re.IGNORECASE):
return 16
if re.match(r'seventeen', number, re.IGNORECASE):
return 17
if re.match(r'eighteen', number, re.IGNORECASE):
return 18
if re.match(r'nineteen', number, re.IGNORECASE):
return 19
if re.match(r'twenty', number, re.IGNORECASE):
return 20
if re.match(r'thirty', number, re.IGNORECASE):
return 30
if re.match(r'forty', number, re.IGNORECASE):
return 40
if re.match(r'fifty', number, re.IGNORECASE):
return 50
if re.match(r'sixty', number, re.IGNORECASE):
return 60
if re.match(r'seventy', number, re.IGNORECASE):
return 70
if re.match(r'eighty', number, re.IGNORECASE):
return 80
if re.match(r'ninety', number, re.IGNORECASE):
return 90
if re.match(r'hundred', number, re.IGNORECASE):
return 100
if re.match(r'thousand', number, re.IGNORECASE):
return 1000
# Given a timex_tagged_text and a Date object set to base_date,
# returns timex_grounded_text
def ground(tagged_text, base_date):
# Find all identified timex and put them into a list
timex_regex = re.compile(r'<TIMEX2>.*?</TIMEX2>', re.DOTALL)
timex_found = timex_regex.findall(tagged_text)
timex_found = map(lambda timex:re.sub(r'</?TIMEX2.*?>', '', timex), \
timex_found)
ret_items=[]
# Calculate the new date accordingly
for timex in timex_found:
timex_val = 'UNKNOWN' # Default value
timex_ori = timex # Backup original timex for later substitution
# If numbers are given in words, hash them into corresponding numbers.
# eg. twenty five days ago --> 25 days ago
if re.search(numbers, timex, re.IGNORECASE):
split_timex = re.split(r'\s(?=days?|months?|years?|weeks?)', \
timex, re.IGNORECASE)
value = split_timex[0]
unit = split_timex[1]
num_list = map(lambda s:hashnum(s),re.findall(numbers + '+', \
value, re.IGNORECASE))
timex = `sum(num_list)` + ' ' + unit
# If timex matches ISO format, remove 'time' and reorder 'date'
if re.match(r'\d+[/-]\d+[/-]\d+ \d+:\d+:\d+\.\d+', timex):
dmy = re.split(r'\s', timex)[0]
dmy = re.split(r'/|-', dmy)
timex_val = str(dmy[2]) + '-' + str(dmy[1]) + '-' + str(dmy[0])
# Specific dates
elif re.match(r'\d{4}', timex):
timex_val = str(timex)
# Relative dates
elif re.match(r'tonight|tonite|today', timex, re.IGNORECASE):
timex_val = str(base_date)
elif re.match(r'yesterday', timex, re.IGNORECASE):
timex_val = str(base_date + RelativeDateTime(days=-1))
elif re.match(r'tomorrow', timex, re.IGNORECASE):
timex_val = str(base_date + RelativeDateTime(days=+1))
# Weekday in the previous week.
elif re.match(r'last ' + week_day, timex, re.IGNORECASE):
day = hashweekdays[timex.split()[1].lower()]
timex_val = str(base_date + RelativeDateTime(weeks=-1, \
weekday=(day,0)))
# Weekday in the current week.
elif re.match(r'(?:this )?(' + week_day+")", timex, re.IGNORECASE):
day = hashweekdays[timex.split()[-1].lower()]
timex_val = str(base_date + RelativeDateTime(weeks=0, \
weekday=(day,0)))
# Weekday in the following week.
elif re.match(r'next ' + week_day, timex, re.IGNORECASE):
day = hashweekdays[timex.split()[1].lower()]
timex_val = str(base_date + RelativeDateTime(weeks=+1, \
weekday=(day,0)))
# Last, this, next week.
elif re.match(r'last week', timex, re.IGNORECASE):
year = (base_date + RelativeDateTime(weeks=-1)).year
# iso_week returns a triple (year, week, day) hence, retrieve
# only week value.
week = (base_date + RelativeDateTime(weeks=-1)).iso_week[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'this week', timex, re.IGNORECASE):
year = (base_date + RelativeDateTime(weeks=0)).year
week = (base_date + RelativeDateTime(weeks=0)).iso_week[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'next week', timex, re.IGNORECASE):
year = (base_date + RelativeDateTime(weeks=+1)).year
week = (base_date + RelativeDateTime(weeks=+1)).iso_week[1]
timex_val = str(year) + 'W' + str(week)
# Month in the previous year.
elif re.match(r'last ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1].lower()]
timex_val = str(base_date.year - 1) + '-' + str(month)
# Month in the current year.
elif re.match(r'this ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1].lower()]
timex_val = str(base_date.year) + '-' + str(month)
# Month in the following year.
elif re.match(r'next ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1].lower()]
timex_val = str(base_date.year + 1) + '-' + str(month)
elif re.match(r'last month', timex, re.IGNORECASE):
# Handles the year boundary.
if base_date.month == 1:
timex_val = str(base_date.year - 1) + '-' + '12'
else:
timex_val = str(base_date.year) + '-' + str(base_date.month - 1)
elif re.match(r'this month', timex, re.IGNORECASE):
timex_val = str(base_date.year) + '-' + str(base_date.month)
elif re.match(r'next month', timex, re.IGNORECASE):
# Handles the year boundary.
if base_date.month == 12:
timex_val = str(base_date.year + 1) + '-' + '1'
else:
timex_val = str(base_date.year) + '-' + str(base_date.month + 1)
elif re.match(r'last year', timex, re.IGNORECASE):
timex_val = str(base_date.year - 1)
elif re.match(r'this year', timex, re.IGNORECASE):
timex_val = str(base_date.year)
elif re.match(r'next year', timex, re.IGNORECASE):
timex_val = str(base_date.year + 1)
elif re.match(r'\d+ days? (ago|earlier|before)', timex, re.IGNORECASE):
# Calculate the offset by taking '\d+' part from the timex.
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date + RelativeDateTime(days=-offset))
elif re.match(r'\d+ days? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date + RelativeDateTime(days=+offset))
elif re.match(r'\d+ weeks? (ago|earlier|before)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
year = (base_date + RelativeDateTime(weeks=-offset)).year
week = (base_date + \
RelativeDateTime(weeks=-offset)).iso_week[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'\d+ weeks? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
year = (base_date + RelativeDateTime(weeks=+offset)).year
week = (base_date + RelativeDateTime(weeks=+offset)).iso_week[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'\d+ months? (ago|earlier|before)', timex, re.IGNORECASE):
extra = 0
offset = int(re.split(r'\s', timex)[0])
# Checks if subtracting the remainder of (offset / 12) to the base month
# crosses the year boundary.
if (base_date.month - offset % 12) < 1:
extra = 1
# Calculate new values for the year and the month.
year = str(base_date.year - offset // 12 - extra)
month = str((base_date.month - offset % 12) % 12)
# Fix for the special case.
if month == '0':
month = '12'
timex_val = year + '-' + month
elif re.match(r'\d+ months? (later|after)', timex, re.IGNORECASE):
extra = 0
offset = int(re.split(r'\s', timex)[0])
if (base_date.month + offset % 12) > 12:
extra = 1
year = str(base_date.year + offset // 12 + extra)
month = str((base_date.month + offset % 12) % 12)
if month == '0':
month = '12'
timex_val = year + '-' + month
elif re.match(r'\d+ years? (ago|earlier|before)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date.year - offset)
elif re.match(r'\d+ years? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date.year + offset)
# Remove 'time' from timex_val.
# For example, If timex_val = 2000-02-20 12:23:34.45, then
# timex_val = 2000-02-20
timex_val = re.sub(r'\s.*', '', timex_val)
# Substitute tag+timex in the text with grounded tag+timex.
tagged_text = re.sub('<TIMEX2>' + timex_ori + '</TIMEX2>', '<TIMEX2 val=\"' \
+ timex_val + '\">' + timex_ori + '</TIMEX2>', tagged_text)
ret_items.append([timex_ori,timex_val])
return tagged_text,ret_items
####
def demo():
import nltk
text = nltk.corpus.abc.raw('rural.txt')[:10000]
print tag(text)
if __name__ == '__main__':
#demo()
ground(tag("I know what you did last wednesday"),gmt()) | mit | 563,371,370,645,683,000 | 37.698895 | 86 | 0.566176 | false | 3.275193 | false | false | false | 0.004212 |
HewlettPackard/oneview-redfish-toolkit | oneview_redfish_toolkit/blueprints/computer_system_collection.py | 1 | 2001 | # -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from flask import Blueprint
from flask import g
from oneview_redfish_toolkit.api.computer_system_collection \
import ComputerSystemCollection
from oneview_redfish_toolkit.blueprints.util.response_builder import \
ResponseBuilder
from oneview_redfish_toolkit.services.zone_service import ZoneService
computer_system_collection = Blueprint("computer_system_collection", __name__)
@computer_system_collection.route("/redfish/v1/Systems/", methods=["GET"])
def get_computer_system_collection():
"""Get the Redfish Computer System Collection.
Get method to return ComputerSystemCollection JSON when
/redfish/v1/Systems is requested.
Returns:
JSON: JSON with ComputerSystemCollection.
"""
server_profile_list = g.oneview_client.server_profiles.get_all()
server_profile_list = list(filter(lambda i: i.get('serverHardwareUri'),
server_profile_list))
server_profile_tmpls = \
g.oneview_client.server_profile_templates.get_all()
zone_service = ZoneService(g.oneview_client)
zone_ids = zone_service.get_zone_ids_by_templates(server_profile_tmpls)
csc = ComputerSystemCollection(server_profile_list,
server_profile_tmpls,
zone_ids)
return ResponseBuilder.success(csc)
| apache-2.0 | 2,541,078,353,435,733,000 | 36.055556 | 78 | 0.707646 | false | 4.075356 | false | false | false | 0 |
MSylvia/pyNES | pynes/tests/rol_test.py | 28 | 3652 | # -*- coding: utf-8 -*-
'''
ROL, Rotate Left Test
This is an Bit Manipulation of the 6502.
'''
import unittest
from pynes.compiler import lexical, syntax, semantic
class RolTest(unittest.TestCase):
def test_rol_imm(self):
tokens = list(lexical('ROL #$10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_HEX_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x2a, 0x10])
def test_rol_imm_with_decimal(self):
tokens = list(lexical('ROL #10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_DECIMAL_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x2a, 0x0a])
def test_rol_imm_with_binary(self):
tokens = list(lexical('ROL #%00000100'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_BINARY_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x2a, 0x04])
def test_rol_zp(self):
tokens = list(lexical('ROL $00'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x26, 0x00])
def test_rol_zpx(self):
tokens = list(lexical('ROL $10,X'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE_X', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x36, 0x10])
def test_rol_abs(self):
tokens = list(lexical('ROL $1234'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('$1234', tokens[1]['value'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x2e, 0x34, 0x12])
def test_rol_absx(self):
tokens = list(lexical('ROL $1234,X'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('$1234', tokens[1]['value'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE_X', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x3e, 0x34, 0x12])
| bsd-3-clause | -2,688,325,988,595,902,500 | 36.649485 | 64 | 0.598302 | false | 3.474786 | true | false | false | 0 |
brahmastra2016/bleachbit | tests/TestUnix.py | 1 | 10352 | # vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Unix
"""
from __future__ import absolute_import, print_function
from tests import common
import bleachbit
from bleachbit.Unix import *
import sys
import unittest
@unittest.skipIf('win32' == sys.platform, 'skipping unix tests on windows')
class UnixTestCase(common.BleachbitTestCase):
"""Test case for module Unix"""
def setUp(self):
"""Initialize unit tests"""
self.locales = Locales()
def test_apt(self):
"""Unit test for method apt_autoclean() and apt_autoremove()"""
if 0 != os.geteuid() or not FileUtilities.exe_exists('apt-get'):
self.assertRaises(RuntimeError, apt_autoclean)
self.assertRaises(RuntimeError, apt_autoremove)
else:
bytes_freed = apt_autoclean()
self.assertIsInteger(bytes_freed)
bytes_freed = apt_autoremove()
self.assertIsInteger(bytes_freed)
def test_is_broken_xdg_desktop(self):
"""Unit test for is_broken_xdg_desktop()"""
menu_dirs = ['/usr/share/applications',
'/usr/share/autostart',
'/usr/share/gnome/autostart',
'/usr/share/gnome/apps',
'/usr/share/mimelnk',
'/usr/share/applnk-redhat/',
'/usr/local/share/applications/']
for dirname in menu_dirs:
for filename in [fn for fn in FileUtilities.children_in_directory(dirname, False)
if fn.endswith('.desktop')]:
self.assertIsInstance(is_broken_xdg_desktop(filename), bool)
def test_is_running_darwin(self):
def run_ps():
return """USER PID %CPU %MEM VSZ RSS TT STAT STARTED TIME COMMAND
root 703 0.0 0.0 2471428 2792 ?? Ss 20May16 0:01.30 SubmitDiagInfo
alocaluseraccount 681 0.0 0.0 2471568 856 ?? S 20May16 0:00.81 DiskUnmountWatcher
alocaluseraccount 666 0.0 0.0 2507092 3488 ?? S 20May16 0:17.47 SpotlightNetHelper
root 665 0.0 0.0 2497508 512 ?? Ss 20May16 0:11.30 check_afp
alocaluseraccount 646 0.0 0.1 2502484 5656 ?? S 20May16 0:03.62 DataDetectorsDynamicData
alocaluseraccount 632 0.0 0.0 2471288 320 ?? S 20May16 0:02.79 mdflagwriter
alocaluseraccount 616 0.0 0.0 2497596 520 ?? S 20May16 0:00.41 familycircled
alocaluseraccount 573 0.0 0.0 3602328 2440 ?? S 20May16 0:39.64 storedownloadd
alocaluseraccount 572 0.0 0.0 2531184 3116 ?? S 20May16 0:02.93 LaterAgent
alocaluseraccount 561 0.0 0.0 2471492 584 ?? S 20May16 0:00.21 USBAgent
alocaluseraccount 535 0.0 0.0 2496656 524 ?? S 20May16 0:00.33 storelegacy
root 531 0.0 0.0 2501712 588 ?? Ss 20May16 0:02.40 suhelperd
"""
self.assertTrue(is_running_darwin('USBAgent', run_ps))
self.assertFalse(is_running_darwin('does-not-exist', run_ps))
self.assertRaises(RuntimeError, is_running_darwin, 'foo', lambda: 'invalid-input')
def test_is_running(self):
# Fedora 11 doesn't need realpath but Ubuntu 9.04 uses symlink
# from /usr/bin/python to python2.6
exe = os.path.basename(os.path.realpath(sys.executable))
self.assertTrue(is_running(exe))
self.assertFalse(is_running('does-not-exist'))
def test_journald_clean(self):
if not FileUtilities.exe_exists('journalctl'):
self.assertRaises(RuntimeError, journald_clean)
else:
journald_clean()
def test_locale_regex(self):
"""Unit test for locale_to_language()"""
tests = {'en': 'en',
'en_US': 'en',
'en_US@piglatin': 'en',
'en_US.utf8': 'en',
'ko_KR.eucKR': 'ko',
'pl.ISO8859-2': 'pl',
'zh_TW.Big5': 'zh'}
import re
regex = re.compile('^' + Locales.localepattern + '$')
for locale, tlc in tests.items():
m = regex.match(locale)
self.assertIsNotNone(m, 'expected positive match for ' + locale)
self.assertEqual(m.group("locale"), tlc)
for test in ['default', 'C', 'English', 'ru_RU.txt', 'ru.txt']:
self.assertIsNone(regex.match(test), 'expected negative match for ' + test)
def test_localization_paths(self):
"""Unit test for localization_paths()"""
from xml.dom.minidom import parseString
configpath = parseString(
'<path location="/usr/share/locale/" />').firstChild
locales.add_xml(configpath)
counter = 0
for path in locales.localization_paths(['en']):
self.assertLExists(path)
# self.assert_(path.startswith('/usr/share/locale'))
# /usr/share/locale/en_* should be ignored
self.assertEqual(path.find('/en_'), -1)
counter += 1
self.assertGreater(counter, 0, 'Zero files deleted by localization cleaner.' +
'This may be an error unless you really deleted all the files.')
def test_fakelocalizationdirs(self):
"""Create a faked localization hierarchy and clean it afterwards"""
keepdirs = [
'important_dontdelete',
'important_dontdelete/ru',
'delete',
'delete/locale',
'delete/locale/en',
'delete/dummyfiles',
'foobar',
'foobar/locale']
nukedirs = [
'delete/locale/ru',
'foobar/locale/ru']
keepfiles = [
'delete/dummyfiles/dontdeleteme_ru.txt',
'important_dontdelete/exceptthisone_ru.txt',
'delete/dummyfiles/en.txt',
'delete/dummyfiles/ru.dic']
nukefiles = [
'delete/dummyfiles/ru.txt',
'delete/locale/ru_RU.UTF8.txt']
for path in keepdirs + nukedirs:
os.mkdir(os.path.join(self.tempdir, path))
for path in keepfiles + nukefiles:
self.write_file(path)
configxml = '<path directoryregex="^.*$">' \
' <path directoryregex="^(locale|dummyfiles)$">' \
' <path location="." filter="*" />' \
' <regexfilter postfix="\.txt" />' \
' </path>' \
'</path>'
from xml.dom.minidom import parseString
config = parseString(configxml)
self.locales._paths = LocaleCleanerPath(self.tempdir)
self.locales.add_xml(config.firstChild, None)
# normpath because paths may contain ./
deletelist = [os.path.normpath(path) for path in self.locales.localization_paths(['en', 'de'])]
for path in keepdirs + keepfiles:
self.assertNotIn(os.path.join(self.tempdir, path), deletelist)
for path in nukedirs + nukefiles:
self.assertIn(os.path.join(self.tempdir, path), deletelist)
def test_rotated_logs(self):
"""Unit test for rotated_logs()"""
for path in rotated_logs():
self.assertLExists(path, "Rotated log path '%s' does not exist" % path)
def test_run_cleaner_cmd(self):
from subprocess import CalledProcessError
self.assertRaises(RuntimeError, run_cleaner_cmd, '/hopethisdoesntexist', [])
self.assertRaises(CalledProcessError, run_cleaner_cmd, 'sh', ['-c', 'echo errormsg; false'])
# test if regexes for invalid lines work
self.assertRaises(RuntimeError, run_cleaner_cmd, 'echo', ['This is an invalid line'],
error_line_regexes=['invalid'])
freed_space_regex = r'^Freed ([\d.]+[kMT]?B)'
lines = ['Test line',
'Freed 100B on your hard drive',
'Freed 1.9kB, hooray!',
'Fred 12MB']
freed_space = run_cleaner_cmd('echo', ['\n'.join(lines)], freed_space_regex)
self.assertEqual(freed_space, 2000)
def test_start_with_computer(self):
"""Unit test for start_with_computer*"""
b = start_with_computer_check()
self.assertIsInstance(b, bool)
if not os.path.exists(bleachbit.launcher_path) and os.path.exists('bleachbit.desktop'):
# this happens when BleachBit is not installed
bleachbit.launcher_path = 'bleachbit.desktop'
# opposite setting
start_with_computer(not b)
two_b = start_with_computer_check()
self.assertIsInstance(two_b, bool)
self.assertNotEqual(b, two_b)
# original setting
start_with_computer(b)
three_b = start_with_computer_check()
self.assertIsInstance(b, bool)
self.assertEqual(b, three_b)
def test_wine_to_linux_path(self):
"""Unit test for wine_to_linux_path()"""
wineprefix = "/home/foo/.wine"
windows_pathname = "C:\\Program Files\\NSIS\\NSIS.exe"
result = "/home/foo/.wine/drive_c/Program Files/NSIS/NSIS.exe"
self.assertEqual(wine_to_linux_path(wineprefix, windows_pathname), result)
def test_yum_clean(self):
"""Unit test for yum_clean()"""
if 0 != os.geteuid() or os.path.exists('/var/run/yum.pid') \
or not FileUtilities.exe_exists('yum'):
self.assertRaises(RuntimeError, yum_clean)
else:
bytes_freed = yum_clean()
self.assertIsInteger(bytes_freed)
bleachbit.logger.debug('yum bytes cleaned %d', bytes_freed)
| gpl-3.0 | -6,454,970,789,628,798,000 | 42.495798 | 108 | 0.592156 | false | 3.601949 | true | false | false | 0.002705 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/click/exceptions.py | 20 | 7663 | from ._compat import PY2, filename_to_ui, get_text_stderr
from .utils import echo
def _join_param_hints(param_hint):
if isinstance(param_hint, (tuple, list)):
return ' / '.join('"%s"' % x for x in param_hint)
return param_hint
class ClickException(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception
exit_code = 1
def __init__(self, message):
ctor_msg = message
if PY2:
if ctor_msg is not None:
ctor_msg = ctor_msg.encode('utf-8')
Exception.__init__(self, ctor_msg)
self.message = message
def format_message(self):
return self.message
def __str__(self):
return self.message
if PY2:
__unicode__ = __str__
def __str__(self):
return self.message.encode('utf-8')
def show(self, file=None):
if file is None:
file = get_text_stderr()
echo('Error: %s' % self.format_message(), file=file)
class UsageError(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message, ctx=None):
ClickException.__init__(self, message)
self.ctx = ctx
self.cmd = self.ctx and self.ctx.command or None
def show(self, file=None):
if file is None:
file = get_text_stderr()
color = None
hint = ''
if (self.cmd is not None and
self.cmd.get_help_option(self.ctx) is not None):
hint = ('Try "%s %s" for help.\n'
% (self.ctx.command_path, self.ctx.help_option_names[0]))
if self.ctx is not None:
color = self.ctx.color
echo(self.ctx.get_usage() + '\n%s' % hint, file=file, color=color)
echo('Error: %s' % self.format_message(), file=file, color=color)
class BadParameter(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(self, message, ctx=None, param=None,
param_hint=None):
UsageError.__init__(self, message, ctx)
self.param = param
self.param_hint = param_hint
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx)
else:
return 'Invalid value: %s' % self.message
param_hint = _join_param_hints(param_hint)
return 'Invalid value for %s: %s' % (param_hint, self.message)
class MissingParameter(BadParameter):
"""Raised if click required an option or argument but it was not
provided when invoking the script.
.. versionadded:: 4.0
:param param_type: a string that indicates the type of the parameter.
The default is to inherit the parameter type from
the given `param`. Valid values are ``'parameter'``,
``'option'`` or ``'argument'``.
"""
def __init__(self, message=None, ctx=None, param=None,
param_hint=None, param_type=None):
BadParameter.__init__(self, message, ctx, param, param_hint)
self.param_type = param_type
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx)
else:
param_hint = None
param_hint = _join_param_hints(param_hint)
param_type = self.param_type
if param_type is None and self.param is not None:
param_type = self.param.param_type_name
msg = self.message
if self.param is not None:
msg_extra = self.param.type.get_missing_message(self.param)
if msg_extra:
if msg:
msg += '. ' + msg_extra
else:
msg = msg_extra
return 'Missing %s%s%s%s' % (
param_type,
param_hint and ' %s' % param_hint or '',
msg and '. ' or '.',
msg or '',
)
class NoSuchOption(UsageError):
"""Raised if click attempted to handle an option that does not
exist.
.. versionadded:: 4.0
"""
def __init__(self, option_name, message=None, possibilities=None,
ctx=None):
if message is None:
message = 'no such option: %s' % option_name
UsageError.__init__(self, message, ctx)
self.option_name = option_name
self.possibilities = possibilities
def format_message(self):
bits = [self.message]
if self.possibilities:
if len(self.possibilities) == 1:
bits.append('Did you mean %s?' % self.possibilities[0])
else:
possibilities = sorted(self.possibilities)
bits.append('(Possible options: %s)' % ', '.join(possibilities))
return ' '.join(bits)
class BadOptionUsage(UsageError):
"""Raised if an option is generally supplied but the use of the option
was incorrect. This is for instance raised if the number of arguments
for an option is not correct.
.. versionadded:: 4.0
:param option_name: the name of the option being used incorrectly.
"""
def __init__(self, option_name, message, ctx=None):
UsageError.__init__(self, message, ctx)
self.option_name = option_name
class BadArgumentUsage(UsageError):
"""Raised if an argument is generally supplied but the use of the argument
was incorrect. This is for instance raised if the number of values
for an argument is not correct.
.. versionadded:: 6.0
"""
def __init__(self, message, ctx=None):
UsageError.__init__(self, message, ctx)
class FileError(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename, hint=None):
ui_filename = filename_to_ui(filename)
if hint is None:
hint = 'unknown error'
ClickException.__init__(self, hint)
self.ui_filename = ui_filename
self.filename = filename
def format_message(self):
return 'Could not open file %s: %s' % (self.ui_filename, self.message)
class Abort(RuntimeError):
"""An internal signalling exception that signals Click to abort."""
class Exit(RuntimeError):
"""An exception that indicates that the application should exit with some
status code.
:param code: the status code to exit with.
"""
def __init__(self, code=0):
self.exit_code = code
| apache-2.0 | 3,160,672,458,036,215,300 | 31.608511 | 80 | 0.588151 | false | 4.102248 | false | false | false | 0.00013 |
Almad/django-photologue | photologue/management/commands/__init__.py | 27 | 1436 | from photologue.models import PhotoSize
def get_response(msg, func=int, default=None):
while True:
resp = raw_input(msg)
if not resp and default is not None:
return default
try:
return func(resp)
except:
print 'Invalid input.'
def create_photosize(name, width=0, height=0, crop=False, pre_cache=False, increment_count=False):
try:
size = PhotoSize.objects.get(name=name)
exists = True
except PhotoSize.DoesNotExist:
size = PhotoSize(name=name)
exists = False
if exists:
msg = 'A "%s" photo size already exists. Do you want to replace it? (yes, no):' % name
if not get_response(msg, lambda inp: inp == 'yes', False):
return
print '\nWe will now define the "%s" photo size:\n' % size
w = get_response('Width (in pixels):', lambda inp: int(inp), width)
h = get_response('Height (in pixels):', lambda inp: int(inp), height)
c = get_response('Crop to fit? (yes, no):', lambda inp: inp == 'yes', crop)
p = get_response('Pre-cache? (yes, no):', lambda inp: inp == 'yes', pre_cache)
i = get_response('Increment count? (yes, no):', lambda inp: inp == 'yes', increment_count)
size.width = w
size.height = h
size.crop = c
size.pre_cache = p
size.increment_count = i
size.save()
print '\nA "%s" photo size has been created.\n' % name
return size
| bsd-3-clause | -8,432,573,853,895,816,000 | 37.810811 | 98 | 0.600975 | false | 3.528256 | false | false | false | 0.004875 |
richardnpaul/FWL-Website | lib/python2.7/site-packages/django/contrib/gis/geos/libgeos.py | 91 | 5833 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifiying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if not lib_path is None: break
# No GEOS library could be found.
if lib_path is None:
raise ImportError('Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names))
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except:
warn_msg = fmt
logger.warn('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure): pass
class GEOSPrepGeom_t(Structure): pass
class GEOSCoordSeq_t(Structure): pass
class GEOSContextHandle_t(Structure): pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility accross 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return dict((key, m.group(key)) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor'))
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0)
if GEOS_PREPARE:
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
else:
# When thread-safety isn't available, the initGEOS routine must be called
# first. This function takes the notice and error functions, defined
# as Python callbacks above, as parameters. Here is the C code that is
# wrapped:
# extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function);
lgeos.initGEOS(notice_h, error_h)
# Calling finishGEOS() upon exit of the interpreter.
import atexit
atexit.register(lgeos.finishGEOS)
| gpl-3.0 | -4,643,786,910,579,177,000 | 36.632258 | 108 | 0.700497 | false | 3.419109 | false | false | false | 0.0048 |
pepeportela/edx-platform | common/djangoapps/student/management/commands/change_enrollment.py | 44 | 5546 | """ Command line script to change user enrollments. """
import logging
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from optparse import make_option
from student.models import CourseEnrollment, User
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RollbackException(Exception):
"""
Exception raised explicitly to cause a database transaction rollback.
"""
pass
class Command(BaseCommand):
help = """
Changes the enrollment status for students that meet
the criteria specified by the parameters to this command.
Example:
Change enrollment for users joe, frank, and bill from audit to honor:
$ ... change_enrollment -u joe,frank,bill -c some/course/id --from audit --to honor
Or
$ ... change_enrollment -e "[email protected],[email protected],[email protected]" -c some/course/id --from audit --to honor
See what would have been changed from audit to honor without making that change
$ ... change_enrollment -u joe,frank,bill -c some/course/id --from audit --to honor -n
"""
option_list = BaseCommand.option_list + (
make_option('-f', '--from',
metavar='FROM_MODE',
dest='from_mode',
default=False,
help='move from this enrollment mode'),
make_option('-t', '--to',
metavar='TO_MODE',
dest='to_mode',
default=False,
help='move to this enrollment mode'),
make_option('-u', '--usernames',
metavar='USERNAME',
dest='username',
default=False,
help="Comma-separated list of usernames to move in the course"),
make_option('-e', '--emails',
metavar='EMAIL',
dest='email',
default=False,
help="Comma-separated list of email addresses to move in the course"),
make_option('-c', '--course',
metavar='COURSE_ID',
dest='course_id',
default=False,
help="course id to use for transfer"),
make_option('-n', '--noop',
action='store_true',
dest='noop',
default=False,
help="display what will be done but don't actually do anything")
)
def handle(self, *args, **options):
error_users = []
success_users = []
if not options['course_id']:
raise CommandError('You must specify a course id for this command')
if not options['from_mode'] or not options['to_mode']:
raise CommandError('You must specify a "to" and "from" mode as parameters')
try:
course_key = CourseKey.from_string(options['course_id'])
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(options['course_id'])
enrollment_args = dict(
course_id=course_key,
mode=options['from_mode']
)
if options['username']:
self.update_enrollments('username', enrollment_args, options, error_users, success_users)
if options['email']:
self.update_enrollments('email', enrollment_args, options, error_users, success_users)
self.report(error_users, success_users)
def update_enrollments(self, identifier, enrollment_args, options, error_users, success_users):
""" Update enrollments for a specific user identifier (email or username). """
users = options[identifier].split(",")
for identified_user in users:
logger.info(identified_user)
try:
user_args = {
identifier: identified_user
}
enrollment_args['user'] = User.objects.get(**user_args)
enrollments = CourseEnrollment.objects.filter(**enrollment_args)
with transaction.atomic():
for enrollment in enrollments:
enrollment.update_enrollment(mode=options['to_mode'])
enrollment.save()
if options['noop']:
raise RollbackException('Forced rollback.')
except RollbackException:
success_users.append(identified_user)
continue
except Exception as exception: # pylint: disable=broad-except
error_users.append((identified_user, exception))
continue
success_users.append(identified_user)
logger.info('Updated user [%s] to mode [%s]', identified_user, options['to_mode'])
def report(self, error_users, success_users):
""" Log and overview of the results of the command. """
total_users = len(success_users) + len(error_users)
logger.info('Successfully updated %i out of %i users', len(success_users), total_users)
if len(error_users) > 0:
logger.info('The following %i user(s) not saved:', len(error_users))
for user, error in error_users:
logger.info('user: [%s] reason: [%s] %s', user, type(error).__name__, error.message)
| agpl-3.0 | -8,445,688,142,294,113,000 | 37.248276 | 131 | 0.575189 | false | 4.680169 | false | false | false | 0.003246 |
decvalts/landlab | landlab/components/glacier_thin_ice_model/examples/glacier_example.py | 1 | 3113 |
import numpy as np
import scipy.io as io
from landlab.components.glacier_thin_ice_model.glacier import Glacier
from landlab import RasterModelGrid
import matplotlib.pyplot as plt
import matplotlib as mpl
def main():
'''
B: bed elevation
b_dot:
dx: node spacing (dx = dy)
nx: number of columns of nodes
ny: number of rows of nodes
t_STOP: number of years of simulation
dt: time step interval, in years
t: starting time of simulation, default, 0
'''
input_file = 'mb4_spin1.mat'
mat = io.loadmat(input_file)
B = mat['B']
b_dot = mat['b_dot']
dx = mat['dx'][0,0]
dy = mat['dy'][0,0]
nx = np.int_(mat['nx'][0,0])
ny = np.int_(mat['ny'][0,0])
t_STOP = 500 ### 1000
dt = 0.08333
t = 0
### put input data in a dictionary, and pass the dictionary as arguments
B,b_dot,S = flatten(B,b_dot)
dictionary = {'S':S,'B':B,'b_dot':b_dot,'dt':dt,'t_STOP':t_STOP,'t':t,'dx':dx,'nx':nx,'ny':ny}
grid = RasterModelGrid(nx,ny,dx)
gla = Glacier(grid,dictionary)
gla.recursive_steps()
### save outputs in ascill file
S_map = gla.grid['node']['ice_elevation'] ### ice surface elevation matrix
H_map = gla.grid['node']['ice_thickness'] ### ice thickness matrix
I_map = gla.grid['node']['I_map'] ### ice mask matrix
np.savetxt('S_map.txt',S_map)
np.savetxt('H_map.txt',H_map)
np.savetxt('I_map.txt',I_map)
### plot S_map
plt.figure(figsize=(8,6))
plt.imshow(S_map)
plt.colorbar()
plt.savefig('S_map_{0}yrs.pdf'.format(t_STOP),dpi=300)
### plot H_map
plt.figure(figsize=(8,6))
plt.imshow(H_map)
plt.colorbar()
plt.savefig('H_map_{0}yrs.pdf'.format(t_STOP),dpi=300)
### plot map of observed and simulated masks of ice
plot_mask('I_map.txt','obs_map.txt')
def flatten(B,b_dot):
### flatten two dimensional matrix
B = B.T.flatten()
B[np.isnan(B)] = 0
S = B
b_dot = b_dot.T.flatten()
return B,b_dot,S
def plot_mask(ifile_sim,ifile_obs):
'''
plot simulated and observed masks of ice
'''
# make presence of ice from simulated ice file as 1
# make presence of ice from observed ice file as 2
# make presence of ice in overlapping area as 3
dat_sim = np.genfromtxt(ifile_sim)
dat_obs = np.genfromtxt(ifile_obs)
dat_obs[np.where(dat_obs==1)] = 2
dat_add = dat_sim + dat_obs
plt.figure(figsize=(10,8))
# define the colormap
cmap = plt.cm.jet
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
# force the first color entry to be grey
cmaplist[0] = (.5,.5,.5,1.0)
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
# define the bins and normalize
bounds = np.linspace(0,4,5)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
img1 = plt.imshow(dat_sim,cmap=cmap,norm=norm) ### 1
img2 = plt.imshow(dat_obs,cmap=cmap,norm=norm) ### 2
img3 = plt.imshow(dat_add,cmap=cmap,norm=norm) ### 3
cbar = plt.colorbar(img3, ticks=[0.5,1.5, 2.5, 3.5], orientation='vertical')
cbar.ax.set_yticklabels(['No ice','Simulated Only', 'Observed Only', 'Overlapped'])# horizontal colorbar
cbar.ax.tick_params(labelsize=12)
plt.savefig('mask.pdf',dpi=300)
if __name__ == "__main__":
main()
| mit | 1,547,522,674,247,210,800 | 25.836207 | 105 | 0.661099 | false | 2.520648 | false | false | false | 0.054288 |
scottmcclary1/sp17-i524 | project/S17-IR-P008/code/Zeppelin/az.py | 19 | 14434 | # authors: Veera Marni and Naveenkumar Ramaraju
# License: MIT
# To deploy apache zeppelin using ansible via cmd
import time
from cloudmesh_client.common.Shell import Shell
from write2files import HostsWriter
from IPy import IP
import cmd
import os, sys
import getpass
import time
class VMS(cmd.Cmd):
def setup(self, cloud="chameleon", user='cc'):
self.cloud = 'cloud='+cloud
self.cloudUser = user
#print "Ignore Error: \n Please define a key first, e.g.: cm key add --ssh <keyname> \n " \
# "-- If key has been successfully added into the database and uploaded into the cloud \n" \
# "Ignore Error: \n problem uploading key veera to cloud chameleon: Key pair 'veera' already exists.\n " \
# "******************************************************************************************************"
# result = Shell.cm("reset")
# print result
# result = Shell.cm("key add --ssh")
# print result
# result = Shell.cm("key", "upload")
# print result
# result = Shell.cm("default", self.cloud)
# print result
# result = Shell.cm("refresh", "on")
#print result
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = '>> '
self.n = 1
self.floating_ip_list = []
self.static_ip_list = []
self.cloud = "cloud=chameleon"
self.cloudUser = 'cc'
self.assignIp = False
self.masterIp = None
self.userId = None
# self.setup()
def do_setCloud(self, cloud):
self.cloud = "cloud=" + cloud
#self.setup(cloud=self.cloud)
def do_startSpark(self):
print 'Starting Spark'
"ansible-playbook start_zeppelin.yml - i hosts"
if 'chameleon' in self.cloud:
deployment_logs = os.popen(
'ansible-playbook start_spark.yml -i hosts .log.txt').read()
else:
deployment_logs = os.popen(
'ansible-playbook start_spark_jetstream.yml -i hosts .log.txt').read()
def do_stopSpark(self):
print 'Stopping Spark'
if 'chameleon' in self.cloud:
deployment_logs = os.popen(
'ansible-playbook stop_spark.yml -i hosts .log.txt').read()
else:
deployment_logs = os.popen(
'ansible-playbook stop_spark_jetstream.yml -i hosts .log.txt').read()
def do_startZeppelin(self):
print 'Starting Zeppelin'
if 'chameleon' in self.cloud:
deployment_logs = os.popen(
'ansible-playbook start_zeppelin.yml -i hosts .log.txt').read()
else:
deployment_logs = os.popen(
'ansible-playbook start_zeppelin_jetstream.yml -i hosts .log.txt').read()
def do_stopZeppelin(self):
print 'Stopping Zeppelin'
if 'chameleon' in self.cloud:
deployment_logs = os.popen(
'ansible-playbook stop_zeppelin.yml -i hosts .log.txt').read()
else:
deployment_logs = os.popen(
'ansible-playbook stop_zeppelin_jetstream.yml -i hosts .log.txt').read()
def do_setCloudUser(self, cloudUser):
self.cloudUser = cloudUser
def do_setAssignFloatingIp(self, assignIp):
print 'floating ip is ' + str(assignIp)
self.assignIp = assignIp
def do_setUserId(self, userId):
self.userId = userId
print 'user id is set to '+ userId
def do_setMasterIp(self, masterIp):
self.masterIp = masterIp.strip()
print 'master ip is set to '+ self.masterIp
def do_boot(self, n):
self.floating_ip_list = []
self.static_ip_list = []
boot_start_time = time.time()
try:
for i in range(int(n)):
floating_ip = None
static_ip = None
print "Starting to boot Virtual Machine : ", i + 1
Shell.cm("vm", "boot --secgroup=naveen-def")
if self.assignIp:
fip_result = Shell.cm("vm", "ip assign") # floating IP
floating_ip = fip_result.split(' ')[-2][:-6]
try:
if self.assignIp:
IP(floating_ip)
# the below cmd is the "cm vm ip show" as ip is not getting updated automatically in the DB
Shell.cm("vm", "ip assign")
n = 0
while n < 5 and i >= len(self.static_ip_list):
sip_info = Shell.cm("vm", "list")
lines = sip_info.split('\n')
for lin in lines:
if self.userId in lin:
items = lin.split('|')
static_ip = items[5].strip()
if '.' in static_ip and static_ip not in self.static_ip_list and static_ip != self.masterIp:
self.static_ip_list.append(static_ip)
break
print 'Sleeping for ' + str(20 * (n+1)) + ' seconds as ip not assigned'
time.sleep(20 * (n + 1))
n += 1
if n > 4:
raise Exception('Unable to assign ips')
except Exception as e:
print e
print "floating IP error encountered"
print "Stopping to create further VMs"
break
self.floating_ip_list.append(floating_ip)
except ValueError:
self.help_boot()
if len(self.floating_ip_list) == 0 and self.assignIp:
print "No VMs created"
else:
print "Returning IPs of VMs created"
print "Floating IPs list :", self.floating_ip_list
print "Static IPs list :", self.static_ip_list
print "wirting IPs to respective files ..."
print 'VM user :', self.assignIp
HW = HostsWriter()
print self.static_ip_list
HW.writeIPs(staticIPs=self.static_ip_list, floatingIPs=self.floating_ip_list,
ansible_ssh_user=self.cloudUser, floating_ip=self.assignIp, masterIp=self.masterIp)
# starting ansible
if os.path.exists(os.environ['HOME'] + '/.ssh/known_hosts'):
os.remove(os.environ['HOME'] + '/.ssh/known_hosts')
boot_time = boot_start_time - time.time()
print 'Time taken to boot:' + str(boot_time)
print "Commencing deployment for zepplin"
# taking password
password = getpass.getpass("Enter ansible valut password: ")
print "Running the ansible-playbook for zepplin"
deployment_start_time = time.time()
tempPassFile = open('.log.txt', 'w')
tempPassFile.write(password)
tempPassFile.close()
startTime = time.time()
if 'chameleon' in self.cloud:
deployment_logs = os.popen(
'ansible-playbook Zeppelin.yml -i hosts --vault-password-file .log.txt').read()
else:
deployment_logs = os.popen(
'ansible-playbook Zeppelin_jetstream.yml -i hosts --vault-password-file .log.txt').read()
os.remove('.log.txt')
endTime = time.time()
totalDeployTime = endTime - startTime
print "Time taken to depoly ", n, " virtual machines for zeppelin is ", totalDeployTime
# writing logs
tempDepLog = open('deployment_logs', 'w')
tempDepLog.write(deployment_logs)
tempDepLog.close()
# checking logs
deployment_logs_lines = deployment_logs.splitlines()
wordList = []
for line in deployment_logs_lines:
words = line.split(' ')
for word in words:
wordList.append(word)
if "fatal" in wordList or '"Decryption' in wordList or "failed" in wordList or 'fatal:' in wordList:
print "Check deployment logs for errors during deployment"
else:
print "Deployment Successful"
print "Time took for deployment is:" + str(time.time() - deployment_start_time)
def do_delete(self, names):
names = str(names).split(' ')
for name in names:
delete_machine = "delete " + name
print delete_machine
result = Shell.cm("vm", delete_machine)
print result
def do_quit(self, arg):
sys.exit(1)
def do_getFloatingIPs(self):
print "Floating IPs of all Machines", self.floating_ip_list
def do_getStaticIPs(self):
print "Static IPs of all Machines", self.static_ip_list
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
In that case we execute the line as Python code.
"""
self.stdout.write('*** Unknown syntax: %s\n' % line)
# ---------Documentation-----------------
def help_boot(self):
print "syntax: boot [count]\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " boot [n] boots 3 vms one after the other"
def help_quit(self):
print "syntax: quit or q\n",
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " quit terminates the application"
print " q terminates the application"
def help_getFloatingIPs(self):
print "syntax: getFloatingIPs()\n",
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " getFloatingIPs() returns the Floating IPs of all machines"
def help_getStaticIPs(self):
print "syntax: getStaticIPs()\n",
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " getStaticIPs() returns the Static IPs of all machines"
def help_delete(self):
print "syntax: delete [names]\n",
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " delete v-001 deletes machine v-001"
print " delete v-002 deletes machine v-001"
print " delete v* deletes all machines starting with v"
def help_setCloud(self):
print "internal method"
def help_setCloudUser(self):
print "syntax: setCloudUser [user name]\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " setCloudUser ubuntu sets user name as 'ubuntu' to connect to vms"
def help_setAssignFloatingIp(self):
print "syntax: setAssignFloatingIp [True/False]\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " setAssignFloatingIp False sets whether floating ip need to be created"
def help_setMasterIp(self):
print "syntax: setMasterIp [xxx.xxx.xxx.xxx]\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " setMasterIp 123.123.123.123 sets master ip as 123.123.123.123 to start zeppelin and spark"
def help_setUserId(self):
print "syntax: setUserId [user id]\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " setUserId yourUserName sets user name as your user name"
def help_startSpark(self):
print "syntax: startSpark\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " startSpark start spark master and slaves"
def help_stopSpark(self):
print "syntax: stopSpark\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " stopSpark stop spark master and slaves"
def help_startZeppelin(self):
print "syntax: startZeppelin\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " startZeppelin start zeppelin on master"
def help_stopZeppelin(self):
print "syntax: stopZeppelin\n"
print "usage: "
print " | command | description "
print " ------------------------------------------------------------------"
print " startZeppelin stop zeppelin on master"
# ---------shortcuts----------------------
do_q = do_quit
do_exit = do_quit
help_q = help_quit
help_exit = help_quit
if __name__ == "__main__":
vms = VMS()
vms.cmdloop()
| apache-2.0 | 8,716,251,214,280,924,000 | 38.545205 | 124 | 0.47402 | false | 4.45219 | false | false | false | 0.004295 |
mohazahran/Detecting-anomalies-in-user-trajectories | tribeflow/dynamic.py | 1 | 14146 | #-*- coding: utf8
from __future__ import division, print_function
from mpi4py import MPI
from tribeflow import dataio
from tribeflow.mycollections.stamp_lists import StampLists
from tribeflow.learn import prepare_results
from tribeflow.plearn import dispatch_jobs
from tribeflow.plearn import fetch_results
from tribeflow.plearn import generate_workload
from tribeflow.plearn import manage
from tribeflow.plearn import Msg
import _learn
import _eval
import numpy as np
def finalize_splits(nz, n_splits, splitted, Dts, Trace, nh, ns, kernel):
new_nz = nz + n_splits
if kernel.get_priors().shape[0] > 0:
new_P = [row for row in kernel.get_state()]
for _ in xrange(n_splits):
new_P.append(kernel.get_priors())
else:
new_P = kernel.get_state()
Trace[:, -1] = splitted
#Populate new counts
Count_zh_new = np.zeros(shape=(new_nz, nh), dtype='i4')
Count_sz_new = np.zeros(shape=(ns, new_nz), dtype='i4')
count_z_new = np.zeros(new_nz, dtype='i4')
count_h_new = np.zeros(nh, dtype='i4')
_learn.fast_populate(Trace, Count_zh_new, Count_sz_new, \
count_h_new, count_z_new)
new_stamps = StampLists(new_nz)
for z in xrange(new_nz):
idx = Trace[:, -1] == z
topic_stamps = Dts[idx]
new_stamps._extend(z, topic_stamps[:, -1])
return Trace, Count_zh_new, Count_sz_new, \
count_z_new, new_stamps, np.array(new_P)
def split(Dts, Trace, previous_stamps, Count_zh, Count_sz, \
count_h, count_z, alpha_zh, beta_zs, ll_per_z, kernel, \
perc=0.05, min_stamps=50):
nz = Count_zh.shape[0]
nh = Count_zh.shape[1]
ns = Count_sz.shape[0]
assert nz == ll_per_z.shape[0]
idx_int_all = np.arange(Trace.shape[0], dtype='i4')
#Initiate auxiliary matrices
Count_zh_spl = np.zeros(shape=(nz + 1, nh), dtype='i4')
Count_sz_spl = np.zeros(shape=(ns, nz + 1), dtype='i4')
count_z_spl = np.zeros(nz + 1, dtype='i4')
Count_zh_spl[:-1, :] = Count_zh
Count_sz_spl[:, :-1] = Count_sz
count_z_spl[:-1] = count_z
ll_per_z_new = np.zeros(nz + 1, dtype='f8')
ll_per_z_new[:-1] = ll_per_z
new_stamps = StampLists(nz + 1)
for z in xrange(nz):
new_stamps._extend(z, previous_stamps._get_all(z))
splitted = Trace[:, -1].copy()
shift = 0
#Do the splits per topic
for z in xrange(nz):
#Candidates for removal
topic_stamps = np.asanyarray(previous_stamps._get_all(z))
idx = Trace[:, -1] == z
assert topic_stamps.shape[0] == idx.sum()
argsrt = topic_stamps.argsort()
top = int(np.ceil(perc * topic_stamps.shape[0]))
        #If fewer than min_stamps stamps remain, skip this topic: not enough data for a CCDF estimate
if top < min_stamps:
continue
#Populate stamps
new_stamps._clear_one(z)
new_stamps._clear_one(nz)
new_stamps._extend(z, topic_stamps[:-top])
new_stamps._extend(nz, topic_stamps[-top:])
#Split topic on the Trace. The trace has to be sorted by timestamp!!
old_assign = Trace[:, -1][idx].copy()
new_assign = Trace[:, -1][idx].copy()
new_assign[-top:] = nz
Trace[:, -1][idx] = new_assign
#Update matrices. Can't really vectorize this :(
for line in Trace[idx][-top:]:
h = line[0]
Count_zh_spl[z, h] -= 1
for o in line[1:-1]:
Count_sz_spl[o, z] -= 1
count_z_spl[z] -= 1
Count_zh_spl[nz, h] += 1
for o in line[1:-1]:
Count_sz_spl[o, nz] += 1
count_z_spl[nz] += 1
#New LL
ll_per_z_new[z] = 0
ll_per_z_new[-1] = 0
idx_int = idx_int_all[idx]
_eval.quality_estimate(Dts, Trace, \
new_stamps, Count_zh_spl, Count_sz_spl, count_h, \
count_z_spl, alpha_zh, beta_zs, \
ll_per_z_new, idx_int, kernel)
if ll_per_z_new.sum() > ll_per_z.sum():
new_assign[-top:] = nz + shift
splitted[idx] = new_assign
shift += 1
#Revert trace
new_stamps._clear_one(z)
new_stamps._clear_one(nz)
new_stamps._extend(z, previous_stamps._get_all(z))
Count_zh_spl[:-1, :] = Count_zh
Count_sz_spl[:, :-1] = Count_sz
count_z_spl[:-1] = count_z
Count_zh_spl[-1, :] = 0
Count_sz_spl[:, -1] = 0
count_z_spl[-1] = 0
ll_per_z_new[z] = ll_per_z[z]
ll_per_z_new[-1] = 0
Trace[:, -1][idx] = old_assign
return finalize_splits(nz, shift, splitted, Dts, Trace, nh, ns, kernel)
def correlate_counts(Count_zh, Count_sz, count_h, count_z, \
alpha_zh, beta_zs):
#Create Probabilities
Theta_zh = np.zeros_like(Count_zh, dtype='f8')
Psi_sz = np.zeros_like(Count_sz, dtype='f8')
_learn._aggregate(Count_zh, Count_sz, count_h, count_z, \
alpha_zh, beta_zs, Theta_zh, Psi_sz)
Theta_hz = Theta_zh.T * count_z
Theta_hz = Theta_hz / Theta_hz.sum(axis=0)
Psi_sz = Psi_sz / Psi_sz.sum(axis=0)
#Similarity between every probability
C = np.cov(Theta_hz.T) + np.cov(Psi_sz.T)
C /= 2
#Remove lower diag (symmetric)
C = np.triu(C, 1)
return C
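# Note on correlate_counts: C is the average of the covariance of the
# per-topic user probabilities (Theta_hz) and the per-topic object
# probabilities (Psi_sz); only the strict upper triangle is kept, so for
# nz topics at most nz * (nz - 1) / 2 pairs remain. With nz = 3, for
# instance, only C[0, 1], C[0, 2] and C[1, 2] can rank as merge
# candidates in merge() below.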
def finalize_merge(nz, to_merge, Dts, Trace, nh, ns, kernel):
for z1, z2 in to_merge:
idx = Trace[:, -1] == z2
Trace[:, -1][idx] = z1
if kernel.get_priors().shape[0] > 0:
new_P_dict = dict((i, row) for i, row in enumerate(kernel.get_state()))
for z1, z2 in to_merge:
del new_P_dict[z2]
new_P = []
for i in sorted(new_P_dict):
new_P.append(new_P_dict[i])
else:
new_P = kernel.get_state()
#Make sure new trace has contiguous ids
new_assign = Trace[:, -1].copy()
old_assign = Trace[:, -1].copy()
new_nz = len(set(new_assign))
for i, z in enumerate(set(new_assign)):
idx = old_assign == z
new_assign[idx] = i
Trace[:, -1] = new_assign
#Populate new counts
Count_zh_new = np.zeros(shape=(new_nz, nh), dtype='i4')
Count_sz_new = np.zeros(shape=(ns, new_nz), dtype='i4')
count_z_new = np.zeros(new_nz, dtype='i4')
count_h_new = np.zeros(nh, dtype='i4')
_learn.fast_populate(Trace, Count_zh_new, Count_sz_new, \
count_h_new, count_z_new)
new_stamps = StampLists(new_nz)
for z in xrange(new_nz):
idx = Trace[:, -1] == z
topic_stamps = Dts[idx]
new_stamps._extend(z, topic_stamps[:, -1])
return Trace, Count_zh_new, Count_sz_new, \
count_z_new, new_stamps, np.array(new_P)
def merge(Dts, Trace, previous_stamps, Count_zh, Count_sz, \
count_h, count_z, alpha_zh, beta_zs, ll_per_z, kernel):
nz = Count_zh.shape[0]
nh = Count_zh.shape[1]
ns = Count_sz.shape[0]
idx_int_all = np.arange(Trace.shape[0], dtype='i4')
#Get the nz most similar
C = correlate_counts(Count_zh, Count_sz, count_h, count_z, \
alpha_zh, beta_zs)
#k = int(np.ceil(np.sqrt(nz)))
idx_dim1, idx_dim2 = \
np.unravel_index(C.flatten().argsort()[-nz:][::-1], C.shape)
top_sims = zip(idx_dim1, idx_dim2)
#New info
new_stamps = previous_stamps.copy()
Count_zh_mrg = Count_zh.copy()
Count_sz_mrg = Count_sz.copy()
count_z_mrg = count_z.copy()
#Test merges
merged = set()
accepted = set()
for z1, z2 in top_sims:
if z1 in merged or z2 in merged:
continue
if C[z1, z2] <= 0: #already at nonsimilar
break
Count_zh_mrg[:] = Count_zh
Count_sz_mrg[:] = Count_sz
count_z_mrg[:] = count_z
#Merge z1 and z2
Count_zh_mrg[z1] += Count_zh[z2]
Count_sz_mrg[:, z1] += Count_sz[:, z2]
count_z_mrg[z1] += count_z[z2]
#Remove z2
Count_zh_mrg[z2] = 0
Count_sz_mrg[:, z2] = 0
count_z_mrg[z2] = 0
idx = Trace[:, -1] == z2
Trace[:, -1][idx] = z1
#get stamps for llhood
idx_int = idx_int_all[idx]
new_stamps._extend(z1, previous_stamps._get_all(z2))
new_stamps._clear_one(z2)
#New likelihood
ll_per_z_new = ll_per_z.copy()
ll_per_z_new[z2] = 0
_eval.quality_estimate(Dts, Trace, \
new_stamps, Count_zh_mrg, Count_sz_mrg, count_h, \
count_z_mrg, alpha_zh, beta_zs, \
ll_per_z_new, idx_int, kernel)
if ll_per_z_new.sum() > ll_per_z.sum():
merged.add(z1)
merged.add(z2)
accepted.add((z1, z2))
#Revert trace
Trace[:, -1][idx] = z2
new_stamps._clear_one(z1)
new_stamps._clear_one(z2)
new_stamps._extend(z1, previous_stamps._get_all(z1))
new_stamps._extend(z2, previous_stamps._get_all(z2))
return finalize_merge(nz, accepted, Dts, Trace, nh, ns, kernel)
def fit(trace_fpath, num_topics, alpha_zh, beta_zs, kernel, \
residency_priors, num_iter, num_batches, mpi_mode, from_=0, to=np.inf):
'''
    Learns the latent topics from a temporal hypergraph trace. Here we do an
    asynchronous learning of the topics similar to AD-LDA, as well as
    dynamic topic expansion/pruning.
Parameters
----------
trace_fpath : str
The path of the trace. Each line should be a \
(timestamp, hypernode, source, destination) where the \
timestamp is a long (seconds or milliseconds from epoch).
num_topics : int
The number of latent spaces to learn
alpha_zh : float
The value of the alpha_zh hyperparameter
beta_zs : float
The value of the beta_zs (beta) hyperaparameter
kernel : Kernel object
The kernel to use
residency_priors : array of float
The kernel hyper parameters
num_iter : int
The number of iterations to learn the model from
num_batches : int
Defines the number of batches of size num_iter
Returns
-------
A dictionary with the results.
'''
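    # A minimal usage sketch (the trace file name, hyperparameter values and
    # the kernel construction below are hypothetical; any tribeflow Kernel
    # object with the build/update_state interface used in this module works):
    #
    #   kernel = SomeKernel()  # assumed Kernel subclass, e.g. from tribeflow.kernels
    #   kernel.build(1, 10, residency_priors)
    #   rv = fit('trace.dat', num_topics=10, alpha_zh=0.1, beta_zs=0.1,
    #            kernel=kernel, residency_priors=np.array([]), num_iter=200,
    #            num_batches=2, mpi_mode=False)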
assert num_batches >= 2
comm = MPI.COMM_WORLD
num_workers = comm.size - 1
Dts, Trace, previous_stamps, Count_zh, Count_sz, \
count_h, count_z, prob_topics_aux, Theta_zh, Psi_sz, \
hyper2id, source2id = \
dataio.initialize_trace(trace_fpath, num_topics, num_iter, \
from_, to)
if mpi_mode:
workloads = generate_workload(Count_zh.shape[1], num_workers, Trace)
all_idx = np.arange(Trace.shape[0], dtype='i4')
for batch in xrange(num_batches):
print('Now at batch', batch)
if mpi_mode:
for worker_id in xrange(1, num_workers + 1):
comm.send(num_iter, dest=worker_id, tag=Msg.LEARN.value)
dispatch_jobs(Dts, Trace, Count_zh, Count_sz, \
count_h, count_z, alpha_zh, beta_zs, kernel, \
residency_priors, workloads, num_workers, comm)
manage(comm, num_workers)
fetch_results(comm, num_workers, workloads, Dts, Trace, \
previous_stamps, Count_zh, Count_sz, count_h, \
count_z, alpha_zh, beta_zs, Theta_zh, Psi_sz, \
kernel)
else:
prob_topics_aux = np.zeros(Count_zh.shape[0], dtype='f8')
_learn.em(Dts, Trace, previous_stamps, Count_zh, Count_sz, \
count_h, count_z, alpha_zh, beta_zs, \
prob_topics_aux, Theta_zh, Psi_sz, num_iter, \
num_iter * 2, kernel, False)
print('Split')
ll_per_z = np.zeros(count_z.shape[0], dtype='f8')
_eval.quality_estimate(Dts, Trace, previous_stamps, \
Count_zh, Count_sz, count_h, count_z, alpha_zh, \
beta_zs, ll_per_z, all_idx, kernel)
Trace, Count_zh, Count_sz, count_z, previous_stamps, \
P = split(Dts, Trace, previous_stamps, Count_zh, \
Count_sz, count_h, count_z, alpha_zh, beta_zs, \
ll_per_z, kernel)
kernel = kernel.__class__()
kernel.build(Trace.shape[0], Count_zh.shape[0], residency_priors)
if residency_priors.shape[0] > 0:
kernel.update_state(P)
print('Merge')
ll_per_z = np.zeros(count_z.shape[0], dtype='f8')
_eval.quality_estimate(Dts, Trace, previous_stamps, \
Count_zh, Count_sz, count_h, count_z, alpha_zh, \
beta_zs, ll_per_z, all_idx, kernel)
Trace, Count_zh, Count_sz, count_z, previous_stamps, \
P = merge(Dts, Trace, previous_stamps, Count_zh, \
Count_sz, count_h, count_z, alpha_zh, beta_zs, \
ll_per_z, kernel)
kernel = kernel.__class__()
kernel.build(Trace.shape[0], Count_zh.shape[0], residency_priors)
if residency_priors.shape[0] > 0:
kernel.update_state(P)
Theta_zh = np.zeros(shape=Count_zh.shape, dtype='f8')
Psi_sz = np.zeros(shape=Count_sz.shape, dtype='f8')
if batch == num_batches - 1:
print('Computing probs')
_learn._aggregate(Count_zh, Count_sz, count_h, count_z, \
alpha_zh, beta_zs, Theta_zh, Psi_sz)
print('New nz', Count_zh.shape[0])
if mpi_mode:
for worker_id in xrange(1, num_workers + 1):
comm.send(num_iter, dest=worker_id, tag=Msg.STOP.value)
rv = prepare_results(trace_fpath, num_topics, alpha_zh, beta_zs, \
kernel, residency_priors, num_iter, -1, Dts, Trace, \
Count_zh, Count_sz, count_h, count_z, prob_topics_aux, Theta_zh, \
Psi_sz, hyper2id, source2id, from_, to)
rv['num_workers'] = np.asarray([num_workers])
rv['num_batches'] = np.asarray([num_batches])
rv['algorithm'] = np.asarray(['parallel dynamic'])
return rv
| bsd-3-clause | 787,024,947,106,067,100 | 32.521327 | 79 | 0.551393 | false | 3.072546 | false | false | false | 0.012159 |
denismakogon/pyvcloud | pyvcloud/vcloudair.py | 1 | 77761 | # VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
#todo: upload/download ovf to/from catalog
#todo: create vapp network name is not being used, clean it up
#todo: pass parameters in the create vapp to optimize for speed, available from 6.3
#todo: refactor returns, raise exceptions, document with release notes
import sys
import os
import time
import requests
from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
FileTransferSpeed, FormatLabel, Percentage, \
ProgressBar, ReverseBar, RotatingMarker, \
SimpleProgress, Timer
from StringIO import StringIO
import json
from xml.etree import ElementTree as ET
from pyvcloud.schema.vcd.v1_5.schemas.admin import vCloudEntities
from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import AdminCatalogType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.diskType import OwnerType, DiskType, VdcStorageProfileType, DiskCreateParamsType
from pyvcloud.schema.vcd.schemas.versioning import versionsType
from pyvcloud.vcloudsession import VCS
from pyvcloud.vapp import VAPP
from pyvcloud.gateway import Gateway
from pyvcloud.schema.vcim import serviceType, vchsType
from pyvcloud.helper import CommonUtils
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import OrgVdcNetworkType,\
ReferenceType, NetworkConfigurationType, IpScopesType, IpScopeType,\
IpRangesType, IpRangeType, DhcpPoolServiceType
from pyvcloud.score import Score
from pyvcloud import _get_logger, Http, Log
class VCA(object):
VCA_SERVICE_TYPE_STANDALONE = 'standalone'
VCA_SERVICE_TYPE_VCHS = 'vchs'
VCA_SERVICE_TYPE_VCA = 'vca'
VCA_SERVICE_TYPE_UNKNOWN = 'unknown'
statuses = ['Could not be created',
'Unresolved',
'Resolved',
'Deployed',
'Suspended',
'Powered on',
'Waiting for user input',
'Unknown state',
'Unrecognized state',
'Powered off',
'Inconsistent state',
'Children do not all have the same status',
'Upload initiated, OVF descriptor pending',
'Upload initiated, copying contents',
                'Upload initiated, disk contents pending',
'Upload has been quarantined',
'Upload quarantine period has expired'
]
def __init__(self, host, username, service_type=VCA_SERVICE_TYPE_VCA, version='5.7', verify=True, log=False):
"""
Create a VCA connection
:param host: (str): The vCloud Air Host. Varies by service type.
Valid values are https://vchs.vmware.com and https://vca.vmware.com
:param username: (str): The username for the vCloud Air Service.
:param service_type: (str, optional): The type of vCloud Air Service. Valid values are ondemand, subscription, vcd.
:param version: (str, optional): The API version. Note: may vary by service type.
:verify: (bool, optional): Enforce strict ssl certificate checking.
:log: (bool, optional): enable logging for the connection.
:return: (bool): True if the user was successfully logged in, False otherwise.
**service type:** subscription, ondemand, vcd
"""
if not (host.startswith('https://') or host.startswith('http://')):
host = 'https://' + host
self.host = host
self.username = username
self.token = None
self.service_type = service_type
self.version = version
self.verify = verify
self.vcloud_session = None
self.instances = None
self.org = None
self.organization = None
self.vdc = None
self.services = None
self.response = None
self.log = log
self.logger = _get_logger() if log else None
def get_service_type(self):
"""
Returns the service type provided by the host (standalone, vchs or vca).
        This method only uses the host variable; it doesn't require the
        user to log in.
:return: (str): The type of service provided by the host.
**service type:** standalone, vchs, vca
"""
url = self.host + '/api/iam/login'
headers = {}
headers["Accept"] = "application/json;version=" + '5.7'
response = Http.post(url, headers=headers, auth=('_', '_'), verify=self.verify, logger=self.logger)
if response.status_code == requests.codes.unauthorized:
return VCA.VCA_SERVICE_TYPE_VCA
url = self.host + '/api/vchs/sessions'
headers = {}
headers["Accept"] = "application/xml;version=" + '5.6'
response = Http.post(url, headers=headers, auth=('_', '_'), verify=self.verify, logger=self.logger)
if response.status_code == requests.codes.unauthorized:
return VCA.VCA_SERVICE_TYPE_VCHS
url = self.host + '/api/versions'
response = Http.get(url, verify=self.verify, logger=self.logger)
if response.status_code == requests.codes.ok:
try:
supported_versions = versionsType.parseString(response.content, True)
return VCA.VCA_SERVICE_TYPE_STANDALONE
except:
pass
return VCA.VCA_SERVICE_TYPE_UNKNOWN
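    # Example (host and credentials are hypothetical): probe a host before
    # deciding how to log in; the probe itself needs no valid credentials.
    #
    #   vca = VCA('vca.vmware.com', 'user@example.com')
    #   print vca.get_service_type()  # one of: vca, vchs, standalone, unknown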
def _get_services(self):
headers = {}
headers["x-vchs-authorization"] = self.token
headers["Accept"] = "application/xml;version=" + self.version
response = Http.get(self.host + "/api/vchs/services", headers=headers, verify=self.verify, logger=self.logger)
if response.status_code == requests.codes.ok:
return serviceType.parseString(response.content, True)
def login(self, password=None, token=None, org=None, org_url=None):
"""
        Request to log in to vCloud Air
        :param password: (str, optional): The password.
        :param token: (str, optional): The token from a previous successful login, None if this is a new login request.
        :param org: (str, optional): The organization identifier.
        :param org_url: (str, optional): The organization url, used for standalone vcd logins.
:return: (bool): True if the user was successfully logged in, False otherwise.
**service type:** vca, vchs, standalone
"""
if self.service_type in [VCA.VCA_SERVICE_TYPE_VCHS, 'subscription']:
if token:
headers = {}
headers["x-vchs-authorization"] = token
headers["Accept"] = "application/xml;version=" + self.version
self.response = Http.get(self.host + "/api/vchs/services", headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
self.services = serviceType.parseString(self.response.content, True)
self.token = token
return True
else:
return False
else:
url = self.host + "/api/vchs/sessions"
headers = {}
headers["Accept"] = "application/xml;version=" + self.version
self.response = Http.post(url, headers=headers, auth=(self.username, password), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
self.token = self.response.headers["x-vchs-authorization"]
self.services = self._get_services()
return True
else:
return False
elif self.service_type in [VCA.VCA_SERVICE_TYPE_VCA, 'ondemand']:
if token:
self.token = token
self.instances = self.get_instances()
                return self.instances is not None
else:
url = self.host + "/api/iam/login"
headers = {}
headers["Accept"] = "application/json;version=%s" % self.version
self.response = Http.post(url, headers=headers, auth=(self.username, password), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
self.token = self.response.headers["vchs-authorization"]
self.instances = self.get_instances()
return True
else:
return False
elif self.service_type in [VCA.VCA_SERVICE_TYPE_STANDALONE, 'vcd']:
if token:
url = self.host + '/api/sessions'
vcloud_session = VCS(url, self.username, org, None, org_url, org_url, version=self.version, verify=self.verify, log=self.log)
result = vcloud_session.login(token=token)
if result:
self.org = org
self.vcloud_session = vcloud_session
return result
else:
url = self.host + '/api/sessions'
vcloud_session = VCS(url, self.username, org, None, org_url, org_url, version=self.version, verify=self.verify, log=self.log)
result = vcloud_session.login(password=password)
if result:
self.token = vcloud_session.token
self.org = org
self.vcloud_session = vcloud_session
return result
else:
return False
return False
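    # Example (host, user and password are hypothetical): log in to the vca
    # service and reuse the token for a later session instead of the password.
    #
    #   vca = VCA('vca.vmware.com', 'user@example.com', service_type='vca')
    #   assert vca.login(password='secret')
    #   saved_token = vca.token
    #   vca2 = VCA('vca.vmware.com', 'user@example.com', service_type='vca')
    #   assert vca2.login(token=saved_token)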
def get_service_groups(self):
"""
Request available service groups for a given company.
        :return: (list of dict): the available service groups, parsed from json.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.billing.serviceGroups" % self.version
self.response = Http.get(self.host + "/api/billing/service-groups", headers=headers, verify=self.verify, logger=self.logger)
if self.response.history and self.response.history[-1]:
self.response = Http.get(self.response.history[-1].headers['location'], headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)['serviceGroupList']
else:
raise Exception(self.response.status_code)
def get_plans(self):
"""
Request plans available for an ondemand account.
        :return: (list of dict): the available plans, parsed from json.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.sc.restapi.model.planlisttype" % self.version
self.response = Http.get(self.host + "/api/sc/plans", headers=headers, verify=self.verify, logger=self.logger)
if self.response.history and self.response.history[-1]:
self.response = Http.get(self.response.history[-1].headers['location'], headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)['plans']
else:
raise Exception(self.response.status_code)
def get_plan(self, plan_id):
"""
Request plan details.
        :return: (dict): the plan details, parsed from json.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.sc.restapi.model.planlisttype" % self.version
self.response = Http.get(self.host + "/api/sc/plans/%s" % plan_id, headers=headers,
verify=self.verify, logger=self.logger)
if self.response.history and self.response.history[-1]:
self.response = Http.get(self.response.history[-1].headers['location'], headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)
else:
raise Exception(self.response.status_code)
def get_users(self):
"""
Retrieves a collection of all users the authenticated API user has access to.
        :return: (list of dict): the users, parsed from json.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.iam.api.schema.v2.classes.user.Users" % self.version
self.response = Http.get(self.host + "/api/iam/Users", headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)['users']
else:
raise Exception(self.response.status_code)
def add_user(self, email, given_name, family_name, roles):
"""
        Add a new user to the account.
        :return: (dict): the created user, parsed from json.
**service type:** vca
"""
data = """
{
"schemas": [
"urn:scim:schemas:core:1.0"
],
"state": "Active",
"email": "%s",
"familyName": "%s",
"givenName": "%s",
"roles": {
"roles": [
""" % (email, family_name, given_name)
first_role = True
for role in roles:
if first_role:
first_role = False
else:
data += ','
data += """
{
"name": "%s"
}
""" % role.strip()
data += """
]
},
"userName": "%s"
}
""" % email
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.iam.api.schema.v2.classes.user.Users" % self.version
headers['Content-Type'] = "application/json;class=com.vmware.vchs.iam.api.schema.v2.classes.user.User;version=%s" % self.version
self.response = Http.post(self.host + "/api/iam/Users", headers=headers, data=data, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
return json.loads(self.response.content)
else:
raise Exception(self.response.status_code)
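    # Example (values hypothetical): create a user with a single role; the
    # role name must match one returned by get_roles().
    #
    #   user = vca.add_user('jdoe@example.com', 'Jane', 'Doe',
    #                       roles=['End User'])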
def del_user(self, user_id):
"""
        Delete a user by id.
        :return: (bool): True if the user was deleted.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json"
self.response = Http.delete(self.host + "/api/iam/Users/" + user_id, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
else:
Log.error(self.logger, self.response.status_code)
Log.error(self.logger, self.response.content)
raise Exception(self.response.status_code)
def change_password(self, current_password, new_password):
"""
Change current user password.
        :return: (bool): True if the password was changed.
**service type:** vca
"""
data = """
{"currentPassword":"%s","newPassword":"%s"}
""" % (current_password, new_password)
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.iam.api.schema.v2.classes.user.Password" % self.version
headers['Content-Type'] = "application/json;class=com.vmware.vchs.iam.api.schema.v2.classes.user.Password;version=%s" % self.version
self.response = Http.put(self.host + "/api/iam/Users/password", headers=headers, data=data, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
else:
raise Exception(self.response.status_code)
def validate_user(self, email, new_password, token):
"""
Validate user and set the initial password.
        :return: (bool): True if the user was validated.
**service type:** vca
"""
headers = {}
headers['Accept'] = "application/json;version=%s" % self.version
headers['Content-Type'] = "application/json"
self.response = Http.post(self.host + "/api/iam/access/%s" % token, headers=headers, auth=(email, new_password), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return True
else:
raise Exception(self.response.status_code)
def reset_password(self, user_id):
"""
Reset user password.
        :return: (bool): True if the password reset was initiated.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Content-Type'] = "application/json;version=%s" % self.version
self.response = Http.put(self.host +
"/api/iam/Users/%s/password/reset" %
user_id, headers=headers,
verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
else:
Log.error(self.logger, self.response.status_code)
Log.error(self.logger, self.response.content)
raise Exception(self.response.status_code)
def get_roles(self):
"""
        Get the roles available in the account.
        :return: (list of dict): the available roles, parsed from json.
**service type:** vca
"""
headers = self._get_vcloud_headers()
headers['Accept'] = "application/json;version=%s;class=com.vmware.vchs.iam.api.schema.v2.classes.user.Roles" % self.version
self.response = Http.get(self.host + "/api/iam/Roles", headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)['roles']
else:
raise Exception(self.response.status_code)
def get_instances(self):
"""
Request available instances
        :return: (list of dict): the available instances, parsed from json.
**service type:** vca
"""
self.response = Http.get(self.host + "/api/sc/instances", headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.history and self.response.history[-1]:
self.response = Http.get(self.response.history[-1].headers['location'], headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)['instances']
else:
raise Exception(self.response.status_code)
def get_instance(self, instance_id):
"""
Returns the details of a service instance
        :return: (dict): the instance details, parsed from json.
**service type:** vca
"""
self.response = Http.get(self.host + "/api/sc/instances/%s" %
instance_id,
headers=self._get_vcloud_headers(),
verify=self.verify, logger=self.logger)
if self.response.history and self.response.history[-1]:
self.response = Http.get(self.response.history[-1].headers['location'], headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return json.loads(self.response.content)
else:
raise Exception(self.response.status_code)
def delete_instance(self, instance):
"""
Request to delete an existing instance
        :param instance: (str): The instance identifier.
        :return: (bool): True if the delete request was accepted, False otherwise.
        **service type:** vca
        """
        self.response = Http.delete(self.host + "/api/sc/instances/" + instance, headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
        if self.response.status_code == requests.codes.accepted:
            return True
        Log.error(self.logger, '%s: %s' % (self.response.status_code, self.response.content))
        return False
def login_to_instance(self, instance, password, token=None, org_url=None):
"""
        Request to log in to a specific instance
        :param instance: (str): The instance identifier.
        :param password: (str): The password.
        :param token: (str, optional): The token from a previous successful login, None if this is a new login request.
        :param org_url: (str, optional): The organization url to use for the session.
:return: (bool): True if the login was successful, False otherwise.
**service type:** vca
"""
instances = filter(lambda i: i['id']==instance, self.instances)
if len(instances)>0:
if 'No Attributes' == instances[0]['instanceAttributes']:
return False
attributes = json.loads(instances[0]['instanceAttributes'])
session_uri = attributes['sessionUri']
org_name = attributes['orgName']
vcloud_session = VCS(session_uri, self.username, org_name, instance, instances[0]['apiUrl'], org_url, version=self.version, verify=self.verify, log=self.log)
result = vcloud_session.login(password, token)
if result:
self.vcloud_session = vcloud_session
return True
return False
def login_to_instance_sso(self, instance, token=None, org_url=None):
"""
        Request to log in to a specific instance using single sign-on
        :param instance: (str): The instance identifier.
        :param token: (str, optional): The token from a previous successful login, None if this is a new login request.
        :param org_url: (str, optional): The organization url for the session; defaults to the instance apiUrl.
:return: (bool): True if the login was successful, False otherwise.
**service type:** vca
"""
Log.debug(self.logger, 'SSO to instance %s, org_url=%s' % (instance, org_url))
instances = filter(lambda i: i['id']==instance, self.instances)
if len(instances)>0:
if 'instanceAttributes' not in instances[0] or 'No Attributes' == instances[0]['instanceAttributes']:
return False
attributes = json.loads(instances[0]['instanceAttributes'])
plans = self.get_plans()
service_name = filter(lambda plan:
plan['id'] == instances[0]['planId'],
plans)[0]['serviceName']
if 'com.vmware.vchs.compute' != service_name:
Log.debug(self.logger, 'cannot select instance of plan %s'
% service_name)
return False
session_uri = attributes['sessionUri']
org_name = attributes['orgName']
from urlparse import urlparse
parsed_uri = urlparse(session_uri)
region_fqdn = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
headers = self._get_vcloud_headers()
headers['Accept'] = 'application/xml;version=5.7'
Log.debug(self.logger, 'SSO with region_fqdn=%s, session_uri=%s, org_name=%s, apiUrl=%s' % (region_fqdn, session_uri, org_name, instances[0]['apiUrl']))
if org_url is None:
org_url = instances[0]['apiUrl']
Log.debug(self.logger, headers)
self.response = Http.post(region_fqdn + 'api/sessions/vcloud/' + org_name,
headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
Log.debug(self.logger, 'ok: ' + self.response.content)
Log.debug(self.logger, 'ok: ' + str(self.response.headers))
vcloud_session = VCS(session_uri, self.username, org_name, instance, instances[0]['apiUrl'], org_url, version=self.version, verify=self.verify, log=self.log)
token = self.response.headers['x-vcloud-authorization']
result = vcloud_session.login(token=token)
if result:
self.vcloud_session = vcloud_session
return True
else:
return False
else:
Log.debug(self.logger, 'ko: ' + self.response.content)
return False
return False
#subscription
def get_vdc_references(self, serviceId):
"""
Request a list of references to existing virtual data centers.
:param serviceId: (str): The service instance identifier.
        :return: (list of VdcReferenceType): a list of :class:`pyvcloud.schema.vcim.vchsType.VdcReferenceType` objects for the vdcs hosting the service.
**service type:** vchs
"""
serviceReferences = filter(lambda serviceReference: serviceReference.get_serviceId() == serviceId, self.services.get_Service())
if len(serviceReferences) == 0:
return []
self.response = Http.get(serviceReferences[0].get_href(), headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
vdcs = vchsType.parseString(self.response.content, True)
return vdcs.get_VdcRef()
def get_vdc_reference(self, serviceId, vdcId):
"""
Request a reference to a specific vdc context hosting a service.
:param serviceId: (str): The service identifier for the service.
:param vdcId: (str): The identifier for the virtual data center.
        :return: (VdcReferenceType) a :class:`pyvcloud.schema.vcim.vchsType.VdcReferenceType` object representing the vdc.
**service type:** vchs
"""
vdcReferences = filter(lambda vdcRef: vdcRef.get_name() == vdcId, self.get_vdc_references(serviceId))
if len(vdcReferences) == 0:
return None
return vdcReferences[0]
#in subscription 1 org <-> 1 vdc
def login_to_org(self, service, org_name):
"""
        Request to log in to a specific organization.
        An organization is a unit of administration for a collection of users, groups, and computing resources.
        :param service: (str): The service identifier.
        :param org_name: (str): The name of the organization (for a subscription service this matches the vdc name).
        :return: (bool): True if the login was successful, False otherwise.
        **service type:** vchs
.. note:: for a subscription service, 1 org <-> 1 vdc
"""
vdcReference = self.get_vdc_reference(service, org_name)
if vdcReference:
link = filter(lambda link: link.get_type() == "application/xml;class=vnd.vmware.vchs.vcloudsession", vdcReference.get_Link())[0]
self.response = Http.post(link.get_href(), headers=self._get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
vchs = vchsType.parseString(self.response.content, True)
vdcLink = vchs.get_VdcLink()
headers = {}
headers[vdcLink.authorizationHeader] = vdcLink.authorizationToken
headers["Accept"] = "application/*+xml;version=" + self.version
self.response = Http.get(vdcLink.href, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
self.vdc = vdcType.parseString(self.response.content, True)
self.org = self.vdc.name
org_url = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.org+xml", self.vdc.get_Link())[0].href
vcloud_session = VCS(org_url, self.username, self.org, None, org_url, org_url, version=self.version, verify=self.verify, log=self.log)
if vcloud_session.login(password=None, token=vdcLink.authorizationToken):
self.vcloud_session = vcloud_session
return True
return False
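    # Example (service and vdc ids hypothetical): for a subscription account,
    # log in first, then select a service and its organization/vdc.
    #
    #   vca = VCA('vchs.vmware.com', 'user@example.com', service_type='vchs')
    #   if vca.login(password='secret'):
    #       vca.login_to_org('M123456789-4444', 'VDC1')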
#common
def _get_vcloud_headers(self):
headers = {}
if self.service_type == VCA.VCA_SERVICE_TYPE_VCHS or self.service_type == 'subscription':
headers["Accept"] = "application/xml;version=" + self.version
headers["x-vchs-authorization"] = self.token
elif self.service_type == VCA.VCA_SERVICE_TYPE_VCA or self.service_type == 'ondemand':
headers["Authorization"] = "Bearer %s" % self.token
headers["Accept"] = "application/json;version=%s" % self.version
elif self.service_type == VCA.VCA_SERVICE_TYPE_STANDALONE or self.service_type == 'vcd':
# headers["x-vcloud-authorization"] = self.token
pass
return headers
def get_vdc_templates(self):
pass
def get_vdc(self, vdc_name):
"""
Request a reference to a specific Virtual Data Center.
A vdc is a logical construct that provides compute, network, and storage resources to an organization.
Virtual machines can be created, stored, and operated within a vdc.
        A vdc also provides storage for virtual media.
        :param vdc_name: (str): The virtual data center name.
        :return: (VdcType) a :class:`.vcloud.vdcType.VdcType` object describing the vdc.
**service type:** ondemand, subscription, vcd
"""
if self.vcloud_session and self.vcloud_session.organization:
refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml', self.vcloud_session.organization.Link)
if len(refs) == 1:
self.response = Http.get(refs[0].href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return vdcType.parseString(self.response.content, True)
def get_vdc_names(self):
"""
Returns a list of Virtual Data Centers in the Organization.
:return: (list of str) list of vdc names
**service type:** vca, vchs, standalone
"""
vdcs = []
if self.vcloud_session and self.vcloud_session.organization:
refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
self.vcloud_session.organization.Link)
for ref in refs:
vdcs.append(ref.name)
return vdcs
def get_vapp(self, vdc, vapp_name):
"""
Request a reference to a specific vapp.
A vApp is an application package containing 1 or more virtual machines and their required operating system.
:param vdc: (VdcType): The virtual data center name.
:param vapp_name: (str): The name of the requested vapp.
:return: (VAPP): a :class:`pyvcloud.vapp.VAPP` object describing the vApp.
**service type:** ondemand, subscription, vcd
"""
refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml', vdc.ResourceEntities.ResourceEntity)
if len(refs) == 1:
self.response = Http.get(refs[0].href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
vapp = VAPP(vAppType.parseString(self.response.content, True), self.vcloud_session.get_vcloud_headers(), self.verify, self.log)
return vapp
def _create_instantiateVAppTemplateParams(self, name, template_href,
vm_name, vm_href, deploy,
power, vm_cpus=None,
vm_memory=None):
templateParams = vcloudType.InstantiateVAppTemplateParamsType()
templateParams.set_name(name)
templateParams.set_deploy(deploy)
templateParams.set_powerOn(power)
source = vcloudType.ReferenceType(href=template_href)
templateParams.set_Source(source)
templateParams.set_AllEULAsAccepted("true")
if vm_name or vm_cpus or vm_memory:
params = vcloudType.SourcedCompositionItemParamType()
if ((self.version == "1.0") or (self.version == "1.5")
or (self.version == "5.1") or (self.version == "5.5")):
message = 'Customization during instantiation is not ' +\
'supported in this version, use vapp methods ' +\
'to change vm name, cpu or memory'
Log.error(self.logger, message)
raise Exception(message)
else:
params.set_Source(vcloudType.ReferenceType(href=vm_href))
templateParams.add_SourcedItem(params)
if vm_name:
gen_params = vcloudType.VmGeneralParamsType()
gen_params.set_Name(vm_name)
params.set_VmGeneralParams(gen_params)
if vm_cpus or vm_memory:
inst_param = vcloudType.InstantiationParamsType()
hardware = vcloudType.VirtualHardwareSection_Type(id=None)
hardware.original_tagname_ = "VirtualHardwareSection"
hardware.set_Info(vAppType.cimString(valueOf_="Virtual hardware requirements"))
inst_param.add_Section(hardware)
params.set_InstantiationParams(inst_param)
if vm_cpus:
cpudata = vAppType.RASD_Type()
cpudata.original_tagname_ = "ovf:Item"
cpudata.set_required(None)
cpudata.set_AllocationUnits(vAppType.cimString(valueOf_="hertz * 10^6"))
cpudata.set_Description(vAppType.cimString(valueOf_="Number of Virtual CPUs"))
cpudata.set_ElementName(vAppType.cimString(valueOf_="{0} virtual CPU(s)".format(vm_cpus)))
cpudata.set_InstanceID(vAppType.cimInt(valueOf_=1))
cpudata.set_ResourceType(3)
cpudata.set_VirtualQuantity(vAppType.cimInt(valueOf_=vm_cpus))
hardware.add_Item(cpudata)
if vm_memory:
memorydata = vAppType.RASD_Type()
memorydata.original_tagname_ = "ovf:Item"
memorydata.set_required(None)
memorydata.set_AllocationUnits(vAppType.cimString(valueOf_="byte * 2^20"))
memorydata.set_Description(vAppType.cimString(valueOf_="Memory Size"))
memorydata.set_ElementName(vAppType.cimString(valueOf_="{0} MB of memory".format(vm_memory)))
memorydata.set_InstanceID(vAppType.cimInt(valueOf_=2))
memorydata.set_ResourceType(4)
memorydata.set_VirtualQuantity(vAppType.cimInt(valueOf_=vm_memory))
hardware.add_Item(memorydata)
return templateParams
def _get_vdc_templates(self):
content_type = "application/vnd.vmware.admin.vdcTemplates+xml"
link = filter(lambda link: link.get_type() == content_type, self.vcloud_session.get_Link())
if len(link) == 0:
return []
self.response = Http.get(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
return vdcTemplateListType.parseString(self.response.content, True)
def create_vdc(self, vdc_name, vdc_template_name=None):
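        """
        Create a new vdc from a vdc template.
        :param vdc_name: (str): The name of the new virtual data center.
        :param vdc_template_name: (str, optional): The name of the vdc template to instantiate; the first available template is used when omitted.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType.TaskType` object that can be used to monitor the request.
        **service type:** ondemand, subscription, vcd
        """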
vdcTemplateList = self._get_vdc_templates()
content_type = "application/vnd.vmware.admin.vdcTemplate+xml"
vdcTemplate = None
if vdc_template_name is None:
vdcTemplate = filter(lambda link: link.get_type() == content_type, vdcTemplateList.get_VdcTemplate())[0]
else:
vdcTemplate = filter(lambda link: (link.get_type() == content_type) and (link.get_name() == vdc_template_name), vdcTemplateList.get_VdcTemplate())[0]
source = vcloudType.ReferenceType(href=vdcTemplate.get_href())
        templateParams = vcloudType.InstantiateVAppTemplateParamsType()  # the payload is simple enough to reuse this type instead of adding a dedicated InstantiateVdcTemplateParamsType class
templateParams.set_name(vdc_name)
templateParams.set_Source(source)
body = CommonUtils.convertPythonObjToStr(templateParams, name="InstantiateVdcTemplateParams",
namespacedef='xmlns="http://www.vmware.com/vcloud/v1.5"')
content_type = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
link = filter(lambda link: link.get_type() == content_type, self.vcloud_session.get_Link())
self.response = Http.post(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, data=body, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
task = taskType.parseString(self.response.content, True)
return task
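    # Example (vdc name hypothetical): instantiate the default vdc template
    # and wait for the returned task to finish.
    #
    #   task = vca.create_vdc('VDC2')
    #   if task is not None:
    #       vca.block_until_completed(task)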
def delete_vdc(self, vdc_name):
"""
Request the deletion of an existing vdc.
:param vdc_name: (str): The name of the virtual data center.
:return: (tuple of (bool, task or str)) Two values are returned, a bool success indicator and a \
:class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object if the bool value was True or a \
str message indicating the reason for failure if the bool value was False.
**service type:** standalone, vchs, vca
"""
vdc = self.get_vdc(vdc_name)
if vdc is None:
return (False, 'VDC not found')
self.response = Http.delete(vdc.get_href() + '?recursive=true&force=true', headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
task = taskType.parseString(self.response.content, True)
return (True, task)
else:
return (False, self.response.content)
def create_vapp(self, vdc_name, vapp_name, template_name, catalog_name,
network_name=None, network_mode='bridged', vm_name=None,
vm_cpus=None, vm_memory=None, deploy='false',
poweron='false'):
"""
Create a new vApp in a virtual data center.
A vApp is an application package containing 1 or more virtual machines and their required operating system.
:param vdc_name: (str): The virtual data center name.
:param vapp_name: (str): The name of the new vapp.
:param template_name: (str): The name of a template from a catalog that will be used to create the vApp.
:param catalog_name: (str): The name of the catalog that contains the named template.
:param network_name: (str): The name of the network contained within the vApp.
:param network_mode: (str): The mode for the network contained within the vApp.
:param vm_name: (str, optional): The name of the Virtual Machine contained in the vApp.
:param vm_cpus: (str, optional): The number of virtual CPUs assigned to the VM.
:param vm_memory: (str, optional): The amount of memory assigned to the VM, specified in MB.
:param deploy: (bool): True to deploy the vApp immediately after creation, False otherwise.
:param poweron: (bool): True to poweron the vApp immediately after deployment, False otherwise.
:return: (task): a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType`, a handle to the asynchronous process executing the request.
**service type:**. ondemand, subscription, vcd
.. note:: In this version of pyvcloud a maximum of 1 vm can be added to a vapp.
"""
self.vdc = self.get_vdc(vdc_name)
if not self.vcloud_session or not self.vcloud_session.organization or not self.vdc:
#"Select an organization and datacenter first"
return False
if '' == vm_name: vm_name = None
catalogs = filter(lambda link: catalog_name == link.get_name() and link.get_type() == "application/vnd.vmware.vcloud.catalog+xml",
self.vcloud_session.organization.get_Link())
if len(catalogs) == 1:
self.response = Http.get(catalogs[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
catalog = catalogType.parseString(self.response.content, True)
catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
if len(catalog_items) == 1:
self.response = Http.get(catalog_items[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
# use ElementTree instead because none of the types inside resources (not even catalogItemType) is able to parse the response correctly
catalogItem = ET.fromstring(self.response.content)
entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
vm_href = None
if vm_name:
self.response = Http.get(entity.get('href'), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
vAppTemplate = ET.fromstring(self.response.content)
for vm in vAppTemplate.iter('{http://www.vmware.com/vcloud/v1.5}Vm'):
vm_href = vm.get('href')
template_params = self._create_instantiateVAppTemplateParams(
vapp_name, entity.get("href"), vm_name=vm_name,
vm_href=vm_href, vm_cpus=vm_cpus, vm_memory=vm_memory,
deploy=deploy, power=poweron)
                    if network_name:
                        # todo: network_name is accepted but not yet applied to
                        # the instantiation parameters (see the module todos)
                        pass
output = StringIO()
template_params.export(output,
0,
name_ = 'InstantiateVAppTemplateParams',
namespacedef_ = '''xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"''',
pretty_print = False)
body = '<?xml version="1.0" encoding="UTF-8"?>' + \
output.getvalue().replace('class:', 'rasd:')\
.replace(' xmlns:vmw="http://www.vmware.com/vcloud/v1.5"', '')\
.replace('vmw:', 'rasd:')\
.replace('Info>', "ovf:Info>")
content_type = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
link = filter(lambda link: link.get_type() == content_type, self.vdc.get_Link())
self.response = Http.post(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, data=body, logger=self.logger)
if self.response.status_code == requests.codes.created:
vApp = vAppType.parseString(self.response.content, True)
task = vApp.get_Tasks().get_Task()[0]
return task
return False
def block_until_completed(self, task):
"""
Wait on a task until it has completed.
A task is an asynchronous process executing a request.
The status of the task is checked at one second intervals until the task is completed.
No timeout.
:param task: (task): A :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that represents a running task.
        :return: (bool) True if the task completed successfully, False if the task completed with an error.
**service type:** ondemand, subscription, vcd
"""
progress = task.get_Progress()
status = task.get_status()
rnd = 0
while status != "success":
if status == "error":
error = task.get_Error()
Log.error(self.logger, "task error, major=%s, minor=%s, message=%s" % (error.get_majorErrorCode(), error.get_minorErrorCode(), error.get_message()))
return False
            else:
                # some tasks do not report progress
                if not progress:
                    rnd += 1
time.sleep(1)
self.response = Http.get(task.get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
task = taskType.parseString(self.response.content, True)
progress = task.get_Progress()
status = task.get_status()
else:
Log.error(self.logger, "can't get task")
return False
return True
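    # Example (all names hypothetical): instantiate a vApp and block until
    # the returned task completes.
    #
    #   task = vca.create_vapp('VDC1', 'myapp', 'ubuntu-template', 'mycatalog')
    #   assert task, 'vApp instantiation request failed'
    #   assert vca.block_until_completed(task), 'instantiation task failed'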
def delete_vapp(self, vdc_name, vapp_name):
"""
Delete a specific vApp.
A vApp is an application package containing 1 or more virtual machines and their required operating system.
The vApp is undeployed and removed.
:param vdc_name: (str): The virtual data center name.
:param vapp_name: (str): The name of the vapp to be deleted.
        :return: (bool) True if the vapp was successfully deleted, False if the vapp was not found.
**service type:** ondemand, subscription, vcd
"""
self.vdc = self.get_vdc(vdc_name)
if not self.vcloud_session or not self.vcloud_session.organization or not self.vdc: return False
vapp = self.get_vapp(self.vdc, vapp_name)
if not vapp: return False
#undeploy and remove
if vapp.me.deployed:
task = vapp.undeploy()
if task:
self.block_until_completed(task)
else:
Log.debug(self.logger, "vapp.undeploy() didn't return a task")
return False
vapp = self.get_vapp(self.vdc, vapp_name)
if vapp: return vapp.delete()
Log.debug(self.logger, "no vApp")
def get_catalogs(self):
"""
Request a list of the available Public and Organization catalogs in the vdc.
A catalog contains one or more vApp templates and media images.
:return: (list of CatalogType) a list of :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.catalogType.CatalogType` objects that describe the available catalogs.
        Each CatalogType contains a single :class:`.catalogType.CatalogItemsType`
which contains a list of :class:`.vcloud.catalogType.ReferenceType` objects.
use get_name() on a CatalogType to retrieve the catalog name.
use get_name() on ReferenceType to retrieve the catalog item name.
**service type:** ondemand, subscription, vcd
"""
self.vcloud_session.login(token=self.vcloud_session.token)
links = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.catalog+xml", self.vcloud_session.organization.Link)
catalogs = []
for link in links:
self.response = Http.get(link.get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
catalogs.append(catalogType.parseString(self.response.content, True))
return catalogs
def create_catalog(self, catalog_name, description):
"""
Create a new catalog.
A catalog is a container for one or more vApp templates and media images.
:param catalog_name: (str): The name of the new catalog.
:param description: (str): A description for the new catalog.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the creation of the catalog.
**service type:** ondemand, subscription, vcd
"""
refs = filter(lambda ref: ref.rel == 'add' and ref.type_ == 'application/vnd.vmware.admin.catalog+xml',
self.vcloud_session.organization.Link)
if len(refs) == 1:
data = """<?xml version="1.0" encoding="UTF-8"?>
<AdminCatalog xmlns="http://www.vmware.com/vcloud/v1.5" name="%s">
<Description>%s</Description>
</AdminCatalog>
""" % (catalog_name, description)
self.response = Http.post(refs[0].href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, data=data, logger=self.logger)
if self.response.status_code == requests.codes.created:
task = vCloudEntities.parseString(self.response.content, True)
return task.get_Tasks().get_Task()[0]
def delete_catalog(self, catalog_name):
"""
Delete a specific catalog.
A catalog is a container for one or more vApp templates and media images.
:param catalog_name: (str): The name of the catalog to delete.
        :return: (bool) True if the catalog was successfully deleted, False if the catalog was not deleted (or found).
**service type:**. ondemand, subscription, vcd
"""
admin_url = None
if not self.vcloud_session or not self.vcloud_session.organization: return False
if 'ondemand' == self.service_type:
refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.admin.organization+xml',
self.vcloud_session.organization.Link)
if len(refs) == 1:
admin_url = refs[0].href
else:
refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.admin.catalog+xml',
self.vcloud_session.organization.Link)
if len(refs) == 1:
admin_url = refs[0].href[:refs[0].href.rindex('/')]
if admin_url:
self.response = Http.get(admin_url, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
adminOrg = vCloudEntities.parseString(self.response.content, True)
if adminOrg and adminOrg.Catalogs and adminOrg.Catalogs.CatalogReference:
catRefs = filter(lambda ref: ref.name == catalog_name and ref.type_ == 'application/vnd.vmware.admin.catalog+xml',
adminOrg.Catalogs.CatalogReference)
if len(catRefs) == 1:
self.response = Http.delete(catRefs[0].href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
return False
def upload_media(self, catalog_name, item_name, media_file_name, description='', display_progress=False, chunk_bytes=128*1024):
"""
Uploads a media file (ISO) to a vCloud catalog
:param catalog_name: (str): The name of the catalog to upload the media.
:param item_name: (str): The name of the media file in the catalog.
        :param media_file_name: (str): The name of the local media file to upload.
        :param description: (str, optional): A description for the media file.
        :param display_progress: (bool, optional): True to display a progress bar during the upload.
        :param chunk_bytes: (int, optional): The upload chunk size in bytes.
        :return: (bool) True if the media file was successfully uploaded, False otherwise.
**service type:** ondemand, subscription, vcd
"""
assert os.path.isfile(media_file_name)
statinfo = os.stat(media_file_name)
assert statinfo.st_size
for catalog in self.get_catalogs():
if catalog_name != catalog.name:
continue
link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and link.get_rel() == 'add', catalog.get_Link())
assert len(link) == 1
Log.debug(self.logger, link[0].get_href())
data = """
<Media
xmlns="http://www.vmware.com/vcloud/v1.5"
name="%s"
size="%s"
imageType="iso">
<Description>%s</Description>
</Media>
""" % (item_name, statinfo.st_size, description)
self.response = Http.post(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(),
data=data, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
catalogItem = ET.fromstring(self.response.content)
entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
href = entity.get('href')
self.response = Http.get(href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
media = mediaType.parseString(self.response.content, True)
link = filter(lambda link: link.get_rel() == 'upload:default', media.get_Files().get_File()[0].get_Link())[0]
progress_bar = None
if display_progress:
widgets = ['Uploading file: ', Percentage(), ' ', Bar(),
' ', ETA(), ' ', FileTransferSpeed()]
progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
f = open(media_file_name, 'rb')
bytes_transferred = 0
while bytes_transferred < statinfo.st_size:
my_bytes = f.read(chunk_bytes)
if len(my_bytes) <= chunk_bytes:
headers = self.vcloud_session.get_vcloud_headers()
                        # Content-Range is 'bytes first-last/total' with an inclusive last byte
                        headers['Content-Range'] = 'bytes %s-%s/%s' % (bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
headers['Content-Length'] = str(len(my_bytes))
self.response = Http.put(link.get_href(), headers=headers,
data=my_bytes, verify=self.verify, logger=None)
if self.response.status_code == requests.codes.ok:
bytes_transferred += len(my_bytes)
if display_progress:
progress_bar.update(bytes_transferred)
Log.debug(self.logger, 'transferred %s of %s bytes' % (str(bytes_transferred), str(statinfo.st_size)))
else:
Log.debug(self.logger, 'file upload failed with error: [%s] %s' % (self.response.status_code, self.response.content))
return False
f.close()
if display_progress:
progress_bar.finish()
return True
return False
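    # Example (catalog and file names hypothetical): upload an ISO with a
    # console progress bar.
    #
    #   ok = vca.upload_media('mycatalog', 'ubuntu.iso', '/tmp/ubuntu.iso',
    #                         description='Ubuntu installer',
    #                         display_progress=True)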
def delete_catalog_item(self, catalog_name, item_name):
"""
Request the deletion of an item from a catalog.
        An item is a vApp template or media image stored in a catalog.
        :param catalog_name: (str): The name of the catalog containing the item.
        :param item_name: (str): The name of the catalog item to delete.
        :return: (bool) True if the catalog item was successfully deleted, False if the item was not deleted (or found).
**service type:** ondemand, subscription, vcd
"""
for catalog in self.get_catalogs():
if catalog_name != catalog.name:
continue
if catalog.CatalogItems and catalog.CatalogItems.CatalogItem:
for item in catalog.CatalogItems.CatalogItem:
if item_name == item.name:
self.response = Http.delete(item.href, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
return False
def get_gateways(self, vdc_name):
"""
Request a list of the Gateways within a Virtual Data Center.
:param vdc_name: (str): The virtual data center name.
:return: (list of Gateway) A list of :class:`.pyvcloud.gateway.Gateway` objects describing the available gateways.
**service type:** ondemand, subscription, vcd
"""
gateways = []
vdc = self.get_vdc(vdc_name)
if not vdc: return gateways
link = filter(lambda link: link.get_rel() == "edgeGateways", vdc.get_Link())
self.response = Http.get(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
queryResultRecords = queryRecordViewType.parseString(self.response.content, True)
if queryResultRecords.get_Record():
for edgeGatewayRecord in queryResultRecords.get_Record():
self.response = Http.get(edgeGatewayRecord.get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
gateway = Gateway(networkType.parseString(self.response.content, True), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, busy=edgeGatewayRecord.isBusy, log=self.log)
gateways.append(gateway)
return gateways
def get_gateway(self, vdc_name, gateway_name):
"""
Request the details of a specific Gateway Appliance within a Virtual Data Center.
:param vdc_name: (str): The virtual data center name.
:param gateway_name: (str): The requested gateway name.
:return: (Gateway) A :class:`.pyvcloud.gateway.Gateway` object describing the requested gateway.
**service type:** ondemand, subscription, vcd
"""
gateway = None
vdc = self.get_vdc(vdc_name)
if not vdc: return gateway
link = filter(lambda link: link.get_rel() == "edgeGateways", vdc.get_Link())
self.response = Http.get(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
queryResultRecords = queryRecordViewType.parseString(self.response.content, True)
if queryResultRecords.get_Record():
for edgeGatewayRecord in queryResultRecords.get_Record():
if edgeGatewayRecord.get_name() == gateway_name:
self.response = Http.get(edgeGatewayRecord.get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
gateway = Gateway(networkType.parseString(self.response.content, True), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, busy=edgeGatewayRecord.isBusy, log=self.log)
break
return gateway
def get_networks(self, vdc_name):
"""
Request a list of the Networks within a Virtual Data Center.
:param vdc_name: (str): The virtual data center name.
:return: (list of OrgVdcNetworkType) A list of :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType.OrgVdcNetworkType` objects describing the available networks.
**service type:** ondemand, subscription, vcd
"""
result = []
vdc = self.get_vdc(vdc_name)
if not vdc: return result
networks = vdc.get_AvailableNetworks().get_Network()
for n in networks:
self.response = Http.get(n.get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
network = networkType.parseString(self.response.content, True)
result.append(network)
return result
def get_network(self, vdc_name, network_name):
"""
Request the details of a specific Network within a Virtual Data Center.
:param vdc_name: (str): The virtual data center name.
:param network_name: (str): The name of the requested network.
:return: (OrgVdcNetworkType) An :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType.OrgVdcNetworkType` object describing the requested network.
**service type:** ondemand, subscription, vcd
"""
result = None
networks = self.get_networks(vdc_name)
for network in networks:
if network.get_name() == network_name:
result = network
return result
def parsexml_(self, string_to_parse):
"""Parse an XML string and return the root ElementTree element."""
doc = ET.fromstring(string_to_parse)
return doc
def get_media(self, catalog_name, media_name):
"""
Request a media resource from a catalog.
:param catalog_name: (str): The name of the catalog containing the media.
:param media_name: (str): The name of the requested media.
:return: (dict of str,str) A dictionary describing the requested media.
Dictionary keys include 'name' (the media name), 'href' (an https url
to the media) and 'type' (the media type).
**service type:** ondemand, subscription, vcd
"""
refs = filter(lambda ref: ref.name == catalog_name and ref.type_ == 'application/vnd.vmware.vcloud.catalog+xml', self.vcloud_session.organization.Link)
if len(refs) == 1:
self.response = Http.get(refs[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
catalog = catalogType.parseString(self.response.content, True)
catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == media_name, catalog.get_CatalogItems().get_CatalogItem())
if len(catalog_items) == 1:
self.response = Http.get(catalog_items[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
# print self.response.content
if self.response.status_code == requests.codes.ok:
doc = self.parsexml_(self.response.content)
for element in doc:
if element.tag == '{http://www.vmware.com/vcloud/v1.5}Entity':
return element.attrib
# TODO: DELETE https://vchs.vmware.com/api/vchs/session
def logout(self):
"""
Request to logout from vCloud Air.
:return: None. The stored token and vCloud session are discarded.
**service type:** ondemand, subscription, vcd
"""
if self.service_type in [VCA.VCA_SERVICE_TYPE_STANDALONE, 'vcd']:
pass
elif self.service_type in [VCA.VCA_SERVICE_TYPE_VCHS, 'subscription']:
pass
elif self.service_type in [VCA.VCA_SERVICE_TYPE_VCA, 'ondemand']:
pass
self.token = None
self.vcloud_session = None
def create_vdc_network(self, vdc_name, network_name, gateway_name, start_address,
end_address, gateway_ip, netmask,
dns1, dns2, dns_suffix):
"""
Request the creation of a new network within a vdc.
:param vdc_name: (str): The name of the virtual data center.
:param network_name: (str): The name of the new network to be created.
:param gateway_name: (str): The name of an existing edge Gateway appliance that will manage the virtual network.
:param start_address: (str): The first ip address in a range of addresses for the network.
:param end_address: (str): The last ip address in a range of addresses for the network.
:param gateway_ip: (str): The ip address of the network's gateway.
:param netmask: (str): The network mask, e.g. '255.255.255.0'.
:param dns1: (str): The ip address of the primary DNS server.
:param dns2: (str): The ip address of the secondary DNS server.
:param dns_suffix: (str): The DNS suffix for the network.
:return: (tuple of (bool, task or str)) Two values are returned, a bool success indicator and a \
:class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object if the bool value was True or a \
str message indicating the reason for failure if the bool value was False.
**service type:** ondemand, subscription, vcd
"""
vdc = self.get_vdc(vdc_name)
if vdc is None:
return (False, 'vdc not found')
the_gateway = self.get_gateway(vdc_name, gateway_name)
if the_gateway is None:
return (False, 'gateway not found')
gateway = ReferenceType(href=the_gateway.me.href)
gateway.original_tagname_ = "EdgeGateway"
iprange = IpRangeType(StartAddress=start_address,
EndAddress=end_address)
ipranges = IpRangesType(IpRange=[iprange])
ipscope = IpScopeType(IsInherited=False,
Gateway=gateway_ip,
Netmask=netmask,
Dns1=dns1,
Dns2=dns2,
DnsSuffix=dns_suffix,
IpRanges=ipranges)
ipscopes = IpScopesType(IpScope=[ipscope])
configuration = NetworkConfigurationType(IpScopes=ipscopes,
FenceMode="natRouted")
net = OrgVdcNetworkType(name=network_name, Description="Network created by pyvcloud",
EdgeGateway=gateway, Configuration=configuration,
IsShared=False)
namespacedef = 'xmlns="http://www.vmware.com/vcloud/v1.5"'
content_type = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
body = '<?xml version="1.0" encoding="UTF-8"?>{0}'.format(
CommonUtils.convertPythonObjToStr(net, name='OrgVdcNetwork',
namespacedef=namespacedef))
postlink = filter(lambda link: link.get_type() == content_type,
vdc.get_Link())[0].href
headers = self.vcloud_session.get_vcloud_headers()
headers["Content-Type"] = content_type
self.response = Http.post(postlink, data=body, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
network = networkType.parseString(self.response.content, True)
task = network.get_Tasks().get_Task()[0]
return (True, task)
else:
return (False, self.response.content)
def delete_vdc_network(self, vdc_name, network_name):
"""
Request the deletion of an existing network within a vdc.
:param vdc_name: (str): The name of the virtual data center.
:param network_name: (str): The name of the new network to be deleted.
:return: (tuple of (bool, task or str)) Two values are returned, a bool success indicator and a \
:class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object if the bool value was True or a \
str message indicating the reason for failure if the bool value was False.
**service type:** ondemand, subscription, vcd
"""
netref = self.get_admin_network_href(vdc_name, network_name)
if netref is None:
return (False, 'network not found')
self.response = Http.delete(netref, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
task = taskType.parseString(self.response.content, True)
return (True, task)
else:
return (False, self.response.content)
def get_admin_network_href(self, vdc_name, network_name):
"""Return the admin href of the named Org VDC network, or None if not found."""
vdc = self.get_vdc(vdc_name)
link = filter(lambda link: link.get_rel() == "orgVdcNetworks",
vdc.get_Link())
self.response = Http.get(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.ok:
queryResultRecords = queryRecordViewType.parseString(self.response.content, True)
for record in queryResultRecords.get_Record():
if record.name == network_name:
return record.href
def get_score_service(self, score_service_url):
if self.vcloud_session is None or self.vcloud_session.token is None:
Log.error(self.logger, "self.vcloud_session is None")
return None
return Score(score_service_url, self.vcloud_session.org_url, self.vcloud_session.token, self.version, self.verify, self.log)
def get_diskRefs(self, vdc):
"""
Request a list of references to disk volumes in a vdc.
:param vdc: (str): The name of the virtual data center.
:return: (list of ResourceReferenceType) A list of
:class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.vdcType.ResourceReferenceType` objects.
Use get_name(), get_type() and get_href() methods on each list entry to return disk details.
**service type:** ondemand, subscription, vcd
"""
resourceEntities = vdc.get_ResourceEntities().get_ResourceEntity()
return [resourceEntity for resourceEntity in resourceEntities
if resourceEntity.get_type() == "application/vnd.vmware.vcloud.disk+xml"]
def _parse_disk(self, content):
diskDesc = diskType.parseString(content, True)
disk = DiskType()
ids = diskDesc.anyAttributes_.get('id').split(':')
disk.set_id(ids[3])
disk.set_name(diskDesc.anyAttributes_.get('name'))
disk.set_size(diskDesc.anyAttributes_.get('size'))
disk.set_busType(diskDesc.anyAttributes_.get('busType'))
disk.set_busSubType(diskDesc.anyAttributes_.get('busSubType'))
disk.set_status(diskDesc.anyAttributes_.get('status'))
xml = ET.fromstring(content)
for child in xml:
if '{http://www.vmware.com/vcloud/v1.5}Owner' == child.tag:
for grandchild in child:
owner = OwnerType()
owner.set_User(grandchild.attrib['name'])
disk.set_Owner(owner)
if '{http://www.vmware.com/vcloud/v1.5}StorageProfile' == child.tag:
storageProfile = VdcStorageProfileType()
storageProfile.set_name(child.attrib['name'])
disk.set_StorageProfile(storageProfile)
if '{http://www.vmware.com/vcloud/v1.5}Tasks' == child.tag:
task = taskType.parseString(ET.tostring(child.getchildren()[0]), True)
disk.set_Tasks([task])
return disk
def get_disks(self, vdc_name):
"""
Request a list of disks attached to a vdc.
:param vdc_name: (str): The name of a virtual data center.
:return: (list of tuples of (DiskType, list of str)): A list of tuples. \
Each tuple contains a :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.diskType.DiskType` object and a list of vms utilizing the disk.
**service type:** ondemand, subscription, vcd
"""
vdc = self.get_vdc(vdc_name)
links = self.get_diskRefs(vdc)
disks = []
for link in links:
response = Http.get(link.get_href(), headers = self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
disk = self._parse_disk(response.content)
vms = []
content_type = "application/vnd.vmware.vcloud.vms+xml"
response = Http.get(link.get_href()+'/attachedVms', headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
# print response.content
listofvms = vmsType.parseString(response.content, True)
for vmReference in listofvms.get_VmReference():
vms.append(vmReference)
disks.append([disk, vms])
return disks
def add_disk(self, vdc_name, name, size):
"""
Request the creation of an independent disk (not attached to a vApp).
:param vdc_name: (str): The name of the virtual data center.
:param name: (str): The name of the new disk.
:param size: (str): The size of the new disk in MB.
:return: (tuple(bool, DiskType)) Two values are returned, a bool success indicator and a :class:`pyvcloud.schema.vcd.v1_5.schemas.vcloud.diskType.DiskType` object describing the disk resource.
**service type:** ondemand, subscription, vcd
"""
data = """
<vcloud:DiskCreateParams xmlns:vcloud="http://www.vmware.com/vcloud/v1.5">
<vcloud:Disk name="%s" size="%s"/>
</vcloud:DiskCreateParams>
""" % (name, size)
vdc = self.get_vdc(vdc_name)
content_type = "application/vnd.vmware.vcloud.diskCreateParams+xml"
link = filter(lambda link: link.get_type() == content_type, vdc.get_Link())
self.response = Http.post(link[0].get_href(), data=data, headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.created:
disk = self._parse_disk(self.response.content)
return(True, disk)
else:
return(False, self.response.content)
def delete_disk(self, vdc_name, name, id=None):
"""
Request the deletion of an existing disk within a vdc.
:param vdc_name: (str): The name of the virtual data center.
:param name: (str): The name of the disk to delete.
:param id: (str, optional): The id of the disk resource.
:return: (tuple(bool, TaskType)) Two values are returned, a bool success indicator and a \
:class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object if the bool value was True or a \
str message indicating the reason for failure if the bool value was False.
**service type:** ondemand, subscription, vcd
"""
vdc = self.get_vdc(vdc_name)
refs = self.get_diskRefs(vdc)
link = []
if id is not None:
link = filter(lambda link: link.get_href().endswith('/'+id), refs)
elif name is not None:
link = filter(lambda link: link.get_name() == name, refs)
if len(link) == 1:
self.response = Http.delete(link[0].get_href(), headers=self.vcloud_session.get_vcloud_headers(), verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
task = taskType.parseString(self.response.content, True)
return (True, task)
else:
return(False, self.response.content)
elif len(link) == 0:
return(False, 'disk not found')
elif len(link) > 1:
return(False, 'more than one disk found with that name; use the disk id')
def cancel_task(self, task_url):
"""Request cancellation of the task at task_url; return True on success."""
self.response = Http.post(task_url + '/action/cancel', headers=self.vcloud_session.get_vcloud_headers(),
verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
else:
Log.error(self.logger, "can't cancel task")
return False
def get_status(self, code):
return self.statuses[code+1]
def get_vdc_templates(self):
if self.vcloud_session.organization is None:
self.vcloud_session.login(token=self.vcloud_session.token)
vdcTemplateList = self._get_vdc_templates()
return vdcTemplateList
| apache-2.0 | -1,525,826,246,397,108,200 | 47.967884 | 214 | 0.601021 | false | 4.024272 | false | false | false | 0.004334 |
chuan9/chromium-crosswalk | native_client_sdk/src/tools/httpd.py | 51 | 6989 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import BaseHTTPServer
import logging
import multiprocessing
import os
import SimpleHTTPServer # pylint: disable=W0611
import socket
import sys
import time
import urlparse
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_SDK_ROOT = os.path.dirname(SCRIPT_DIR)
# We only run from the examples directory so that not too much is exposed
# via this HTTP server. Everything in the directory is served, so there should
# never be anything potentially sensitive in the serving directory, especially
# if the machine might be a multi-user machine and not all users are trusted.
# We only serve via the loopback interface.
def SanityCheckDirectory(dirname):
abs_serve_dir = os.path.abspath(dirname)
# Verify we don't serve anywhere above NACL_SDK_ROOT.
if abs_serve_dir[:len(NACL_SDK_ROOT)] == NACL_SDK_ROOT:
return
logging.error('For security, httpd.py should only be run from within the')
logging.error('example directory tree.')
logging.error('Attempting to serve from %s.' % abs_serve_dir)
logging.error('Run with --no-dir-check to bypass this check.')
sys.exit(1)
class HTTPServer(BaseHTTPServer.HTTPServer):
def __init__(self, *args, **kwargs):
BaseHTTPServer.HTTPServer.__init__(self, *args)
self.running = True
self.result = 0
def Shutdown(self, result=0):
self.running = False
self.result = result
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def _SendNothingAndDie(self, result=0):
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', '0')
self.end_headers()
self.server.Shutdown(result)
def do_GET(self):
# Browsing to ?quit=1 will kill the server cleanly.
_, _, _, query, _ = urlparse.urlsplit(self.path)
if query:
params = urlparse.parse_qs(query)
if '1' in params.get('quit', []):
self._SendNothingAndDie()
return
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
class LocalHTTPServer(object):
"""Class to start a local HTTP server as a child process."""
def __init__(self, dirname, port):
parent_conn, child_conn = multiprocessing.Pipe()
self.process = multiprocessing.Process(
target=_HTTPServerProcess,
args=(child_conn, dirname, port, {}))
self.process.start()
if parent_conn.poll(10): # wait 10 seconds
self.port = parent_conn.recv()
else:
raise Exception('Unable to launch HTTP server.')
self.conn = parent_conn
def ServeForever(self):
"""Serve until the child HTTP process tells us to stop.
Returns:
The result from the child (as an errorcode), or 0 if the server was
killed not by the child (by KeyboardInterrupt for example).
"""
child_result = 0
try:
# Block on this pipe, waiting for a response from the child process.
child_result = self.conn.recv()
except KeyboardInterrupt:
pass
finally:
self.Shutdown()
return child_result
def ServeUntilSubprocessDies(self, process):
"""Serve until the child HTTP process tells us to stop or |subprocess| dies.
Returns:
The result from the child (as an errorcode), or 0 if |subprocess| died,
or the server was killed some other way (by KeyboardInterrupt for
example).
"""
child_result = 0
try:
while True:
if process.poll() is not None:
child_result = 0
break
if self.conn.poll():
child_result = self.conn.recv()
break
time.sleep(0)
except KeyboardInterrupt:
pass
finally:
self.Shutdown()
return child_result
def Shutdown(self):
"""Send a message to the child HTTP server process and wait for it to
finish."""
self.conn.send(False)
self.process.join()
def GetURL(self, rel_url):
"""Get the full url for a file on the local HTTP server.
Args:
rel_url: A URL fragment to convert to a full URL. For example,
GetURL('foobar.baz') -> 'http://localhost:1234/foobar.baz'
"""
return 'http://localhost:%d/%s' % (self.port, rel_url)
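# Minimal usage sketch for LocalHTTPServer (illustrative; serves the
# current directory on an ephemeral port):
#   server = LocalHTTPServer('.', 0)
#   print 'Serving at', server.GetURL('')
#   server.Shutdown()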
def _HTTPServerProcess(conn, dirname, port, server_kwargs):
"""Run a local httpserver with the given port or an ephemeral port.
This function assumes it is run as a child process using multiprocessing.
Args:
conn: A connection to the parent process. The child process sends
the local port, and waits for a message from the parent to
stop serving. It also sends a "result" back to the parent -- this can
be used to allow a client-side test to notify the server of results.
dirname: The directory to serve. All files are accessible through
http://localhost:<port>/path/to/filename.
port: The port to serve on. If 0, an ephemeral port will be chosen.
server_kwargs: A dict that will be passed as kwargs to the server.
"""
try:
os.chdir(dirname)
httpd = HTTPServer(('', port), HTTPRequestHandler, **server_kwargs)
except socket.error as e:
sys.stderr.write('Error creating HTTPServer: %s\n' % e)
sys.exit(1)
try:
conn.send(httpd.server_address[1]) # the chosen port number
httpd.timeout = 0.5 # seconds
while httpd.running:
# Flush output for MSVS Add-In.
sys.stdout.flush()
sys.stderr.flush()
httpd.handle_request()
if conn.poll():
httpd.running = conn.recv()
except KeyboardInterrupt:
pass
finally:
conn.send(httpd.result)
conn.close()
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('-C', '--serve-dir',
help='Serve files out of this directory.',
default=os.path.abspath('.'))
parser.add_argument('-p', '--port',
help='Run server on this port.', default=5103)
parser.add_argument('--no-dir-check', '--no_dir_check',
help='No check to ensure serving from safe directory.',
dest='do_safe_check', action='store_false', default=True)
# To enable bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete httpd.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options = parser.parse_args(args)
if options.do_safe_check:
SanityCheckDirectory(options.serve_dir)
server = LocalHTTPServer(options.serve_dir, int(options.port))
# Serve until the client tells us to stop. When it does, it will give us an
# errorcode.
print 'Serving %s on %s...' % (options.serve_dir, server.GetURL(''))
return server.ServeForever()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 2,621,170,791,323,516,400 | 30.913242 | 80 | 0.676635 | false | 3.814956 | false | false | false | 0.010016 |