"""Tools for parsing a regular expression into a Pattern."""
import collections
import string
import charsource
import pattern as p
# Characters that represent themselves in a regular expression.
# TODO(jasonpr): Handle $ and ^ specially at edges of regex.
_CHAR_LITERALS = string.ascii_letters + string.digits + '!"#$%&\',-/:;<=>@^_`~]} \t\n\r'
# Characters that represent themselves inside a square-bracket expression.
_GROUP_CHARS = string.ascii_letters + string.digits + '!"#$%&\'()*+,-./:;<=>?@[^_`{|}~'
# Characters that represent themselves when escaped with a backslash.
_IDENTITY_ESCAPES = r'.[\()*+?{|'
# Characters that represent a character class when escaped with a backslash.
_CHARACTER_CLASSES = {
'd': string.digits,
'w': string.ascii_letters + string.digits + '_',
'h': string.hexdigits,
# TODO(jasonpr): Make an informed decision, rather than blindly
    # inheriting this definition from Python.
's': string.whitespace,
}
_BRACKET_CHARACTER_CLASSES = {
'alnum': set(string.ascii_letters + string.digits),
'alpha': set(string.ascii_letters),
'digit': set(string.digits),
'lower': set(string.ascii_lowercase),
'print': set(string.printable),
'punct': set(string.punctuation),
# TODO(jasonpr): Make an informed decision, rather than blindly
    # inheriting this definition from Python.
'space': set(string.whitespace),
'upper': set(string.ascii_uppercase),
'xdigit': set(string.hexdigits),
}
def parse_regex(regex_string):
"""Convert a regular expression string into a Pattern."""
return _parse_regex(charsource.GetPutSource(regex_string))
# The following _parse_* methods form a recursive descent parser
# that respects the order of operations in a regular expression.
def _parse_regex(source):
"""Parse any regex into a Pattern."""
return _parse_alternation(source)
def _parse_alternation(source):
"""Parse an alternation expression, like 'ab|cd|ef'."""
parts = []
# Act as though the last character was a '|', so we get the
# initial element of the alternation.
last_char = '|'
while last_char == '|':
parts.append(_parse_concatenation(source))
last_char = source.get()
# Put back the non-alternation character.
source.put(last_char)
return p.Or(*parts)
def _parse_concatenation(source):
"""Parse a concatenation expression, like 'abc' or 'a(b|c)d*'."""
parts = []
duplication = _parse_duplication(source)
# If we're expecting a concatenation, there MUST be at least
# one (first) element!
assert duplication
while duplication:
parts.append(duplication)
duplication = _parse_duplication(source)
return p.Sequence(*parts)
def _parse_duplication(source):
"""Parse a duplication expression, like 'a*' or '(a|b){3,5}'."""
duplicated = _parse_parenthesization(source)
if not duplicated:
return None
duplicator = source.get()
if duplicator == '?':
return p.Maybe(duplicated)
elif duplicator == '*':
return p.Star(duplicated)
elif duplicator == '+':
return p.Plus(duplicated)
elif duplicator == '{':
min_repeats = _parse_positive_int(source)
range_continuation = source.get()
# We will ultimately expect a closing curly brace, but
# we might see a comma and a max repeats value, first.
if range_continuation == ',':
max_repeats = _parse_positive_int(source)
range_continuation = source.get()
else:
max_repeats = min_repeats
if range_continuation != '}':
raise ValueError('Expected "}", but got "%s".' %
range_continuation)
return p.Repeat(duplicated, min_repeats, max_repeats)
else:
source.put(duplicator)
return duplicated
def _parse_parenthesization(source):
"""Parse a parenthesization pattern, like '(a|b)' or '[ab]' or 'a'.
Note that '[ab]' is a parenthesization, since it is equivalent
to '([ab])'. Similarly, 'a' is equivalent to '(a)'.
"""
first_char = source.get()
if first_char == '(':
enclosed_regex = _parse_regex(source)
close_paren = source.get()
assert close_paren == ')'
return enclosed_regex
# Otherwise, this must just be a group. (Groups have just as
# tight of binding as a parenthesization.)
source.put(first_char)
return _parse_group(source)
def _parse_group(source):
"""Parse a group pattern, like '[abc]' or 'a'.
Note that 'a' is a group, since 'a' is equivalent to '[a]'.
"""
first_char = source.get()
if first_char == '[':
second_char = source.get()
if second_char == '^':
negating = True
else:
source.put(second_char)
negating = False
group_chars = _parse_group_chars(source)
result = p.Selection(group_chars, negating)
close_brace = source.get()
assert close_brace == ']'
return result
# Otherwise, it's a single normal character.
source.put(first_char)
return _parse_atom(source)
def _parse_group_chars(source):
"""Parse the characters from a group specification.
This is just a string of characters allowable in a group specification.
For example, a valid parse is 'aA1.?', since '[aA1.?]' is a valid group.
"""
chars = set()
while True:
range_chars = _parse_group_range(source)
if range_chars:
for char in range_chars:
chars.add(char)
continue
char_class = _parse_char_class(source)
if char_class:
chars |= char_class
continue
char = source.get()
if not char:
raise ValueError('Unexpected end of stream.')
if char not in _GROUP_CHARS:
source.put(char)
break
chars.add(char)
return ''.join(chars)
def _parse_atom(source):
"""Parse a single regex atom.
An atom is a period ('.'), a character literal, or an escape sequence.
"""
char = source.get()
if not char:
# For good measure, put the EOF back on!
# This doesn't really do anything, since the source will
# generate EOFs forever.
source.put(char)
return None
elif char == '.':
return p.Anything()
elif char in _CHAR_LITERALS:
return p.String(char)
elif char == '\\':
escaped = source.get()
        if escaped in _IDENTITY_ESCAPES:
return p.String(escaped)
elif escaped in _CHARACTER_CLASSES:
return p.Selection(_CHARACTER_CLASSES[escaped])
else:
            raise ValueError('Unexpected escape sequence, \\%s.' % escaped)
else:
source.put(char)
return None
def _parse_positive_int(source):
"""Parse a positive integer.
That is, parse a sequence of one or more digits.
"""
digits = []
next_char = source.get()
assert next_char and next_char in string.digits
while next_char and next_char in string.digits:
digits.append(next_char)
next_char = source.get()
source.put(next_char)
return int(''.join(digits))
def _parse_group_range(source):
"""Parse a three-character group range expression.
Return the set of characters represented by the range.
For example, parsing the expression 'c-e' from the source returns
set(['c', 'd', 'e']).
"""
start = source.get()
if start not in _GROUP_CHARS:
source.put(start)
return None
middle = source.get()
if middle != '-':
source.put(middle)
source.put(start)
return None
end = source.get()
if end not in _GROUP_CHARS:
source.put(end)
source.put(middle)
source.put(start)
return None
range_chars = set()
for ascii_value in range(ord(start), ord(end) + 1):
range_chars.add(chr(ascii_value))
return range_chars
def _parse_char_class(source):
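    """Parse a POSIX-style character class like '[:digit:]', returning its character set or None."""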
for class_name, class_contents in _BRACKET_CHARACTER_CLASSES.iteritems():
        if _parse_verbatim(source, '[:%s:]' % class_name):
            return class_contents
    return None
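# Illustrative usage sketch (not part of the original module). The Pattern
# constructors come from the local `pattern` module, which is not shown here,
# so everything beyond the constructors already used above is an assumption.
#
#   pat = parse_regex('ab|c[0-9]{2,3}')
#   # -> p.Or(p.Sequence(p.String('a'), p.String('b')),
#   #         p.Sequence(p.String('c'),
#   #                    p.Repeat(p.Selection(<the digits 0-9>), 2, 3)))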
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 8 14:27:22 2016
@author: Viktor
"""
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
from skimage.io import imread
from skimage.io import imshow
from skimage.morphology import opening, closing
from scipy import ndimage
from sklearn.neighbors import KNeighborsClassifier
# Load the MNIST dataset
mnist = fetch_mldata('MNIST original')
print(mnist.data.shape)
print(mnist.target.shape)
print(np.unique(mnist.target))
img = 255-mnist.data[12345]
img = img.reshape(28,28)
plt.imshow(-img, cmap='Greys')
# Read the dataset and store the samples for each digit in a matrix for easier access
numbers = [0]*10
for digit in range(10):
    numbers[digit] = mnist['data'][np.where(mnist['target'] == float(digit))[0]]
test = numbers[0][123]
res = numbers[0][123] == numbers[0][124]
percent_hit = np.count_nonzero(res) / 784.0
representative_number = [0]*10
for j in range(0,10):
representative_number[j] = np.zeros(np.shape(numbers[j][0]), dtype='float')
for i in range(0,len(numbers[j])):
representative_number[j] = representative_number[j] + numbers[j][i]
representative_number[j] = (representative_number[j])/len(numbers[j])
def processing(path):
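    # Threshold the image, clean it up with morphological closing/opening, and count the connected components.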
img = imread(path)
gray = rgb2gray(img)
binary = 1 - (gray > 0.5)
binary = closing(binary)
binary = opening(binary)
labeled, nr_objects = ndimage.label(binary)
return nr_objects
def poklapanje(niz1, niz2):
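    # "poklapanje" (match): return the fraction of positions at which the two sequences agree.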
mera_poklapanja = 0.0
for i in range(0,len(niz1)):
if(niz1[i]==niz2[i]):
mera_poklapanja = mera_poklapanja + 1
return mera_poklapanja/len(niz1)
def ucitavanje(path):
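    # "ucitavanje" (loading): read image file names from the first column of a tab-separated listing, skipping the two header lines.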
image_path = []
with open(path) as f:
data = f.read()
lines = data.split('\n')
for i, line in enumerate(lines):
if(i>1):
cols = line.split('\t')
if(cols[0]!=''):
image_path.append(cols[0])
f.close()
return image_path
def upis(path,image_path,result):
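    # "upis" (writing): write a header followed by one "<file>\t<sum>" line per image.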
with open(path,'w') as f:
f.write('RA 1/2013 Viktor Sanca\n')
f.write('file\tsum\n')
for i in range(0,len(image_path)):
f.write(image_path[i]+'\t'+str(result[i])+'\n')
f.close()
def get_img(image_path):
img = imread(image_path)
gray = rgb2gray(img)
#gray = closing(gray)
#gray = opening(gray)
#binary = (gray < 0.5)
return gray
def binarize(img):
return img>1
def rgb2gray(img_rgb):
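    # Convert an RGB image to a single-channel uint8 image using a weighted sum of the colour channels.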
img_gray = np.ndarray((img_rgb.shape[0], img_rgb.shape[1]))
img_gray = 0.8*img_rgb[:, :, 0] + 0.2*img_rgb[:, :, 1] + 1*img_rgb[:, :, 2]
img_gray = img_gray.astype('uint8')
return img_gray
def mark_indices(image):
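    # Scan the 640x480 image for top-left corners of 28x28 windows whose four corner pixels are dark (value < 10).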
starting_indices = []
img = image.reshape(640*480)
for i in range(0,(640)*(480-28)):
if(img[i]<10 and img[i+27]<10 and img[i+27*(640)]<10 and img[i+27*(640)+27]<10):
starting_indices.append(i)
return starting_indices
def get_image_from_indice(image,start_indice):
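    # Copy the 28x28 window that starts at the given flat index out of the 640x480 image.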
image28_28 = np.empty((28*28),dtype='uint8')
img = image.reshape(640*480)
for i in range(0,28):
for j in range(0,28):
image28_28[28*i+j]=img[start_indice+i*(640)+j]
return image28_28
def find_number(image28_28):
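    # Compare the binarized 28x28 window with every MNIST sample of each digit and return the digit whose best sample shares the most pixels.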
mmx = [0]*10
for i in range(0,10):
for j in range(0,len(numbers[i])):
res = binarize(image28_28) == binarize(numbers[i][j])
if(np.count_nonzero(res)>mmx[i]):
mmx[i]=np.count_nonzero(res)
return max_idx(mmx)
def max_idx(lista):
mx = max(lista)
for i in range(0,len(lista)):
if(lista[i]==mx):
return i
return -1
image_path = []
result = []
in_path = 'level-1-mnist-train/level-1-mnist/out.txt'
out_path = 'level-1-mnist-test/level-1-mnist-test/out.txt'
train_path = 'level-1-mnist-train/level-1-mnist/'
test_path = 'level-1-mnist-test/level-1-mnist-test/'
image_paths = ucitavanje(out_path)
#knn = KNeighborsClassifier()
knn = KNeighborsClassifier(n_neighbors=2000,weights='distance',algorithm='auto',n_jobs=-1)
knn.fit(mnist.data,mnist.target)
suma = [0]*len(image_paths)
for i in range(0,len(image_paths)):
print('Image'+str(i+1)+'/'+str(len(image_paths)))
img = get_img(test_path+image_paths[i])
start_indices = mark_indices(img.reshape(640*480))
for start_indice in start_indices:
img_d = get_image_from_indice(img,start_indice)
#nr = find_number(img_d)
nr = knn.predict(img_d)
suma[i] = suma[i] + nr[0]
suma[i] = int(suma[i])
for i in range(0,len(suma)):
suma[i] = float(suma[i])
upis(out_path, image_paths, suma)
image28_28 = img_d
mmx = [0]*10
for i in range(0,10):
for j in range(0,len(numbers[i])):
res = image28_28 == numbers[i][j]
if(np.count_nonzero(res)>mmx[i]):
mmx[i]=np.count_nonzero(res)
total = np.zeros(784, dtype='float')
for i in range(0,10):
total = total + representative_number[i]
img = representative_number[4]
img = img.reshape(28,28)
plt.imshow(img, cmap='Greys')
check = numbers[5][123]
import gtk
import gobject
import pygame
import pygame.event
class _MockEvent(object):
def __init__(self, keyval):
self.keyval = keyval
class Translator(object):
key_trans = {
'Alt_L': pygame.K_LALT,
'Alt_R': pygame.K_RALT,
'Control_L': pygame.K_LCTRL,
'Control_R': pygame.K_RCTRL,
'Shift_L': pygame.K_LSHIFT,
'Shift_R': pygame.K_RSHIFT,
'Super_L': pygame.K_LSUPER,
'Super_R': pygame.K_RSUPER,
'KP_Page_Up' : pygame.K_KP9,
'KP_Page_Down' : pygame.K_KP3,
'KP_End' : pygame.K_KP1,
'KP_Home' : pygame.K_KP7,
'KP_Up' : pygame.K_KP8,
'KP_Down' : pygame.K_KP2,
'KP_Left' : pygame.K_KP4,
'KP_Right' : pygame.K_KP6,
'numbersign' : pygame.K_HASH,
'percent' : ord('%'),
'exclam' : pygame.K_EXCLAIM,
'asciicircum' : pygame.K_CARET,
'parenleft' : pygame.K_LEFTPAREN,
'parenright' : pygame.K_RIGHTPAREN,
'braceleft' : ord('{'),
'braceright' : ord('}'),
'bracketleft' : pygame.K_LEFTBRACKET,
'bracketright' : pygame.K_RIGHTBRACKET,
'apostrophe' : ord('\''),
'equal' : pygame.K_EQUALS,
'grave' : pygame.K_BACKQUOTE,
'Caps_Lock' : pygame.K_CAPSLOCK,
'Page_Up' : pygame.K_PAGEUP,
'Page_Down' : pygame.K_PAGEDOWN,
'Num_Lock' : pygame.K_NUMLOCK,
'Bar' : ord('|')
}
mod_map = {
pygame.K_LALT: pygame.KMOD_LALT,
pygame.K_RALT: pygame.KMOD_RALT,
pygame.K_LCTRL: pygame.KMOD_LCTRL,
pygame.K_RCTRL: pygame.KMOD_RCTRL,
pygame.K_LSHIFT: pygame.KMOD_LSHIFT,
pygame.K_RSHIFT: pygame.KMOD_RSHIFT,
}
def __init__(self, mainwindow, inner_evb):
"""Initialise the Translator with the windows to which to listen"""
self._mainwindow = mainwindow
self._inner_evb = inner_evb
# Enable events
# (add instead of set here because the main window is already realized)
self._mainwindow.add_events(
gtk.gdk.KEY_PRESS_MASK | \
gtk.gdk.KEY_RELEASE_MASK | \
gtk.gdk.VISIBILITY_NOTIFY_MASK
)
self._inner_evb.set_events(
gtk.gdk.POINTER_MOTION_MASK | \
gtk.gdk.POINTER_MOTION_HINT_MASK | \
gtk.gdk.BUTTON_MOTION_MASK | \
gtk.gdk.BUTTON_PRESS_MASK | \
gtk.gdk.BUTTON_RELEASE_MASK
)
self._mainwindow.set_flags(gtk.CAN_FOCUS)
self._inner_evb.set_flags(gtk.CAN_FOCUS)
# Callback functions to link the event systems
self._mainwindow.connect('unrealize', self._quit_cb)
self._mainwindow.connect('visibility_notify_event', self._visibility)
self._inner_evb.connect('key_press_event', self._keydown_cb)
self._inner_evb.connect('key_release_event', self._keyup_cb)
self._inner_evb.connect('button_press_event', self._mousedown_cb)
self._inner_evb.connect('button_release_event', self._mouseup_cb)
self._inner_evb.connect('motion-notify-event', self._mousemove_cb)
self._inner_evb.connect('expose-event', self._expose_cb)
self._inner_evb.connect('configure-event', self._resize_cb)
self._inner_evb.connect('screen-changed', self._screen_changed_cb)
# Internal data
self.__stopped = False
self.__keystate = [0] * 323
self.__button_state = [0,0,0]
self.__mouse_pos = (0,0)
self.__repeat = (None, None)
self.__held = set()
self.__held_time_left = {}
self.__held_last_time = {}
self.__held_last_value = {}
self.__tick_id = None
def hook_pygame(self):
pygame.key.get_pressed = self._get_pressed
pygame.key.set_repeat = self._set_repeat
pygame.mouse.get_pressed = self._get_mouse_pressed
pygame.mouse.get_pos = self._get_mouse_pos
def _visibility(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
return False
def _expose_cb(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
return True
def _resize_cb(self, widget, event):
evt = pygame.event.Event(pygame.VIDEORESIZE,
size=(event.width,event.height), width=event.width, height=event.height)
pygame.event.post(evt)
return False # continue processing
def _screen_changed_cb(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
def _quit_cb(self, data=None):
self.__stopped = True
pygame.event.post(pygame.event.Event(pygame.QUIT))
def _keydown_cb(self, widget, event):
key = event.hardware_keycode
keyval = event.keyval
if key in self.__held:
return True
else:
if self.__repeat[0] is not None:
self.__held_last_time[key] = pygame.time.get_ticks()
self.__held_time_left[key] = self.__repeat[0]
self.__held_last_value[key] = keyval
self.__held.add(key)
return self._keyevent(widget, event, pygame.KEYDOWN)
def _keyup_cb(self, widget, event):
key = event.hardware_keycode
if self.__repeat[0] is not None:
if key in self.__held:
# This is possibly false if set_repeat() is called with a key held
del self.__held_time_left[key]
del self.__held_last_time[key]
del self.__held_last_value[key]
self.__held.discard(key)
return self._keyevent(widget, event, pygame.KEYUP)
def _keymods(self):
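        # Build a pygame modifier bitmask (KMOD_*) from the currently pressed modifier keys.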
mod = 0
for key_val, mod_val in self.mod_map.iteritems():
mod |= self.__keystate[key_val] and mod_val
return mod
def _keyevent(self, widget, event, type):
key = gtk.gdk.keyval_name(event.keyval)
if key is None:
# No idea what this key is.
return False
keycode = None
if key in self.key_trans:
keycode = self.key_trans[key]
elif hasattr(pygame, 'K_'+key.upper()):
keycode = getattr(pygame, 'K_'+key.upper())
elif hasattr(pygame, 'K_'+key.lower()):
keycode = getattr(pygame, 'K_'+key.lower())
elif key == 'XF86Start':
# view source request, specially handled...
self._mainwindow.view_source()
else:
print 'Key %s unrecognized' % key
if keycode is not None:
if type == pygame.KEYDOWN:
mod = self._keymods()
self.__keystate[keycode] = type == pygame.KEYDOWN
if type == pygame.KEYUP:
mod = self._keymods()
ukey = unichr(gtk.gdk.keyval_to_unicode(event.keyval))
if ukey == '\000':
ukey = ''
evt = pygame.event.Event(type, key=keycode, unicode=ukey, mod=mod)
self._post(evt)
return True
def _get_pressed(self):
return self.__keystate
def _get_mouse_pressed(self):
return self.__button_state
def _mousedown_cb(self, widget, event):
self.__button_state[event.button-1] = 1
widget.grab_focus()
return self._mouseevent(widget, event, pygame.MOUSEBUTTONDOWN)
def _mouseup_cb(self, widget, event):
self.__button_state[event.button-1] = 0
return self._mouseevent(widget, event, pygame.MOUSEBUTTONUP)
def _mouseevent(self, widget, event, type):
evt = pygame.event.Event(type, button=event.button, pos=(event.x, event.y))
self._post(evt)
return True
def _mousemove_cb(self, widget, event):
# From http://www.learningpython.com/2006/07/25/writing-a-custom-widget-using-pygtk/
# if this is a hint, then let's get all the necessary
# information, if not it's all we need.
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x = event.x
y = event.y
state = event.state
rel = (x - self.__mouse_pos[0], y - self.__mouse_pos[1])
self.__mouse_pos = (x, y)
self.__button_state = [
state & gtk.gdk.BUTTON1_MASK and 1 or 0,
state & gtk.gdk.BUTTON2_MASK and 1 or 0,
state & gtk.gdk.BUTTON3_MASK and 1 or 0,
]
evt = pygame.event.Event(pygame.MOUSEMOTION,
pos=self.__mouse_pos, rel=rel, buttons=self.__button_state)
self._post(evt)
return True
def _tick_cb(self):
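        # Timer callback implementing key repeat: for each held key whose repeat timer has expired, emit another KEYDOWN event.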
cur_time = pygame.time.get_ticks()
for key in self.__held:
delta = cur_time - self.__held_last_time[key]
self.__held_last_time[key] = cur_time
self.__held_time_left[key] -= delta
if self.__held_time_left[key] <= 0:
self.__held_time_left[key] = self.__repeat[1]
self._keyevent(None, _MockEvent(self.__held_last_value[key]), pygame.KEYDOWN)
return True
def _set_repeat(self, delay=None, interval=None):
if delay is not None and self.__repeat[0] is None:
            self.__tick_id = gobject.timeout_add(10, self._tick_cb)
import json
import os
import sys
from datetime import datetime, timedelta
import wptserve
from wptserve import sslutils
from . import environment as env
from . import instruments
from . import mpcontext
from . import products
from . import testloader
from . import wptcommandline
from . import wptlogging
from . import wpttest
from mozlog import capture, handlers
from .font import FontInstaller
from .testrunner import ManagerGroup
here = os.path.dirname(__file__)
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
return logger
def get_loader(test_paths, product, debug=None, run_info_extras=None, chunker_kwargs=None,
test_groups=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product,
browser_version=kwargs.get("browser_version"),
browser_channel=kwargs.get("browser_channel"),
verify=kwargs.get("verify"),
debug=debug,
extras=run_info_extras,
enable_webrender=kwargs.get("enable_webrender"))
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
include = kwargs["include"]
if kwargs["include_file"]:
include = include or []
include.extend(testloader.read_include_from_file(kwargs["include_file"]))
if test_groups:
include = testloader.update_include_for_groups(test_groups, include)
if include or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
manifest_filters.append(testloader.TestFilter(include=include,
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests,
explicit=kwargs["default_exclude"]))
ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
h2_enabled = wptserve.utils.http2_compatible()
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_enabled,
include_h2=h2_enabled,
include_webtransport_h3=kwargs["enable_webtransport_h3"],
skip_timeout=kwargs["skip_timeout"],
skip_implementation_status=kwargs["skip_implementation_status"],
chunker_kwargs=chunker_kwargs)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print(item)
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test_type, tests in test_loader.disabled_tests.items():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print(json.dumps(rv, indent=2))
def list_tests(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test in test_loader.test_ids:
print(test)
def get_pause_after_test(test_loader, **kwargs):
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["headless"]:
return False
if kwargs["debug_test"]:
return True
tests = test_loader.tests
is_single_testharness = (sum(len(item) for item in tests.values()) == 1 and
len(tests.get("testharness", [])) == 1)
if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
return True
return False
return kwargs["pause_after_test"]
def run_test_iteration(test_status, test_loader, test_source_kwargs, test_source_cls, run_info,
recording, test_environment, product, run_test_kwargs):
"""Runs the entire test suite.
This is called for each repeat run requested."""
tests = []
for test_type in test_loader.test_types:
tests.extend(test_loader.tests[test_type])
try:
test_groups = test_source_cls.tests_by_group(
tests, **test_source_kwargs)
except Exception:
logger.critical("Loading tests failed")
return False
logger.suite_start(test_groups,
name='web-platform-test',
run_info=run_info,
extra={"run_by_dir": run_test_kwargs["run_by_dir"]})
for test_type in run_test_kwargs["test_types"]:
logger.info(f"Running {test_type} tests")
browser_cls = product.get_browser_cls(test_type)
browser_kwargs = product.get_browser_kwargs(logger,
test_type,
run_info,
config=test_environment.config,
num_test_groups=len(test_groups),
**run_test_kwargs)
executor_cls = product.executor_classes.get(test_type)
executor_kwargs = product.get_executor_kwargs(logger,
test_type,
test_environment,
run_info,
**run_test_kwargs)
if executor_cls is None:
logger.error(f"Unsupported test type {test_type} for product {product.name}")
continue
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
test_status.skipped += 1
if test_type == "testharness":
run_tests = {"testharness": []}
for test in test_loader.tests["testharness"]:
if ((test.testdriver and not executor_cls.supports_testdriver) or
(test.jsshell and not executor_cls.supports_jsshell)):
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
test_status.skipped += 1
else:
run_tests["testharness"].append(test)
else:
run_tests = test_loader.tests
recording.pause()
with ManagerGroup("web-platform-tests",
run_test_kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
run_test_kwargs["rerun"],
run_test_kwargs["pause_after_test"],
run_test_kwargs["pause_on_unexpected"],
run_test_kwargs["restart_on_unexpected"],
run_test_kwargs["debug_info"],
not run_test_kwargs["no_capture_stdio"],
recording=recording) as manager_group:
try:
manager_group.run(test_type, run_tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
test_status.total_tests += manager_group.test_count()
test_status.unexpected += manager_group.unexpected_count()
test_status.unexpected_pass += manager_group.unexpected_pass_count()
return True
def evaluate_runs(test_status, run_test_kwargs):
"""Evaluates the test counts after the given number of repeat runs has finished"""
if test_status.total_tests == 0:
if test_status.skipped > 0:
logger.warning("All requested tests were skipped")
else:
if run_test_kwargs["default_exclude"]:
logger.info("No tests ran")
return True
else:
logger.critical("No tests ran")
return False
if test_status.unexpected and not run_test_kwargs["fail_on_unexpected"]:
logger.info(f"Tolerating {test_status.unexpected} unexpected results")
return True
all_unexpected_passed = (test_status.unexpected and
test_status.unexpected == test_status.unexpected_pass)
if all_unexpected_passed and not run_test_kwargs["fail_on_unexpected_pass"]:
logger.info(f"Tolerating {test_status.unexpected_pass} unexpected results "
"because they all PASS")
return True
return test_status.unexpected == 0
class TestStatus:
"""Class that stores information on the results of test runs for later reference"""
def __init__(self):
self.total_tests = 0
self.skipped = 0
self.unexpected = 0
self.unexpected_pass = 0
self.repeated_runs = 0
self.expected_repeated_runs = 0
self.all_skipped = False
def run_tests(config, test_paths, product, **kwargs):
"""Set up the test environment, load the list of tests to be executed, and
invoke the remainder of the code to execute tests"""
mp = mpcontext.get_context()
if kwargs["instrument_to_file"] is None:
recorder = instruments.NullInstrument()
else:
recorder = instruments.Instrument(kwargs["instrument_to_file"])
with recorder as recording, capture.CaptureIO(logger,
not kwargs["no_capture_stdio"],
mp_context=mp):
recording.set(["startup"])
env.do_delayed_imports(logger, test_paths)
product = products.Product(config, product)
env_extras = product.get_env_extras(**kwargs)
product.check_args(**kwargs)
if kwargs["install_fonts"]:
env_extras.append(FontInstaller(
logger,
font_dir=kwargs["font_dir"],
ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
))
recording.set(["startup", "load_tests"])
test_groups = (testloader.TestGroupsFile(logger, kwargs["test_groups_file"])
if kwargs["test_groups_file"] else None)
(test_source_cls,
test_source_kwargs,
chunker_kwargs) = testloader.get_test_src(logger=logger,
test_groups=test_groups,
**kwargs)
run_info, test_loader = get_loader(test_paths,
product.name,
run_info_extras=product.run_info_extras(**kwargs),
chunker_kwargs=chunker_kwargs,
test_groups=test_groups,
**kwargs)
logger.info("Using %i client processes" % kwargs["processes"])
test_status = TestStatus()
repeat = kwargs["repeat"]
        test_status.expected_repeated_runs = repeat
if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
logger.critical("Unable to find any tests at the path(s):")
for path in kwargs["test_list"]:
logger.critical(" %s" % path)
logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
return False, test_status
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
ssl_config = {"type": kwargs["ssl_type"],
"openssl": {"openssl_binary": kwargs["openssl_binary"]},
"pregenerated": {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}}
        testharness_timeout_multiplier = product.get_timeout_multiplier("testharness",
run_info,
**kwargs)
mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None
recording.set(["startup", "start_environment"])
with env.TestEnvironment(test_paths,
                                 testharness_timeout_multiplier,
kwargs["pause_after_test"],
kwargs["debug_test"],
kwargs["debug_info"],
product.env_options,
ssl_config,
env_extras,
kwargs["enable_webtransport_h3"],
mojojs_path) as test_environment:
recording.set(["startup", "ensure_environment"])
try:
test_environment.ensure_started()
start_time = datetime.now()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e)
raise
recording.set(["startup"])
max_time = None
if "repeat_max_time" in kwargs:
max_time = timedelta(minutes=kwargs["repeat_max_time"])
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
# keep track of longest time taken to complete a test suite iteration
# so that the runs can be stopped to avoid a possible TC timeout.
longest_iteration_time = timedelta()
while test_status.repeated_runs < repeat or repeat_until_unexpected:
# if the next repeat run could cause the TC timeout to be reached,
# stop now and use the test results we have.
# Pad the total time by 10% to ensure ample time for the next iteration(s).
estimate = (datetime.now() +
timedelta(seconds=(longest_iteration_time.total_seconds() * 1.1)))
if not repeat_until_unexpected and max_time and estimate >= start_time + max_time:
logger.info(f"Ran {test_status.repeated_runs} of {repeat} iterations.")
break
# begin tracking runtime of the test suite
iteration_start = datetime.now()
test_status.repeated_runs += 1
if repeat_until_unexpected:
logger.info(f"Repetition {test_status.repeated_runs}")
elif repeat > 1:
logger.info(f"Repetition {test_status.repeated_runs} / {repeat}")
iter_success = run_test_iteration(test_status, test_loader, test_source_kwargs,
test_source_cls, run_info, recording,
test_environment, product, kwargs)
# if there were issues with the suite run(tests not loaded, etc.) return
if not iter_success:
return False, test_status
recording.set(["after-end"])
logger.info(f"Got {test_status.unexpected} unexpected results, "
f"with {test_status.unexpected_pass} unexpected passes")
logger.suite_end()
# Note this iteration's runtime
iteration_runtime = datetime.now() - iteration_start
# determine the longest test suite runtime seen.
longest_iteration_time = max(longest_iteration_time,
iteration_runtime)
if repeat_until_unexpected and test_status.unexpected > 0:
break
if test_status.repeated_runs == 1 and len(test_loader.test_ids) == test_status.skipped:
test_status.all_skipped = True
break
# Return the evaluation of the runs and the number of repeated iterations that were run.
return evaluate_runs(test_status, kwargs), test_status
def check_stability(**kwargs):
from . import stability
if kwargs["stability"]:
logger.warning("--stability is deprecated; please use --verify instead!")
kwargs['verify_max_time'] = None
kwargs['verify_chaos_mode'] = False
kwargs['verify_repeat_loop'] = 0
kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
kwargs['verify_output_results'] = True
return stability.check_stability(logger,
max_time=kwargs['verify_max_time'],
chaos_mode=kwargs['verify_chaos_mode'],
repeat_loop=kwargs['verify_repeat_loop'],
repeat_restart=kwargs['verify_repeat_restart'],
output_results=kwargs['verify_output_results'],
**kwargs)
def start(**kwargs):
assert logger is not None
logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
    handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
# -*- coding: utf-8 -*-
"""
This module contains a POI Manager core class which gives capability to mark
points of interest, re-optimise their position, and keep track of sample drift
over time.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
import ctypes # is a foreign function library for Python. It provides C
# compatible data types, and allows calling functions in DLLs
# or shared libraries. It can be used to wrap these libraries
# in pure Python.
from interface.wavemeter_interface import WavemeterInterface
from core.base import Base
from core.util.mutex import Mutex
class HardwarePull(QtCore.QObject):
""" Helper class for running the hardware communication in a separate thread. """
# signal to deliver the wavelength to the parent class
sig_wavelength = QtCore.Signal(float, float)
def __init__(self, parentclass):
super().__init__()
# remember the reference to the parent class to access functions ad settings
self._parentclass = parentclass
def handle_timer(self, state_change):
""" Threaded method that can be called by a signal from outside to start the timer.
        @param bool state_change: (True) starts the timer, (False) stops it.
"""
if state_change:
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self._measure_thread)
self.timer.start(self._parentclass._measurement_timing)
else:
if hasattr(self, 'timer'):
self.timer.stop()
def _measure_thread(self):
""" The threaded method querying the data from the wavemeter.
"""
# update as long as the state is busy
if self._parentclass.getState() == 'running':
# get the current wavelength from the wavemeter
temp1=float(self._parentclass._wavemeterdll.GetWavelength(0))
temp2=float(self._parentclass._wavemeterdll.GetWavelength(0))
# send the data to the parent via a signal
self.sig_wavelength.emit(temp1, temp2)
class HighFinesseWavemeter(Base,WavemeterInterface):
_modclass = 'HighFinesseWavemeter'
_modtype = 'hardware'
## declare connectors
_out = {'highfinessewavemeter': 'WavemeterInterface'}
sig_handle_timer = QtCore.Signal(bool)
#############################################
# Flags for the external DLL
#############################################
# define constants as flags for the wavemeter
_cCtrlStop = ctypes.c_uint16(0x00)
# this following flag is modified to override every existing file
_cCtrlStartMeasurment = ctypes.c_uint16(0x1002)
    _cReturnWavelengthAir = ctypes.c_long(0x0001)
    _cReturnWavelengthVac = ctypes.c_long(0x0000)
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
#locking for thread safety
self.threadlock = Mutex()
# the current wavelength read by the wavemeter in nm (vac)
self._current_wavelength=0.0
self._current_wavelength2=0.0
# time between two measurement points of the wavemeter in milliseconds
if 'measurement_timing' in config.keys():
self._measurement_timing=config['measurement_timing']
else:
self._measurement_timing = 10.
self.log.warning('No measurement_timing configured, '\
'using {} instead.'.format(self._measurement_timing))
def on_activate(self, e):
#############################################
# Initialisation to access external DLL
#############################################
try:
# imports the spectrometer specific function from dll
self._wavemeterdll = ctypes.windll.LoadLibrary('wlmData.dll')
except:
self.log.critical('There is no Wavemeter installed on this '
'Computer.\nPlease install a High Finesse Wavemeter and '
'try again.')
# define the use of the GetWavelength function of the wavemeter
# self._GetWavelength2 = self._wavemeterdll.GetWavelength2
# return data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength2.restype = ctypes.c_double
# parameter data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength2.argtypes = [ctypes.c_double]
# define the use of the GetWavelength function of the wavemeter
# self._GetWavelength = self._wavemeterdll.GetWavelength
# return data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength.restype = ctypes.c_double
# parameter data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength.argtypes = [ctypes.c_double]
# define the use of the ConvertUnit function of the wavemeter
# self._ConvertUnit = self._wavemeterdll.ConvertUnit
# return data type of the ConvertUnit function of the wavemeter
self._wavemeterdll.ConvertUnit.restype = ctypes.c_double
# parameter data type of the ConvertUnit function of the wavemeter
self._wavemeterdll.ConvertUnit.argtypes = [ctypes.c_double, ctypes.c_long, ctypes.c_long]
# manipulate perdefined operations with simple flags
# self._Operation = self._wavemeterdll.Operation
# return data type of the Operation function of the wavemeter
self._wavemeterdll.Operation.restype = ctypes.c_long
# parameter data type of the Operation function of the wavemeter
self._wavemeterdll.Operation.argtypes = [ctypes.c_ushort]
# create an indepentent thread for the hardware communication
self.hardware_thread = QtCore.QThread()
# create an object for the hardware communication and let it live on the new thread
self._hardware_pull = HardwarePull(self)
self._hardware_pull.moveToThread(self.hardware_thread)
# connect the signals in and out of the threaded object
self.sig_handle_timer.connect(self._hardware_pull.handle_timer)
self._hardware_pull.sig_wavelength.connect(self.handle_wavelength)
# start the event loop for the hardware
self.hardware_thread.start()
def on_deactivate(self, e):
if self.getState() != 'idle' and self.getState() != 'deactivated':
self.stop_acqusition()
self.hardware_thread.quit()
self.sig_handle_timer.disconnect()
self._hardware_pull.sig_wavelength.disconnect()
try:
# clean up by removing reference to the ctypes library object
del self._wavemeterdll
return 0
except:
self.log.error('Could not unload the wlmData.dll of the '
'wavemeter.')
#############################################
# Methods of the main class
#############################################
def handle_wavelength(self, wavelength1, wavelength2):
""" Function to save the wavelength, when it comes in with a signal.
"""
self._current_wavelength = wavelength1
self._current_wavelength2 = wavelength2
def start_acqusition(self):
""" Method to start the wavemeter software.
@return int: error code (0:OK, -1:error)
Also the actual threaded method for getting the current wavemeter reading is started.
"""
# first check its status
if self.getState() == 'running':
self.log.error('Wavemeter busy')
return -1
self.run()
# actually start the wavemeter
self._wavemeterdll.Operation(self._cCtrlStartMeasurment) #starts measurement
# start the measuring thread
self.sig_handle_timer.emit(True)
return 0
def stop_acqusition(self):
""" Stops the Wavemeter from measuring and kills the thread that queries the data.
@return int: error code (0:OK, -1:error)
"""
# check status just for a sanity check
        if self.getState() == 'idle':
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_link_status
version_added: "2.4"
short_description: Get interface link status on HUAWEI CloudEngine switches.
description:
- Get interface link status on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
notes:
- Current physical state shows an interface's physical status.
- Current link state shows an interface's link layer protocol status.
- Current IPv4 state shows an interface's IPv4 protocol status.
- Current IPv6 state shows an interface's IPv6 protocol status.
- Inbound octets(bytes) shows the number of bytes that an interface received.
- Inbound unicast(pkts) shows the number of unicast packets that an interface received.
- Inbound multicast(pkts) shows the number of multicast packets that an interface received.
- Inbound broadcast(pkts) shows the number of broadcast packets that an interface received.
- Inbound error(pkts) shows the number of error packets that an interface received.
- Inbound drop(pkts) shows the total number of packets that were sent to the interface but dropped by an interface.
- Inbound rate(byte/sec) shows the rate at which an interface receives bytes within an interval.
- Inbound rate(pkts/sec) shows the rate at which an interface receives packets within an interval.
- Outbound octets(bytes) shows the number of the bytes that an interface sent.
- Outbound unicast(pkts) shows the number of unicast packets that an interface sent.
- Outbound multicast(pkts) shows the number of multicast packets that an interface sent.
- Outbound broadcast(pkts) shows the number of broadcast packets that an interface sent.
- Outbound error(pkts) shows the total number of packets that an interface sent but dropped by the remote interface.
- Outbound drop(pkts) shows the number of dropped packets that an interface sent.
- Outbound rate(byte/sec) shows the rate at which an interface sends bytes within an interval.
- Outbound rate(pkts/sec) shows the rate at which an interface sends packets within an interval.
- Speed shows the rate for an Ethernet interface.
options:
interface:
description:
- For the interface parameter, you can enter C(all) to display information about all interface,
an interface type such as C(40GE) to display information about interfaces of the specified type,
or full name of an interface such as C(40GE1/0/22) or C(vlanif10)
to display information about the specific interface.
required: true
'''
EXAMPLES = '''
- name: Link status test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Get specified interface link status information
ce_link_status:
interface: 40GE1/0/1
provider: "{{ cli }}"
- name: Get specified interface type link status information
ce_link_status:
interface: 40GE
provider: "{{ cli }}"
- name: Get all interface link status information
ce_link_status:
interface: all
provider: "{{ cli }}"
'''
RETURN = '''
result:
description: Interface link status information
returned: always
type: dict
sample: {
"40ge2/0/8": {
"Current IPv4 state": "down",
"Current IPv6 state": "down",
"Current link state": "up",
"Current physical state": "up",
"Inbound broadcast(pkts)": "0",
"Inbound drop(pkts)": "0",
"Inbound error(pkts)": "0",
"Inbound multicast(pkts)": "20151",
"Inbound octets(bytes)": "7314813",
"Inbound rate(byte/sec)": "11",
"Inbound rate(pkts/sec)": "0",
"Inbound unicast(pkts)": "0",
"Outbound broadcast(pkts)": "1",
"Outbound drop(pkts)": "0",
"Outbound error(pkts)": "0",
"Outbound multicast(pkts)": "20152",
"Outbound octets(bytes)": "7235021",
"Outbound rate(byte/sec)": "11",
"Outbound rate(pkts/sec)": "0",
"Outbound unicast(pkts)": "0",
"Speed": "40GE"
}
}
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config
CE_NC_GET_PORT_SPEED = """
<filter type="subtree">
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ports>
<port>
<position>%s</position>
<ethernetPort>
<speed></speed>
</ethernetPort>
</port>
</ports>
</devm>
</filter>
"""
CE_NC_GET_INT_STATISTICS = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<ifDynamicInfo>
<ifPhyStatus></ifPhyStatus>
<ifLinkStatus></ifLinkStatus>
<ifV4State></ifV4State>
<ifV6State></ifV6State>
</ifDynamicInfo>
<ifStatistics>
<receiveByte></receiveByte>
<sendByte></sendByte>
<rcvUniPacket></rcvUniPacket>
<rcvMutiPacket></rcvMutiPacket>
<rcvBroadPacket></rcvBroadPacket>
<sendUniPacket></sendUniPacket>
<sendMutiPacket></sendMutiPacket>
<sendBroadPacket></sendBroadPacket>
<rcvErrorPacket></rcvErrorPacket>
<rcvDropPacket></rcvDropPacket>
<sendErrorPacket></sendErrorPacket>
<sendDropPacket></sendDropPacket>
</ifStatistics>
<ifClearedStat>
<inByteRate></inByteRate>
<inPacketRate></inPacketRate>
<outByteRate></outByteRate>
<outPacketRate></outPacketRate>
</ifClearedStat>
</interface>
</interfaces>
</ifm>
</filter>
"""
INTERFACE_ALL = 1
INTERFACE_TYPE = 2
INTERFACE_FULL_NAME = 3
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def is_ethernet_port(interface):
"""Judge whether it is ethernet port"""
ethernet_port = ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'meth']
if_type = get_interface_type(interface)
if if_type in ethernet_port:
return True
return False
class LinkStatus(object):
"""Get interface link status information"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface name
self.interface = self.module.params['interface']
self.interface = self.interface.replace(' ', '').lower()
self.param_type = None
self.if_type = None
# state
self.results = dict()
self.result = dict()
def check_params(self):
"""Check all input params"""
if not self.interface:
self.module.fail_json(msg='Error: Interface name cannot be empty.')
if self.interface and self.interface != 'all':
if not self.if_type:
self.module.fail_json(
msg='Error: Interface name of %s is error.' % self.interface)
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def show_result(self):
"""Show result"""
self.results['result'] = self.result
self.module.exit_json(**self.results)
def get_intf_dynamic_info(self, dyn_info, intf_name):
"""Get interface dynamic information"""
if not intf_name:
return
if dyn_info:
for eles in dyn_info:
if eles.tag in ["ifPhyStatus", "ifV4State", "ifV6State", "ifLinkStatus"]:
if eles.tag == "ifPhyStatus":
self.result[intf_name][
'Current physical state'] = eles.text
elif eles.tag == "ifLinkStatus":
self.result[intf_name][
'Current link state'] = eles.text
elif eles.tag == "ifV4State":
self.result[intf_name][
'Current IPv4 state'] = eles.text
elif eles.tag == "ifV6State":
self.result[intf_name][
'Current IPv6 state'] = eles.text
def get_intf_statistics_info(self, stat_info, intf_name):
"""Get interface statistics information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if stat_info:
for eles in stat_info:
if eles.tag in ["receiveByte", "sendByte", "rcvUniPacket", "rcvMutiPacket", "rcvBroadPacket",
"sendUniPacket", "sendMutiPacket", "sendBroadPacket", "rcvErrorPacket",
"rcvDropPacket", "sendErrorPacket", "sendDropPacket"]:
if eles.tag == "receiveByte":
self.result[intf_name][
'Inbound octets(bytes)'] = eles.text
elif eles.tag == "rcvUniPacket":
self.result[intf_name][
'Inbound unicast(pkts)'] = eles.text
elif eles.tag == "rcvMutiPacket":
self.result[intf_name][
'Inbound multicast(pkts)'] = eles.text
elif eles.tag == "rcvBroadPacket":
self.result[intf_name][
'Inbound broadcast(pkts)'] = eles.text
elif eles.tag == "rcvErrorPacket":
self.result[intf_name][
'Inbound error(pkts)'] = eles.text
elif eles.tag == "rcvDropPacket":
self.result[intf_name][
'Inbound drop(pkts)'] = eles.text
elif eles.tag == "sendByte":
self.result[intf_name][
'Outbound octets(bytes)'] = eles.text
elif eles.tag == "sendUniPacket":
self.result[intf_name][
'Outbound unicast(pkts)'] = eles.text
elif eles.tag == "sendMutiPacket":
self.result[intf_name][
'Outbound multicast(pkts)'] = eles.text
elif eles.tag == "sendBroadPacket":
self.result[intf_name][
'Outbound broadcast(pkts)'] = eles.text
elif eles.tag == "sendErrorPacket":
self.result[intf_name][
'Outbound error(pkts)'] = eles.text
elif eles.tag == "sendDropPacket":
self.result[intf_name][
'Outbound drop(pkts)'] = eles.text
def get_intf_cleared_stat(self, clr_stat, intf_name):
"""Get interface cleared state information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if clr_stat:
for eles in clr_stat:
if eles.tag in ["inByteRate", "inPacketRate", "outByteRate", "outPacketRate"]:
if eles.tag == "inByteRate":
self.result[intf_name][
'Inbound rate(byte/sec)'] = eles.text
elif eles.tag == "inPacketRate":
self.result[intf_name][
'Inbound rate(pkts/sec)'] = eles.text
elif eles.tag == "outByteRate":
self.result[intf_name][
'Outbound rate(byte/sec)'] = eles.text
elif eles.tag == "outPacketRate":
self.result[intf_name][
'Outbound rate(pkts/sec)'] = eles.text
def get_all_interface_info(self, intf_type=None):
"""Get interface information all or by interface type"""
xml_str = CE_NC_GET_INT_STATISTICS % ''
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intfs_info = root.find("data/ifm/interfaces")
if not intfs_info:
return
intf_name = ''
flag = False
for eles in intfs_info:
if eles.tag == "interface":
for ele in eles:
if ele.tag in ["ifName", "ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if ele.tag == "ifName":
intf_name = ele.text.lower()
if intf_type:
if get_interface_type(intf_name) != intf_type.lower():
break
else:
flag = True
self.init_interface_data(intf_name)
if is_ethernet_port(intf_name):
self.get_port_info(intf_name)
if ele.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(ele, intf_name)
elif ele.tag == "ifStatistics":
self.get_intf_statistics_info(ele, intf_name)
elif ele.tag == "ifClearedStat":
self.get_intf_cleared_stat(ele, intf_name)
if intf_type and not flag:
self.module.fail_json(
msg='Error: %s interface type does not exist.' % intf_type.upper())
def get_interface_info(self):
"""Get interface information"""
xml_str = CE_NC_GET_INT_STATISTICS % self.interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
self.module.fail_json(
msg='Error: %s interface does not exist.' % self.interface.upper())
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intf_info = root.find("data/ifm/interfaces/interface")
if intf_info:
for eles in intf_info:
if eles.tag in ["ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if eles.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(eles, self.interface)
elif eles.tag == "ifStatistics":
self.get_intf_statistics_info(eles, self.interface)
elif eles.tag == "ifClearedStat":
self.get_intf_cleared_stat(eles, self.interface)
def init_interface_data(self, intf_name):
"""Init interface data"""
# init link status data
self.result[intf_name] = dict()
self.result[intf_name]['Current physical state'] = 'down'
self.result[intf_name]['Current link state'] = 'down'
self.result[intf_name]['Current IPv4 state'] = 'down'
self.result[intf_name]['Current IPv6 state'] = 'down'
self.result[intf_name]['Inbound octets(bytes)'] = '--'
self.result[intf_name]['Inbound unicast(pkts)'] = '--'
self.result[intf_name]['Inbound multicast(pkts)'] = '--'
self.result[intf_name]['Inbound broadcast(pkts)'] = '--'
self.result[intf_name]['Inbound error(pkts)'] = '--'
self.result[intf_name]['Inbound drop(pkts)'] = '--'
self.result[intf_name]['Inbound rate(byte/sec)'] = '--'
self.result[intf_name]['Inbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Outbound octets(bytes)'] = '--'
self.result[intf_name]['Outbound unicast(pkts)'] = '--'
self.result[intf_name]['Outbound multicast(pkts)'] = '--'
self.result[intf_name]['Outbound broadcast(pkts)'] = '--'
self.result[intf_name]['Outbound error(pkts)'] = '--'
self.result[intf_name]['Outbound drop(pkts)'] = '--'
self.result[intf_name]['Outbound rate(byte/sec)'] = '--'
self.result[intf_name]['Outbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Speed'] = '--'
def get_port_info(self, interface):
"""Get port information"""
if_type = get_interface_type(interface)
if if_type == 'meth':
xml_str = CE_NC_GET_PORT_SPEED % interface.lower().replace('meth', 'MEth')
else:
xml_str = CE_NC_GET_PORT_SPEED % interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
port_info = root.find("data/devm/ports/port")
if port_info:
for eles in port_info:
if eles.tag == "ethernetPort":
for ele in eles:
if ele.tag == 'speed':
self.result[interface]['Speed'] = ele.text
def get_link_status(self):
"""Get link status information"""
if self.param_type == INTERFACE_FULL_NAME:
self.init_interface_data(self.interface)
self.get_interface_info()
if is_ethernet_port(self.interface):
self.get_port_info(self.interface)
elif self.param_type == INTERFACE_TYPE:
self.get_all_interface_info(self.interface)
else:
self.get_all_interface_info()
def get_intf_param_type(self):
"""Get the type of input interface parameter"""
| if self.interface == 'all': | 1,718 | lcc_e | python | null | ac75f42af7295a6d31f508d44bcc5ac42fd665147ac86554 |
|
"""
Convert human-editable CSV files into JSON files, used by the web application.
"""
import json
import csv
from io import StringIO
from datetime import datetime
################################################################################
# CONFIG
# behavior categories to include in the JSON file
categories = set(('G', 'M', 'W', 'C', 'F', 'H', 'I', 'P', 'V',)) #'A', 'L', 'O', 'E', 'S'
# time of first GPS point
firstGPStime = datetime(2014,1,24,5,36,14)
# seconds between each GPS point
intervalseconds = 60
class InFileNames:
observations = 'behavior observation codes.csv'
translations = 'behavior code translations.csv'
mediafeatures = 'media features.json'
gpstrack = 'GPS track.csv'
pictures = 'pictures.csv'
textbubbles = 'text bubbles.csv'
videos = 'videos.csv'
class OutFileNames:
behavior = 'behavior.json' # observations + translations
behaviorcsv = 'behavior observation data.csv'
media = 'media.js' # pictures, videos, text, media features
tourIntro = {
'loc': [10.5142232962, -85.3693762701],
'note': 'intro',
'data': [],
'time': '05:30:00',
}
tourStart = {
'loc': [10.5142232962, -85.3693762701],
'note': 'start',
'data': [],
'time': '05:30:00',
}
tourEnd = {
'loc': [10.5143646989, -85.3639992792], #[10.5148555432, -85.3643822484],
'note': 'end',
'data': [],
'time': '18:10:43',
}
# monkey patch json encoder to format floats
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.5f')
################################################################################
# GPS track
with open(InFileNames.gpstrack) as f:
reader = csv.reader(f, skipinitialspace=True)
GPStrack = [(float(lat[:9]), float(lon[:10])) for (lat,lon) in list(reader)[1:]]
def parsetime(timestr):
"""
Get the time from a string, ignore the date.
(Return a datetime with the date of the first GPS point.)
"""
# take out the date (get only the last space-separated part)
timestr = timestr.split()[-1]
time = datetime.strptime(timestr, '%H:%M:%S').time()
return datetime.combine(firstGPStime.date(), time)
def getTimeInterval(time):
"""
Get start and end points on the GPS track, of the time interval containing "time".
"""
index = int((time - firstGPStime).total_seconds() / intervalseconds)
interval = GPStrack[index:index+2]
if len(interval) == 2:
return interval
# if the time is past the last GPS point, return an interval with just the last GPS point
else:
return (GPStrack[-1], GPStrack[-1])
def getGPSCoords(time):
"""
Get a geographical point along Winslow Homer's GPS track, by linear interpolation
"""
# get start and stop
start, stop = getTimeInterval(time)
timediff = (time - firstGPStime).total_seconds()
proportion = (timediff % intervalseconds) / float(intervalseconds)
latdelta = (stop[0] - start[0])
lat = (proportion * latdelta) + start[0]
londelta = (stop[1] - start[1])
lon = (proportion * londelta) + start[1]
return (lat, lon)
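# Worked sketch of the interpolation above (the track points and values are
# hypothetical, for illustration only): a timestamp 30 seconds after
# firstGPStime falls halfway through the first 60-second interval, so
# getGPSCoords() returns the midpoint of the first two track points.
#
#   >>> GPStrack[:2]
#   [(10.51422, -85.36937), (10.51430, -85.36931)]
#   >>> getGPSCoords(datetime(2014, 1, 24, 5, 36, 44))
#   (10.51426, -85.36934)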
def loadTranslationsFile():
"""
Load the translations file, return a list of dicts with the fields in the file
"""
with open(InFileNames.translations) as f:
reader = csv.DictReader(f, skipinitialspace=True)
return list(reader)
def loadObservationFile(translations=None):
"""
Load the observations file, return a list with a dict for each observation
record, and a set with all of the unique behavior codes.
"""
# ordered list of observations in file
observations = []
# set of codes we've seen
codes = set()
with open(InFileNames.observations) as f:
reader = csv.DictReader(f, skipinitialspace=True)
for line in reader:
# look up GPS coordinates from timestamp
line['loc'] = getGPSCoords(parsetime(line['timestamp']))
# add a 'time' field without the date, to display to user
line['time'] = line['timestamp'].split()[1]
observations.append(line)
codes.add(line['code'])
return observations, codes
def filterObservationsTranslations():
"""
Return (observations, translations) list containing the intersection
(inner join) of the observations and translations, and only in the
configured categories.
"""
translations = loadTranslationsFile()
observations, obs_code_set = loadObservationFile()
# Find codes that occur in the observations, and are in the right categories.
# Make a {code : translation-fields} 2-dimensional dict.
translations_dict = {
t['code'] : t
for t in translations
if (t['code'] in obs_code_set) and (t['category'].upper() in categories) }
# Find observations that have a translation.
observations = list(filter(lambda o: o['code'] in translations_dict, observations))
return observations, translations_dict
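# Sketch of the shapes produced above (hypothetical rows, not from the real CSVs):
#   translations_dict ~ {'FG': {'code': 'FG', 'category': 'F', ...}, ...}
#   observations      ~ [{'code': 'FG', 'timestamp': '1/24/2014 6:12:03',
#                         'time': '6:12:03', 'loc': (lat, lon), ...}, ...]
# i.e. an inner join on 'code', restricted to the configured behavior categories.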
def writeBehaviorJSON(observations, translations_dict, tourlist):
"""
Write behavior JSON file, with observations and translations joined.
"""
#observations, translations_dict = filterObservationsTranslations()
# join together observations with translations
| behavior_list = [ checkOnTour(tourlist, o, | 609 | lcc_e | python | null | a9b4285cd9b366f640dd9663e9dd44dfdc153065ee32a6d8 |
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <[email protected]>
# Copyright (C) 2007-2009 Gary Burton <[email protected]>
# Copyright (C) 2007-2009 Stephane Charette <[email protected]>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <[email protected]>
# Copyright (C) 2008-2011 Rob G. Healey <[email protected]>
# Copyright (C) 2010 Doug Blank <[email protected]>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Class:
StatisticsPage
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (Person, Family, Event, Place, Source,
Citation, Repository)
from gramps.gen.plug.report import Bibliography
from gramps.gen.utils.file import media_path_full
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import FULLCLEAR
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
_ = glocale.translation.sgettext
class StatisticsPage(BasePage):
"""
Create one page for statistics
"""
def __init__(self, report, title, step):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
"""
import posixpath
BasePage.__init__(self, report, title)
self.bibli = Bibliography()
self.uplink = False
self.report = report
# set the file name and open file
output_file, sio = self.report.create_file("statistics")
addressbookpage, head, body = self.write_header(_("Statistics"))
(males,
females,
unknown) = self.get_gender(report.database.iter_person_handles())
step()
mobjects = report.database.get_number_of_media()
npersons = report.database.get_number_of_people()
nfamilies = report.database.get_number_of_families()
nsurnames = len(set(report.database.surname_list))
notfound = []
total_media = 0
mbytes = "0"
chars = 0
for media in report.database.iter_media():
total_media += 1
fullname = media_path_full(report.database, media.get_path())
try:
chars += posixpath.getsize(fullname)
length = len(str(chars))
if chars <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(chars)[:(length-6)]
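                # Worked example of the size formatting above (hypothetical total):
                # chars = 12345678 -> length = 8 -> str(chars)[:2] == '12', i.e.
                # roughly 12 MB; totals of 999999 bytes or less report "less than 1".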
except OSError:
notfound.append(media.get_path())
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3", self._("Database overview"), inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec11:
sec11 += Html("h4", self._("Individuals"), inline=True)
body += sec11
with Html("div", class_="content", id='subsection narrative') as sec1:
sec1 += Html("br", self._("Number of individuals") + self.colon +
"%d" % npersons, inline=True)
sec1 += Html("br", self._("Males") + self.colon +
"%d" % males, inline=True)
sec1 += Html("br", self._("Females") + self.colon +
"%d" % females, inline=True)
sec1 += Html("br", self._("Individuals with unknown gender") +
self.colon + "%d" % unknown, inline=True)
body += sec1
with Html("div", class_="content", id='subsection narrative') as sec2:
sec2 += Html("h4", self._("Family Information"), inline=True)
sec2 += Html("br", self._("Number of families") + self.colon +
"%d" % nfamilies, inline=True)
sec2 += Html("br", self._("Unique surnames") + self.colon +
"%d" % nsurnames, inline=True)
body += sec2
with Html("div", class_="content", id='subsection narrative') as sec3:
sec3 += Html("h4", self._("Media Objects"), inline=True)
sec3 += Html("br",
self._("Total number of media object references") +
self.colon + "%d" % total_media, inline=True)
sec3 += Html("br", self._("Number of unique media objects") +
self.colon + "%d" % mobjects, inline=True)
sec3 += Html("br", self._("Total size of media objects") +
self.colon +
"%8s %s" % (mbytes, self._("Megabyte|MB")),
inline=True)
sec3 += Html("br", self._("Missing Media Objects") +
self.colon + "%d" % len(notfound), inline=True)
body += sec3
with Html("div", class_="content", id='subsection narrative') as sec4:
sec4 += Html("h4", self._("Miscellaneous"), inline=True)
sec4 += Html("br", self._("Number of events") + self.colon +
"%d" % report.database.get_number_of_events(),
inline=True)
sec4 += Html("br", self._("Number of places") + self.colon +
"%d" % report.database.get_number_of_places(),
inline=True)
nsources = report.database.get_number_of_sources()
sec4 += Html("br", self._("Number of sources") +
self.colon + "%d" % nsources,
inline=True)
ncitations = report.database.get_number_of_citations()
sec4 += Html("br", self._("Number of citations") +
self.colon + "%d" % ncitations,
inline=True)
nrepo = report.database.get_number_of_repositories()
sec4 += Html("br", self._("Number of repositories") +
self.colon + "%d" % nrepo,
inline=True)
body += sec4
(males,
females,
unknown) = self.get_gender(self.report.bkref_dict[Person].keys())
origin = " :<br/>" + report.filter.get_name(self.rlocale)
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3",
self._("Narrative web content report for") + origin,
inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec5:
sec5 += Html("h4", self._("Individuals"), inline=True)
sec5 += Html("br", self._("Number of individuals") + self.colon +
"%d" % len(self.report.bkref_dict[Person]),
inline=True)
sec5 += Html("br", self._("Males") + self.colon +
"%d" % males, inline=True)
sec5 += Html("br", self._("Females") + self.colon +
"%d" % females, inline=True)
sec5 += Html("br", self._("Individuals with unknown gender") +
self.colon + "%d" % unknown, inline=True)
body += sec5
with Html("div", class_="content", id='subsection narrative') as sec6:
sec6 += Html("h4", self._("Family Information"), inline=True)
sec6 += Html("br", self._("Number of families") + self.colon +
"%d" % len(self.report.bkref_dict[Family]),
inline=True)
body += sec6
with Html("div", class_="content", id='subsection narrative') as sec7:
sec7 += Html("h4", self._("Miscellaneous"), inline=True)
sec7 += Html("br", self._("Number of events") + self.colon +
"%d" % len(self.report.bkref_dict[Event]),
inline=True)
sec7 += Html("br", self._("Number of places") + self.colon +
"%d" % len(self.report.bkref_dict[Place]),
inline=True)
sec7 += Html("br", self._("Number of sources") + self.colon +
"%d" % len(self.report.bkref_dict[Source]),
inline=True)
sec7 += Html("br", self._("Number of citations") + self.colon +
"%d" % len(self.report.bkref_dict[Citation]),
inline=True)
sec7 += Html("br", self._("Number of repositories") + self.colon +
"%d" % len(self.report.bkref_dict[Repository]),
inline=True)
body += sec7
# add fullclear for proper styling
# and footer section to page
| footer = self.write_footer(None) | 960 | lcc_e | python | null | eed31a60b2b619d0d6e7f0a366fbd64a29d110c6a51d5a73 |
|
#!/usr/bin/env python
'''
Fly Helicopter in SITL
AP_FLAKE8_CLEAN
'''
from __future__ import print_function
from arducopter import AutoTestCopter
from common import AutoTest
from common import NotAchievedException, AutoTestTimeoutException
from pymavlink import mavutil
from pysim import vehicleinfo
class AutoTestHelicopter(AutoTestCopter):
sitl_start_loc = mavutil.location(40.072842, -105.230575, 1586, 0) # Sparkfun AVC Location
def vehicleinfo_key(self):
return 'Helicopter'
def log_name(self):
return "HeliCopter"
def default_frame(self):
return "heli"
def sitl_start_location(self):
return self.sitl_start_loc
def default_speedup(self):
'''Heli seems to be race-free'''
return 100
def is_heli(self):
return True
def rc_defaults(self):
ret = super(AutoTestHelicopter, self).rc_defaults()
ret[8] = 1000
ret[3] = 1000 # collective
return ret
@staticmethod
def get_position_armable_modes_list():
'''filter THROW mode out of armable modes list; Heli is special-cased'''
ret = AutoTestCopter.get_position_armable_modes_list()
ret = filter(lambda x : x != "THROW", ret)
return ret
def loiter_requires_position(self):
self.progress("Skipping loiter-requires-position for heli; rotor runup issues")
def get_collective_out(self):
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
chan_pwm = (servo.servo1_raw + servo.servo2_raw + servo.servo3_raw)/3.0
return chan_pwm
def rotor_runup_complete_checks(self):
# Takeoff and landing in Loiter
TARGET_RUNUP_TIME = 10
self.zero_throttle()
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
coll = servo.servo1_raw
coll = coll + 50
self.set_parameter("H_RSC_RUNUP_TIME", TARGET_RUNUP_TIME)
self.progress("Initiate Runup by putting some throttle")
self.set_rc(8, 2000)
self.set_rc(3, 1700)
self.progress("Collective threshold PWM %u" % coll)
tstart = self.get_sim_time()
        self.progress("Wait for collective PWM to pass threshold value")
servo = self.mav.recv_match(condition='SERVO_OUTPUT_RAW.servo1_raw>%u' % coll, blocking=True)
runup_time = self.get_sim_time() - tstart
self.progress("Collective is now at PWM %u" % servo.servo1_raw)
self.mav.wait_heartbeat()
if runup_time < TARGET_RUNUP_TIME:
self.zero_throttle()
self.set_rc(8, 1000)
self.disarm_vehicle()
self.mav.wait_heartbeat()
raise NotAchievedException("Takeoff initiated before runup time complete %u" % runup_time)
self.progress("Runup time %u" % runup_time)
self.zero_throttle()
self.set_rc(8, 1000)
self.land_and_disarm()
self.mav.wait_heartbeat()
# fly_avc_test - fly AVC mission
def fly_avc_test(self):
# Arm
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
# upload mission from file
self.progress("# Load copter_AVC2013_mission")
# load the waypoint count
num_wp = self.load_mission("copter_AVC2013_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_AVC2013_mission failed")
self.progress("Fly AVC mission from 1 to %u" % num_wp)
self.set_current_waypoint(1)
# wait for motor runup
self.delay_sim_time(20)
# switch into AUTO mode and raise throttle
self.change_mode('AUTO')
self.set_rc(3, 1500)
# fly the mission
self.wait_waypoint(0, num_wp-1, timeout=500)
# set throttle to minimum
self.zero_throttle()
# wait for disarm
self.wait_disarmed()
self.progress("MOTORS DISARMED OK")
self.progress("Lowering rotor speed")
self.set_rc(8, 1000)
self.progress("AVC mission completed: passed!")
def takeoff(self,
alt_min=30,
takeoff_throttle=1700,
require_absolute=True,
mode="STABILIZE",
timeout=120):
"""Takeoff get to 30m altitude."""
self.progress("TAKEOFF")
self.change_mode(mode)
if not self.armed():
self.wait_ready_to_arm(require_absolute=require_absolute, timeout=timeout)
self.zero_throttle()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
if mode == 'GUIDED':
self.user_takeoff(alt_min=alt_min)
else:
self.set_rc(3, takeoff_throttle)
self.wait_for_alt(alt_min=alt_min, timeout=timeout)
self.hover()
self.progress("TAKEOFF COMPLETE")
def fly_each_frame(self):
vinfo = vehicleinfo.VehicleInfo()
vinfo_options = vinfo.options[self.vehicleinfo_key()]
known_broken_frames = {
}
for frame in sorted(vinfo_options["frames"].keys()):
self.start_subtest("Testing frame (%s)" % str(frame))
if frame in known_broken_frames:
self.progress("Actually, no I'm not - it is known-broken (%s)" %
(known_broken_frames[frame]))
continue
frame_bits = vinfo_options["frames"][frame]
print("frame_bits: %s" % str(frame_bits))
if frame_bits.get("external", False):
self.progress("Actually, no I'm not - it is an external simulation")
continue
model = frame_bits.get("model", frame)
# the model string for Callisto has crap in it.... we
# should really have another entry in the vehicleinfo data
# to carry the path to the JSON.
actual_model = model.split(":")[0]
defaults = self.model_defaults_filepath(actual_model)
if type(defaults) != list:
defaults = [defaults]
self.customise_SITL_commandline(
["--defaults", ','.join(defaults), ],
model=model,
wipe=True,
)
self.takeoff(10)
self.do_RTL()
self.set_rc(8, 1000)
def hover(self):
self.progress("Setting hover collective")
self.set_rc(3, 1500)
def fly_heli_poshold_takeoff(self):
"""ensure vehicle stays put until it is ready to fly"""
self.context_push()
ex = None
try:
self.set_parameter("PILOT_TKOFF_ALT", 700)
self.change_mode('POSHOLD')
self.zero_throttle()
self.set_rc(8, 1000)
self.wait_ready_to_arm()
# Arm
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
self.delay_sim_time(20)
# check we are still on the ground...
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
max_relalt_mm = 1000
| if abs(m.relative_alt) > max_relalt_mm: | 577 | lcc_e | python | null | afc69ae78edec70b690890e285882cf26620106e9a4ecc3d |
|
import json
from django.core.management import call_command
from django.test import TestCase
from bulk_adding.models import RawPeople
from candidates.tests.uk_examples import UK2015ExamplesMixin
from official_documents.models import OfficialDocument
from parties.tests.factories import PartyFactory
from parties.tests.fixtures import DefaultPartyFixtures
from sopn_parsing.models import ParsedSOPN
from sopn_parsing.helpers import parse_tables
from ynr.apps.sopn_parsing.management.commands.sopn_parsing_parse_tables import (
Command as ParseTablesCommand,
)
from unittest import skipIf
from pandas import Index, Series
from sopn_parsing.tests import should_skip_pdf_tests
class TestSOPNHelpers(DefaultPartyFixtures, UK2015ExamplesMixin, TestCase):
def setUp(self):
PartyFactory(ec_id="PP85", name="UK Independence Party (UKIP)")
@skipIf(should_skip_pdf_tests(), "Required PDF libs not installed")
def test_basic_parsing(self):
self.assertFalse(RawPeople.objects.exists())
doc = OfficialDocument.objects.create(
ballot=self.dulwich_post_ballot,
document_type=OfficialDocument.NOMINATION_PAPER,
source_url="example.com",
relevant_pages="all",
)
dataframe = json.dumps(
{
"0": {
"0": "Name of \nCandidate",
"1": "BRADBURY \nAndrew John",
"2": "COLLINS \nDave",
"3": "HARVEY \nPeter John",
"4": "JENNER \nMelanie",
},
"1": {
"0": "Home Address",
"1": "10 Fowey Close, \nShoreham by Sea, \nWest Sussex, \nBN43 5HE",
"2": "51 Old Fort Road, \nShoreham by Sea, \nBN43 5RL",
"3": "76 Harbour Way, \nShoreham by Sea, \nSussex, \nBN43 5HH",
"4": "9 Flag Square, \nShoreham by Sea, \nWest Sussex, \nBN43 5RZ",
},
"2": {
"0": "Description (if \nany)",
"1": "Green Party",
"2": "Independent",
"3": "UK Independence \nParty (UKIP)",
"4": "Labour Party",
},
"3": {
"0": "Name of \nProposer",
"1": "Tiffin Susan J",
"2": "Loader Jocelyn C",
"3": "Hearne James H",
"4": "O`Connor Lavinia",
},
"4": {
"0": "Reason \nwhy no \nlonger \nnominated\n*",
"1": "",
"2": "",
"3": "",
"4": "",
},
}
)
ParsedSOPN.objects.create(
sopn=doc, raw_data=dataframe, status="unparsed"
)
call_command("sopn_parsing_parse_tables")
self.assertEqual(RawPeople.objects.count(), 1)
raw_people = RawPeople.objects.get()
self.assertEqual(
raw_people.data,
[
{"name": "Andrew John Bradbury", "party_id": "PP63"},
{"name": "Dave Collins", "party_id": "ynmp-party:2"},
{"name": "Peter John Harvey", "party_id": "PP85"},
{"name": "Melanie Jenner", "party_id": "PP53"},
],
)
class TestParseTablesUnitTests(TestCase):
def get_two_name_field_cases(self):
# this could be updated with more combinations as we come across them
return [
{
"name_fields": ["candidate surname", "candidate forename"],
"row": {
"candidate surname": "BAGSHAW",
"candidate forename": "Elaine Sheila",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": [
"candidate forename",
"candidate surname",
],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["surname", "other names"],
"row": {
"surname": "BAGSHAW",
"other names": "Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": ["other names", "surname"],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["last name", "other names"],
"row": {
"last name": "BAGSHAW",
"other names": "Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": ["other names", "last name"],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["candidate forename", "candidate surname"],
"row": {
"candidate forename": "Elaine Sheila",
"candidate surname": "BAGSHAW",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": [
"candidate forename",
"candidate surname",
],
"expected_name": "Elaine Sheila Bagshaw",
},
]
def get_single_name_field_cases(self):
return [
{
"name_fields": ["name of candidate"],
"row": {
"name of candidate": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["names of candidate"],
"row": {
"names of candidate": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["candidate name"],
"row": {
"candidate name": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["surname"],
"row": {
"surname": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["candidates surname"],
"row": {
"candidates surname": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["other name"],
"row": {
"other name": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
]
def test_get_name_single_field(self):
for case in self.get_single_name_field_cases():
row = Series(case["row"])
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
assert len(case["name_fields"]) == 1
name = parse_tables.get_name(row=row, name_fields=name_fields)
assert name == "Elaine Sheila Bagshaw"
def test_get_name_two_fields(self):
for case in self.get_two_name_field_cases():
row = Series(case["row"])
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
assert len(case["name_fields"]) == 2
name = parse_tables.get_name(row=row, name_fields=name_fields)
assert name == case["expected_name"]
def test_get_name_fields_single(self):
for case in self.get_single_name_field_cases():
row = Index(case["row"])
with self.subTest(row=row):
name_fields = parse_tables.get_name_fields(row=row)
assert len(name_fields) == 1
assert name_fields == case["name_fields"]
def test_get_name_fields_two(self):
for case in self.get_two_name_field_cases():
row = Index(case["row"])
with self.subTest(row=row):
name_fields = parse_tables.get_name_fields(row=row)
assert len(name_fields) == 2
assert name_fields == case["name_fields"]
def test_get_name_fields_raises_error(self):
row = Index({"foo": "Bar"})
with self.assertRaises(ValueError):
parse_tables.get_name_fields(row=row)
def test_order_name_fields(self):
for case in self.get_two_name_field_cases():
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
result = parse_tables.order_name_fields(name_fields)
assert result == case["ordered_name_fields"]
def test_clean_name_replaces_backticks(self):
name = parse_tables.clean_name("D`SOUZA")
assert "`" not in name
assert "'" in name
def test_clean_name_replaces_newlines(self):
name = parse_tables.clean_name(
"A Very Long Name That Splits \nOver Lines"
)
assert "\n" not in name
def test_clean_name_capitalized_last_and_titalized(self):
name = parse_tables.clean_name("SMITH John")
assert name == "John Smith"
def test_clean_last_names(self):
name = parse_tables.clean_last_names(["MACDONALD", "John"])
assert name == "MacDonald"
def test_clean_name_two_word_surnames(self):
names = [
("EDE COOPER \nPalmer", "Palmer Ede Cooper"),
("VAN DULKEN \nRichard Michael", "Richard Michael Van Dulken"),
("ARMSTRONG LILLEY \nLynne", "Lynne Armstrong Lilley"),
(
" D`SOUZA Aaron Anthony Jose \nHasan",
"Aaron Anthony Jose Hasan D'Souza",
),
("Michael James Collins", "Michael James Collins"),
(" Michael James Collins ", "Michael James Collins"),
("DAVE Nitesh Pravin", "Nitesh Pravin Dave"),
("DAVE\nNitesh Pravin", "Nitesh Pravin Dave"),
("COOKE Anne-Marie", "Anne-Marie Cooke"),
("COOKE\nAnne-Marie", "Anne-Marie Cooke"),
("BROOKES-\nDUNCAN\nKaty", "Katy Brookes-Duncan"),
("HOUNSOME\nJohn", "John Hounsome"),
("O`CONNELL \nStephen John", "Stephen John O'Connell"),
| ("O`NEAL \nCarol Joy", "Carol Joy O'Neal"), | 919 | lcc_e | python | null | 471d9cb59a1c21652129c86006d9d0c5592f5e0867def0cb |
|
"""
This module provides an abstraction for working with XModuleDescriptors
that are stored in a database an accessible using their Location as an identifier
"""
import logging
import re
import json
import datetime
from uuid import uuid4
from pytz import UTC
from collections import namedtuple, defaultdict
import collections
from contextlib import contextmanager
import functools
import threading
from operator import itemgetter
from sortedcontainers import SortedListWithKey
from abc import ABCMeta, abstractmethod
from contracts import contract, new_contract
from xblock.plugin import default_select
from .exceptions import InvalidLocationError, InsufficientSpecificationError
from xmodule.errortracker import make_error_tracker
from xmodule.assetstore import AssetMetadata
from opaque_keys.edx.keys import CourseKey, UsageKey, AssetKey
from opaque_keys.edx.locations import Location # For import backwards compatibility
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xblock.runtime import Mixologist
from xblock.core import XBlock
log = logging.getLogger('edx.modulestore')
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('XBlock', XBlock)
LIBRARY_ROOT = 'library.xml'
COURSE_ROOT = 'course.xml'
class ModuleStoreEnum(object):
"""
A class to encapsulate common constants that are used with the various modulestores.
"""
class Type(object):
"""
The various types of modulestores provided
"""
split = 'split'
mongo = 'mongo'
xml = 'xml'
class RevisionOption(object):
"""
Revision constants to use for Module Store operations
Note: These values are passed into store APIs and only used at run time
"""
# both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions
draft_preferred = 'rev-opt-draft-preferred'
# only DRAFT versions are queried and no PUBLISHED versions
draft_only = 'rev-opt-draft-only'
        # only PUBLISHED versions are queried and no DRAFT versions
published_only = 'rev-opt-published-only'
# all revisions are queried
all = 'rev-opt-all'
class Branch(object):
"""
Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED
Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps
"""
draft_preferred = 'draft-preferred'
published_only = 'published-only'
class BranchName(object):
"""
Branch constants to use for stores, such as Split, that have named branches
"""
draft = 'draft-branch'
published = 'published-branch'
library = 'library'
class UserID(object):
"""
Values for user ID defaults
"""
# Note: we use negative values here to (try to) not collide
# with user identifiers provided by actual user services.
# user ID to use for all management commands
mgmt_command = -1
# user ID to use for primitive commands
primitive_command = -2
# user ID to use for tests that do not have a django user available
test = -3
class SortOrder(object):
"""
Values for sorting asset metadata.
"""
ascending = 1
descending = 2
class BulkOpsRecord(object):
"""
For handling nesting of bulk operations
"""
def __init__(self):
self._active_count = 0
@property
def active(self):
"""
Return whether this bulk write is active.
"""
return self._active_count > 0
def nest(self):
"""
Record another level of nesting of this bulk write operation
"""
self._active_count += 1
def unnest(self):
"""
Record the completion of a level of nesting of the bulk write operation
"""
self._active_count -= 1
@property
def is_root(self):
"""
Return whether the bulk write is at the root (first) level of nesting
"""
return self._active_count == 1
class ActiveBulkThread(threading.local):
"""
Add the expected vars to the thread.
"""
def __init__(self, bulk_ops_record_type, **kwargs):
super(ActiveBulkThread, self).__init__(**kwargs)
self.records = defaultdict(bulk_ops_record_type)
class BulkOperationsMixin(object):
"""
This implements the :meth:`bulk_operations` modulestore semantics which handles nested invocations
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
def __init__(self, *args, **kwargs):
super(BulkOperationsMixin, self).__init__(*args, **kwargs)
self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type)
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree
until the bulk operation is completed.
"""
try:
self._begin_bulk_operation(course_id)
yield
finally:
self._end_bulk_operation(course_id, emit_signals)
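    # Illustrative nesting sketch (assumed usage; update_item is hypothetical here).
    # Nested calls on the same course share one BulkOpsRecord, and the real begin/end
    # work only happens at the outermost level:
    #
    #   with store.bulk_operations(course_key):          # _active_count 0 -> 1 (root)
    #       with store.bulk_operations(course_key):      # _active_count 1 -> 2 (nested)
    #           store.update_item(block, user_id)
    #       # inner exit: 2 -> 1, _end_outermost_bulk_operation not called yet
    #   # outer exit: 1 -> 0, _end_outermost_bulk_operation runs (and signals may fire)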
# the relevant type of bulk_ops_record for the mixin (overriding classes should override
# this variable)
_bulk_ops_record_type = BulkOpsRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.BulkOpsRecord` for this course.
"""
if course_key is None:
return self._bulk_ops_record_type()
# Retrieve the bulk record based on matching org/course/run (possibly ignoring case)
if ignore_case:
for key, record in self._active_bulk_ops.records.iteritems():
# Shortcut: check basic equivalence for cases where org/course/run might be None.
if key == course_key or (
key.org.lower() == course_key.org.lower() and
key.course.lower() == course_key.course.lower() and
key.run.lower() == course_key.run.lower()
):
return record
return self._active_bulk_ops.records[course_key.for_branch(None)]
@property
def _active_records(self):
"""
Yield all active (CourseLocator, BulkOpsRecord) tuples.
"""
for course_key, record in self._active_bulk_ops.records.iteritems():
if record.active:
yield (course_key, record)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
del self._active_bulk_ops.records[course_key.for_branch(None)]
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
"""
The outermost nested bulk_operation call: do the actual begin of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _begin_bulk_operation(self, course_key):
"""
Begin a bulk operation on course_key.
"""
bulk_ops_record = self._get_bulk_ops_record(course_key)
# Increment the number of active bulk operations (bulk operations
# on the same course can be nested)
bulk_ops_record.nest()
# If this is the highest level bulk operation, then initialize it
if bulk_ops_record.is_root:
self._start_outermost_bulk_operation(bulk_ops_record, course_key)
def _end_outermost_bulk_operation(self, bulk_ops_record, course_key, emit_signals=True):
"""
The outermost nested bulk_operation call: do the actual end of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _end_bulk_operation(self, course_key, emit_signals=True):
"""
End the active bulk operation on course_key.
"""
# If no bulk op is active, return
bulk_ops_record = self._get_bulk_ops_record(course_key)
if not bulk_ops_record.active:
return
bulk_ops_record.unnest()
# If this wasn't the outermost context, then don't close out the
# bulk operation.
if bulk_ops_record.active:
return
self._end_outermost_bulk_operation(bulk_ops_record, course_key, emit_signals)
self._clear_bulk_ops_record(course_key)
def _is_in_bulk_operation(self, course_key, ignore_case=False):
"""
Return whether a bulk operation is active on `course_key`.
"""
return self._get_bulk_ops_record(course_key, ignore_case).active
class EditInfo(object):
"""
Encapsulates the editing info of a block.
"""
def __init__(self, **kwargs):
self.from_storable(kwargs)
# For details, see caching_descriptor_system.py get_subtree_edited_by/on.
self._subtree_edited_on = kwargs.get('_subtree_edited_on', None)
self._subtree_edited_by = kwargs.get('_subtree_edited_by', None)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'previous_version': self.previous_version,
'update_version': self.update_version,
'source_version': self.source_version,
'edited_on': self.edited_on,
'edited_by': self.edited_by,
'original_usage': self.original_usage,
'original_usage_version': self.original_usage_version,
}
def from_storable(self, edit_info):
"""
De-serialize from Mongo-storable format to an object.
"""
# Guid for the structure which previously changed this XBlock.
# (Will be the previous value of 'update_version'.)
self.previous_version = edit_info.get('previous_version', None)
# Guid for the structure where this XBlock got its current field values.
# May point to a structure not in this structure's history (e.g., to a draft
# branch from which this version was published).
self.update_version = edit_info.get('update_version', None)
self.source_version = edit_info.get('source_version', None)
# Datetime when this XBlock's fields last changed.
self.edited_on = edit_info.get('edited_on', None)
# User ID which changed this XBlock last.
self.edited_by = edit_info.get('edited_by', None)
self.original_usage = edit_info.get('original_usage', None)
self.original_usage_version = edit_info.get('original_usage_version', None)
def __str__(self):
return ("EditInfo(previous_version={0.previous_version}, "
"update_version={0.update_version}, "
"source_version={0.source_version}, "
"edited_on={0.edited_on}, "
"edited_by={0.edited_by}, "
"original_usage={0.original_usage}, "
"original_usage_version={0.original_usage_version}, "
"_subtree_edited_on={0._subtree_edited_on}, "
"_subtree_edited_by={0._subtree_edited_by})").format(self)
class BlockData(object):
"""
Wrap the block data in an object instead of using a straight Python dictionary.
Allows the storing of meta-information about a structure that doesn't persist along with
the structure itself.
"""
def __init__(self, **kwargs):
# Has the definition been loaded?
self.definition_loaded = False
self.from_storable(kwargs)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'fields': self.fields,
'block_type': self.block_type,
'definition': self.definition,
'defaults': self.defaults,
'edit_info': self.edit_info.to_storable()
}
def from_storable(self, block_data):
"""
De-serialize from Mongo-storable format to an object.
"""
# Contains the Scope.settings and 'children' field values.
# 'children' are stored as a list of (block_type, block_id) pairs.
self.fields = block_data.get('fields', {})
# XBlock type ID.
self.block_type = block_data.get('block_type', None)
# DB id of the record containing the content of this XBlock.
self.definition = block_data.get('definition', None)
# Scope.settings default values copied from a template block (used e.g. when
# blocks are copied from a library to a course)
self.defaults = block_data.get('defaults', {})
# EditInfo object containing all versioning/editing data.
self.edit_info = EditInfo(**block_data.get('edit_info', {}))
def __str__(self):
return ("BlockData(fields={0.fields}, "
"block_type={0.block_type}, "
"definition={0.definition}, "
"definition_loaded={0.definition_loaded}, "
"defaults={0.defaults}, "
"edit_info={0.edit_info})").format(self)
new_contract('BlockData', BlockData)
class IncorrectlySortedList(Exception):
"""
Thrown when calling find() on a SortedAssetList not sorted by filename.
"""
pass
class SortedAssetList(SortedListWithKey):
"""
List of assets that is sorted based on an asset attribute.
"""
def __init__(self, **kwargs):
self.filename_sort = False
key_func = kwargs.get('key', None)
if key_func is None:
kwargs['key'] = itemgetter('filename')
self.filename_sort = True
super(SortedAssetList, self).__init__(**kwargs)
@contract(asset_id=AssetKey)
def find(self, asset_id):
"""
Find the index of a particular asset in the list. This method is only functional for lists
        sorted by filename. If the list is sorted on any other key, find() raises an
        IncorrectlySortedList exception.
        Returns: Index of asset, if found. None if not found.
"""
# Don't attempt to find an asset by filename in a list that's not sorted by filename.
if not self.filename_sort:
raise IncorrectlySortedList()
# See if this asset already exists by checking the external_filename.
# Studio doesn't currently support using multiple course assets with the same filename.
# So use the filename as the unique identifier.
idx = None
idx_left = self.bisect_left({'filename': asset_id.path})
idx_right = self.bisect_right({'filename': asset_id.path})
if idx_left != idx_right:
# Asset was found in the list.
idx = idx_left
return idx
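    # Minimal sketch of the bisect lookup above (hypothetical, filename-sorted data):
    #
    #   assets = SortedAssetList(iterable=[{'filename': 'a.png'}, {'filename': 'c.png'}])
    #   # looking for 'c.png': bisect_left -> 1, bisect_right -> 2, they differ -> index 1
    #   # looking for 'b.png': bisect_left -> 1, bisect_right -> 1, equal -> find() returns None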
@contract(asset_md=AssetMetadata)
def insert_or_update(self, asset_md):
"""
Insert asset metadata if asset is not present. Update asset metadata if asset is already present.
"""
metadata_to_insert = asset_md.to_storable()
asset_idx = self.find(asset_md.asset_id)
if asset_idx is None:
# Add new metadata sorted into the list.
self.add(metadata_to_insert)
else:
# Replace existing metadata.
self[asset_idx] = metadata_to_insert
class ModuleStoreAssetBase(object):
"""
The methods for accessing assets and their metadata
"""
def _find_course_asset(self, asset_key):
"""
Returns same as _find_course_assets plus the index to the given asset or None. Does not convert
to AssetMetadata; thus, is internal.
Arguments:
asset_key (AssetKey): what to look for
Returns:
Tuple of:
- AssetMetadata[] for all assets of the given asset_key's type
- the index of asset in list (None if asset does not exist)
"""
course_assets = self._find_course_assets(asset_key.course_key)
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_key.block_type, []))
idx = all_assets.find(asset_key)
return course_assets, idx
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return None
mdata = AssetMetadata(asset_key, asset_key.path, **kwargs)
all_assets = course_assets[asset_key.asset_type]
mdata.from_storable(all_assets[asset_idx])
return mdata
@contract(
course_key='CourseKey', asset_type='None | basestring',
start='int | None', maxresults='int | None', sort='tuple(str,(int,>=1,<=2))|None'
)
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of asset metadata for all assets of the given asset_type in the course.
Args:
course_key (CourseKey): course identifier
asset_type (str): the block_type of the assets to return. If None, return assets of all types.
start (int): optional - start at this asset number. Zero-based!
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of SortOrder.ascending or SortOrder.descending
Returns:
List of AssetMetadata objects.
"""
course_assets = self._find_course_assets(course_key)
# Determine the proper sort - with defaults of ('displayname', SortOrder.ascending).
key_func = None
sort_order = ModuleStoreEnum.SortOrder.ascending
if sort:
| if sort[0] == 'uploadDate': | 1,853 | lcc_e | python | null | f3072fdefd4e8da2e7e9faf48cffb40392217553f43722a7 |
|
# unionrepo.py - repository class for viewing union of repository changesets
#
# Derived from bundlerepo.py
# Copyright 2006, 2007 Benoit Boissinot <[email protected]>
# Copyright 2013 Unity Technologies, Mads Kiilerich <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Repository class for "in-memory pull" of one local repository to another,
allowing operations like diff and log with revsets.
"""
from node import nullid
from i18n import _
import os
import util, mdiff, cmdutil, scmutil
import localrepo, changelog, manifest, filelog, revlog
class unionrevlog(revlog.revlog):
def __init__(self, opener, indexfile, revlog2, linkmapper):
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
#
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = scmutil.readonlyvfs(opener)
revlog.revlog.__init__(self, opener, indexfile)
self.revlog2 = revlog2
n = len(self)
self.repotiprev = n - 1
self.bundlerevs = set() # used by 'bundle()' revset expression
for rev2 in self.revlog2:
rev = self.revlog2.index[rev2]
# rev numbers - in revlog2, very different from self.rev
_start, _csize, _rsize, _base, linkrev, p1rev, p2rev, node = rev
if linkmapper is None: # link is to same revlog
assert linkrev == rev2 # we never link back
link = n
else: # rev must be mapped from repo2 cl to unified cl by linkmapper
link = linkmapper(linkrev)
if node in self.nodemap:
# this happens for the common revlog revisions
self.bundlerevs.add(self.nodemap[node])
continue
p1node = self.revlog2.node(p1rev)
p2node = self.revlog2.node(p2rev)
e = (None, None, None, None,
link, self.rev(p1node), self.rev(p2node), node)
self.index.insert(-1, e)
self.nodemap[node] = n
self.bundlerevs.add(n)
n += 1
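    # Worked sketch of the numbering above (sizes are hypothetical): if the local
    # revlog has 5 revisions (repotiprev == 4) and revlog2 contributes 2 nodes that
    # are not already present, they are appended as union revs 5 and 6; requests for
    # rev <= repotiprev are served from the local revlog, anything above is looked up
    # in revlog2 by node (see _chunk()/revision() below).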
def _chunk(self, rev):
if rev <= self.repotiprev:
return revlog.revlog._chunk(self, rev)
return self.revlog2._chunk(self.node(rev))
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 > self.repotiprev and rev2 > self.repotiprev:
return self.revlog2.revdiff(
self.revlog2.rev(self.node(rev1)),
self.revlog2.rev(self.node(rev2)))
elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
return self.baserevdiff(rev1, rev2)
return mdiff.textdiff(self.revision(self.node(rev1)),
self.revision(self.node(rev2)))
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = self.rev(node)
if node == nullid:
return ""
if rev > self.repotiprev:
text = self.revlog2.revision(node)
self._cache = (node, rev, text)
else:
text = self.baserevision(rev)
# already cached
return text
def baserevision(self, nodeorrev):
# Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use unionrevlog with such a class, one
        # needs to override 'baserevision' and make a more specific call here.
return revlog.revlog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
# Exists for the same purpose as baserevision.
return revlog.revlog.revdiff(self, rev1, rev2)
def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
raise NotImplementedError
def addgroup(self, revs, linkmapper, transaction):
raise NotImplementedError
def strip(self, rev, minlink):
raise NotImplementedError
def checksize(self):
raise NotImplementedError
class unionchangelog(unionrevlog, changelog.changelog):
def __init__(self, opener, opener2):
changelog.changelog.__init__(self, opener)
linkmapper = None
changelog2 = changelog.changelog(opener2)
unionrevlog.__init__(self, opener, self.indexfile, changelog2,
linkmapper)
def baserevision(self, nodeorrev):
# Although changelog doesn't override 'revision' method, some extensions
# may replace this class with another that does. Same story with
# manifest and filelog classes.
return changelog.changelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return changelog.changelog.revdiff(self, rev1, rev2)
class unionmanifest(unionrevlog, manifest.manifest):
def __init__(self, opener, opener2, linkmapper):
manifest.manifest.__init__(self, opener)
manifest2 = manifest.manifest(opener2)
unionrevlog.__init__(self, opener, self.indexfile, manifest2,
linkmapper)
def baserevision(self, nodeorrev):
return manifest.manifest.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return manifest.manifest.revdiff(self, rev1, rev2)
class unionfilelog(unionrevlog, filelog.filelog):
def __init__(self, opener, path, opener2, linkmapper, repo):
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
unionrevlog.__init__(self, opener, self.indexfile, filelog2,
linkmapper)
self._repo = repo
def baserevision(self, nodeorrev):
return filelog.filelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return filelog.filelog.revdiff(self, rev1, rev2)
def _file(self, f):
self._repo.file(f)
class unionpeer(localrepo.localpeer):
def canpush(self):
return False
class unionrepository(localrepo.localrepository):
def __init__(self, ui, path, path2):
localrepo.localrepository.__init__(self, ui, path)
self.ui.setconfig('phases', 'publish', False, 'unionrepo')
self._url = 'union:%s+%s' % (util.expandpath(path),
util.expandpath(path2))
self.repo2 = localrepo.localrepository(ui, path2)
@localrepo.unfilteredpropertycache
def changelog(self):
return unionchangelog(self.sopener, self.repo2.sopener)
def _clrev(self, rev2):
"""map from repo2 changelog rev to temporary rev in self.changelog"""
node = self.repo2.changelog.node(rev2)
return self.changelog.rev(node)
@localrepo.unfilteredpropertycache
def manifest(self):
return unionmanifest(self.sopener, self.repo2.sopener,
self._clrev)
def url(self):
return self._url
def file(self, f):
return unionfilelog(self.sopener, f, self.repo2.sopener,
self._clrev, self)
def close(self):
self.repo2.close()
def cancopy(self):
return False
def peer(self):
return unionpeer(self)
def getcwd(self):
return os.getcwd() # always outside the repo
def instance(ui, path, create):
if create:
raise util.Abort(_('cannot create new union repository'))
parentpath = ui.config("bundle", "mainreporoot", "")
if not parentpath:
# try to find the correct path to the working directory repo
parentpath = cmdutil.findrepo(os.getcwd())
if parentpath is None:
parentpath = ''
if parentpath:
# Try to make the full path relative so we get a nice, short URL.
# In particular, we don't want temp dir names in test outputs.
cwd = os.getcwd()
if parentpath == cwd:
parentpath = ''
else:
cwd = os.path.join(cwd,'')
if parentpath.startswith(cwd):
| parentpath = parentpath[len(cwd):] | 795 | lcc_e | python | null | ccc7e6259fdffd163ebd0d5779c4ced9624d7b3d215079cf |
|
#
# Copyright (c) 2015-2018 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
from collections import OrderedDict
from contextlib import contextmanager
import logging
import logging.handlers
import numbers
import sys
import time
from talisker.context import Context, ContextId
from talisker.util import (
get_errno_fields,
module_cache,
module_dict,
)
__all__ = [
'configure',
'configure_test_logging',
'logging_context',
]
logging_globals = module_dict()
def set_global_extra(extra):
if 'extra' not in logging_globals:
logging_globals['extra'] = OrderedDict()
logging_globals['extra'].update(extra)
def reset_logging():
"""Reset logging config"""
# avoid unclosed file resource warning
for handler in logging.getLogger().handlers:
if getattr(handler, '_debug_handler', False):
handler.stream.close()
logging.getLogger().handlers = []
NOISY_LOGS = {
'requests': logging.WARNING,
}
class LoggingContextProxy():
def __getattr__(self, attr):
return getattr(Context.logging, attr)
@contextmanager
def __call__(self, extra=None, **kwargs):
with Context.logging(extra, **kwargs):
yield
logging_context = LoggingContextProxy()
# backwards compat aliases
def set_logging_context(*args, **kwargs):
Context.logging.push(*args, **kwargs)
extra_logging = logging_context
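# Minimal usage sketch (assumed caller code, not part of this module):
#
#   logger = logging.getLogger(__name__)
#   with logging_context(job='cleanup'):      # hypothetical extra field
#       logger.info('starting')               # record carries job=cleanup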
def add_talisker_handler(level, handler, formatter=None):
if formatter is None:
formatter = StructuredFormatter()
handler.setFormatter(formatter)
handler.setLevel(level)
handler._talisker_handler = True
logging.getLogger().addHandler(handler)
def set_logger_class():
logging.setLoggerClass(StructuredLogger)
logging.getLogger().setLevel(logging.NOTSET)
@module_cache
def get_talisker_handler():
handler = logging.StreamHandler()
handler._root_talisker = True
return handler
def configure(config): # pragma: no cover
"""Configure default logging setup for our services.
This is basically:
- log to stderr
- output hybrid logfmt structured format
- maybe configure debug logging
"""
# avoid duplicate logging
if logging_globals.get('configured'):
return
set_logger_class()
formatter = StructuredFormatter()
if config.colour:
formatter = ColouredFormatter(style=config.colour)
# always INFO to stderr
add_talisker_handler(logging.INFO, get_talisker_handler(), formatter)
configure_warnings(config.devel)
supress_noisy_logs()
# defer this until logging has been set up
logger = logging.getLogger(__name__)
config_extra = {k: v.value for k, v in config.metadata().items() if v.raw}
if config_extra:
logger.info('talisker configured', extra=config_extra)
if config.ERRORS:
errors = {name: str(err) for name, err in config.ERRORS.items()}
logger.error('configuration errors', extra=errors)
if config.debuglog is not None:
if can_write_to_file(config.debuglog):
handler = logging.handlers.TimedRotatingFileHandler(
config.debuglog,
when='D',
interval=1,
backupCount=1,
delay=True,
utc=True,
)
handler._debug_handler = True
add_talisker_handler(logging.DEBUG, handler)
logger.info('enabling debug log', extra={'path': config.debuglog})
else:
logger.info('could not enable debug log, could not write to path',
extra={'path': config.debuglog})
# sentry integration
import talisker.sentry # defer to avoid logging setup
if talisker.sentry.enabled:
sentry_handler = talisker.sentry.get_log_handler()
add_talisker_handler(logging.ERROR, sentry_handler)
logging_globals['configured'] = True
def can_write_to_file(path):
try:
open(path, 'a').close()
except Exception:
return False
else:
return True
def supress_noisy_logs():
"""Set some custom log levels on some sub logs"""
for name, level in NOISY_LOGS.items():
logger = logging.getLogger(name)
logger.setLevel(level)
def configure_warnings(enable):
    # never propagate warnings to root
warnings = logging.getLogger('py.warnings')
warnings.propagate = False
if enable:
warnings.addHandler(logging.StreamHandler())
def configure_test_logging(handler=None):
"""Add a handler (defaults to NullHandler) to root logger.
Prevents unconfigured logging from erroring, and swallows all logging,
which is usually what you want for unit tests. Unit test fixtures can
still add their own loggers to assert against log messages if needed.
"""
set_logger_class()
if handler is None:
handler = logging.NullHandler()
add_talisker_handler(logging.NOTSET, handler)
configure_warnings(True)
def enable_debug_log_stderr():
"""Enables debug logging on stderr
Checks for devel mode."""
logger = logging.getLogger(__name__)
logger.warning('setting stderr logging to DEBUG')
get_talisker_handler().setLevel(logging.DEBUG)
class StructuredLogger(logging.Logger):
"""A logger that handles passing 'extra' arguments to all logging calls.
Supports 3 sources of extra structured data:
    1) global extra, designed to be set once at process start.
    2) context extra, designed to be set per request or job, and can be cleaned up
afterwards.
3) per call extra, passed by the log call, as per normal logging
e.g. log.info('...', extra={...})
"""
    # sadly, we must subclass and override, rather than use the new
# setLogRecordFactory() in 3.2+, as that does not pass the extra args
# through. Also, we need to support python 2.
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
# at this point we have 3 possible sources of extra kwargs
# - log call: extra
# - context : local_context.flat
# - global : logging_globals['extra']
#
# In case of collisions, we append _ to the end of the name, so no data
# is lost. The global ones are more important, so take priority - the
# user supplied keys are the ones renamed if needed
# Also, the ordering is specific - more specific tags first
trailer = None
structured = OrderedDict()
try:
if ContextId.get(None) is None:
context_extra = {}
request_id = None
else:
context_extra = logging_context.flat
request_id = Context.request_id
global_extra = logging_globals.get('extra', {})
if extra:
trailer = extra.pop('trailer', None)
for k, v in extra.items():
if k in context_extra or k in global_extra:
k = k + '_'
structured[k] = v
for k, v in context_extra.items():
if k in global_extra:
k = k + '_'
structured[k] = v
structured.update(global_extra)
if request_id:
structured['request_id'] = request_id
except Exception:
# ensure unexpected error doesn't break logging completely
structured = extra
kwargs = dict(func=func, extra=structured, sinfo=sinfo)
# python 2 doesn't support sinfo parameter
| if sys.version_info[0] == 2: | 897 | lcc_e | python | null | 17f8060bda3eea4e6b94e07ca7850afadc2a42895f40a08a |
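# --- Illustrative sketch (not part of the original modules above) ---
# How the three 'extra' sources described in StructuredLogger's docstring combine
# on a single call. The key names and the 'myapp' logger are assumptions; pushing
# context extra may require an active talisker Context, so treat this as a sketch.
def _example_structured_extras():
    import logging
    set_logger_class()
    logging_globals['extra'] = {'service': 'example-svc'}   # 1) global extra
    set_logging_context(unit='worker-1')                     # 2) context extra
    log = logging.getLogger('myapp')
    log.info('job finished', extra={'duration_ms': 12})      # 3) per-call extra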
|
"""
Step definitions for working with Django models.
"""
from datetime import datetime
import re
from django.core.management import call_command
from django.core.management.color import no_style
from django.db import connection
from django.db.models.loading import get_models
from django.utils.functional import curry
from functools import wraps
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for model in get_models():
yield (unicode(model._meta.verbose_name), model)
yield (unicode(model._meta.verbose_name_plural), model)
MODELS = dict(_models_generator())
_WRITE_MODEL = {}
def creates_models(model):
"""
Register a model-specific creation function. Wrapper around writes_models
that removes the field parameter (always a create operation).
"""
def decorated(func):
@wraps(func)
@writes_models(model)
def wrapped(data, field):
if field:
raise NotImplementedError(
"Must use the writes_models decorator to update models")
return func(data)
return decorated
def writes_models(model):
"""
Register a model-specific create and update function.
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
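# --- Illustrative sketch (not part of the original module) ---
# Registering a custom writer with writes_models for a hypothetical model, so
# create/update steps for that model are routed through it. It simply defers to
# the generic write_models() helper defined further down in this module.
def _example_register_writer(profile_model):
    @writes_models(profile_model)
    def write_profiles(data, field):
        # field is None for creates, or the lookup field name for updates
        return write_models(profile_model, data, field=field)
    return write_profiles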
_MODEL_EXISTS = {}
def checks_existence(model):
"""
Register a model-specific existence check function.
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_MODEL_EXISTS[model] = func
return func
return decorated
def hash_data(hash_):
"""
Convert strings from a Lettuce hash to appropriate types
"""
res = {}
for key, value in hash_.items():
if type(value) in (str, unicode):
if value == "true":
value = True
elif value == "false":
value = False
elif value == "null":
value = None
elif value.isdigit() and not re.match("^0[0-9]+", value):
value = int(value)
elif re.match(r'^\d{4}-\d{2}-\d{2}$', value):
value = datetime.strptime(value, "%Y-%m-%d")
res[key] = value
return res
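# --- Illustrative sketch (not part of the original module) ---
# What hash_data() does to a typical Lettuce row; the keys and values below are
# made up for demonstration.
def _example_hash_data():
    row = {'name': 'Alice', 'active': 'true', 'score': '42', 'joined': '2013-05-01'}
    return hash_data(row)
    # -> {'name': 'Alice', 'active': True, 'score': 42,
    #     'joined': datetime(2013, 5, 1, 0, 0)}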
def hashes_data(step):
"""
Convert strings from step hashes to appropriate types
"""
return [hash_data(hash_) for hash_ in step.hashes]
def get_model(model):
"""
Convert a model's verbose name to the model class. This allows us to
    use the model's verbose name in steps.
"""
name = model.lower()
    model = MODELS.get(name, None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def create_models(model, data):
"""
Create models for each data hash. Wrapper around write_models.
"""
return write_models(model, data, None)
def write_models(model, data, field=None):
"""
Create or update models for each data hash. If field is present, it is the
field that is used to get the existing models out of the database to update
them; otherwise, new models are created.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
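# --- Illustrative sketch (not part of the original module) ---
# Creating and then updating rows for a model looked up by its verbose name.
# Requires a configured Django project; the 'user' verbose name and the field
# values are assumptions for demonstration.
def _example_create_and_update():
    model = get_model('user')
    create_models(model, [{'username': 'alice', 'is_active': True}])
    # update the same row, matching on the 'username' field
    write_models(model, [{'username': 'alice', 'is_active': False}],
                 field='username')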
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
for field in model._meta.fields:
print '%s=%s,' % (field.name, str(getattr(model, field.name))),
if attrs is not None:
for attr in attrs:
print '%s=%s,' % (attr, str(getattr(model, attr))),
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
print '%s=%s (%i),' % (
field.name,
', '.join(map(str, vals.all())),
vals.count()),
print
def models_exist(model, data, queryset=None):
"""
Check whether the models defined by @data exist in the @queryset.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
if not queryset:
queryset = model.objects
failed = 0
try:
for hash_ in data:
fields = {}
extra_attrs = {}
| for k, v in hash_.iteritems(): | 546 | lcc_e | python | null | 7ae84ee3dc227ab3ad301914db5aa12c8e0ecb2042da37aa |
|
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibEdit Administrator Interface."""
__revision__ = "$Id"
__lastupdated__ = """$Date: 2008/08/12 09:26:46 $"""
import cProfile
import cStringIO
import pstats
from invenio.jsonutils import json, json_unicode_to_utf8
from invenio.access_control_engine import acc_authorize_action
from invenio.bibedit_engine import (perform_request_ajax,
perform_request_init,
perform_request_newticket,
perform_request_compare,
perform_request_init_template_interface,
perform_request_ajax_template_interface)
from invenio.bibedit_utils import user_can_edit_record_collection
from invenio.config import CFG_SITE_LANG, CFG_SITE_SECURE_URL, CFG_SITE_RECORD
from invenio.messages import gettext_set_language
from invenio.urlutils import redirect_to_url
from invenio.webinterface_handler import WebInterfaceDirectory, wash_urlargd
from invenio.webpage import page
from invenio.webuser import collect_user_info, getUid, page_not_authorized
navtrail = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> '
) % CFG_SITE_SECURE_URL
navtrail_bibedit = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> ' + \
' > <a class="navtrail" href=\"%s/%s/edit\">Record Editor</a>'
) % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, CFG_SITE_RECORD)
def wrap_json_req_profiler(func):
def json_req_profiler(self, req, form):
if "ajaxProfile" in form:
profiler = cProfile.Profile()
return_val = profiler.runcall(func, self, req, form)
results = cStringIO.StringIO()
stats = pstats.Stats(profiler, stream=results)
stats.sort_stats('cumulative')
stats.print_stats(100)
json_in = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_in = json_unicode_to_utf8(json_in)
json_data = json.loads(return_val)
json_data.update({"profilerStats": "<pre style='overflow: scroll'>" + json_in['requestType'] + results.getvalue() + "</pre>"})
return json.dumps(json_data)
else:
return func(self, req, form)
return json_req_profiler
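# --- Illustrative sketch (not part of the original module) ---
# A stand-in request handler showing how wrap_json_req_profiler is applied: when
# the submitted form contains an 'ajaxProfile' key, the call is profiled and the
# stats are folded into the JSON response. The class below is hypothetical, not a
# real Invenio page; the form is expected to carry a 'jsondata' JSON payload.
class _ExampleProfiledInterface(object):
    @wrap_json_req_profiler
    def index(self, req, form):
        json_data = json_unicode_to_utf8(json.loads(str(form['jsondata'])))
        return json.dumps({'resultCode': 0,
                           'echo': json_data.get('requestType')})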
class WebInterfaceEditPages(WebInterfaceDirectory):
"""Defines the set of /edit pages."""
_exports = ['', 'new_ticket', 'compare_revisions', 'templates']
def __init__(self, recid=None):
"""Initialize."""
self.recid = recid
@wrap_json_req_profiler
def index(self, req, form):
"""Handle all BibEdit requests.
        The responsibilities of this function are:
* JSON decoding and encoding.
* Redirection, if necessary.
* Authorization.
* Calling the appropriate function from the engine.
"""
uid = getUid(req)
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
# If it is an Ajax request, extract any JSON data.
ajax_request, recid = False, None
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
if json_data.has_key('recID'):
recid = json_data['recID']
json_response = {'resultCode': 0, 'ID': json_data['ID']}
# Authorization.
user_info = collect_user_info(req)
if user_info['email'] == 'guest':
# User is not logged in.
if not ajax_request:
# Do not display the introductory recID selection box to guest
# users (as it used to be with v0.99.0):
dummy_auth_code, auth_message = acc_authorize_action(req,
'runbibedit')
referer = '/edit/'
if self.recid:
referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
return page_not_authorized(req=req, referer=referer,
text=auth_message, navtrail=navtrail)
else:
# Session has most likely timed out.
json_response.update({'resultCode': 100})
return json.dumps(json_response)
elif self.recid:
# Handle redirects from /record/<record id>/edit
# generic URL.
redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))
elif recid is not None:
json_response.update({'recID': recid})
if json_data['requestType'] == "getRecord":
# Authorize access to record.
if not user_can_edit_record_collection(req, recid):
json_response.update({'resultCode': 101})
return json.dumps(json_response)
# Handle request.
if not ajax_request:
# Show BibEdit start page.
body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
title = 'Record Editor'
return page(title = title,
body = body,
errors = errors,
warnings = warnings,
uid = uid,
language = argd['ln'],
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
body_css_classes = ['bibedit'])
else:
# Handle AJAX request.
json_response.update(perform_request_ajax(req, recid, uid,
json_data))
return json.dumps(json_response)
def compare_revisions(self, req, form):
"""Handle the compare revisions request"""
argd = wash_urlargd(form, { \
'ln': (str, CFG_SITE_LANG), \
'rev1' : (str, ''), \
'rev2' : (str, ''), \
'recid': (int, 0)})
ln = argd['ln']
uid = getUid(req)
_ = gettext_set_language(ln)
# Checking if currently logged user has permission to perform this request
auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
if auth_code != 0:
return page_not_authorized(req=req, referer="/edit",
text=auth_message, navtrail=navtrail)
recid = argd['recid']
rev1 = argd['rev1']
rev2 = argd['rev2']
ln = argd['ln']
body, errors, warnings = perform_request_compare(ln, recid, rev1, rev2)
return page(title = _("Comparing two record revisions"),
body = body,
errors = errors,
warnings = warnings,
uid = uid,
language = ln,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
body_css_classes = ['bibedit'])
def new_ticket(self, req, form):
"""handle a edit/new_ticket request"""
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG), 'recid': (int, 0)})
ln = argd['ln']
_ = gettext_set_language(ln)
auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
if auth_code != 0:
| return page_not_authorized(req=req, referer="/edit", | 756 | lcc_e | python | null | 04e295ceba60a64b82588ffaa2a96d5c35e2e469380e6944 |
|
# Copyright (c) 2012, Michael DeHaan <[email protected]>
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
# Copyright 2017 Toshio Kuratomi <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
connection: ssh
short_description: connect via ssh client binary
description:
        - This connection plugin allows Ansible to communicate with the target machines via the normal ssh command line.
- Ansible does not expose a channel to allow communication between the user and the ssh process to accept
a password manually to decrypt an ssh key when using this connection plugin (which is the default). The
use of ``ssh-agent`` is highly recommended.
author: ansible (@core)
version_added: historical
options:
host:
description: Hostname/ip to connect to.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
host_key_checking:
description: Determines if ssh should check host keys
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
ssh_args:
description: Arguments to pass to all ssh cli tools
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all ssh CLI tools
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
ssh_executable:
default: ssh
description:
- This defines the location of the ssh binary. It defaults to ``ssh`` which will use the first ssh binary available in $PATH.
- This option is usually not required, it might be useful when access to system ssh is restricted,
or when using ssh wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to ``sftp`` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to `scp` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
description: Extra exclusive to the ``scp`` CLI
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
sftp_extra_args:
description: Extra exclusive to the ``sftp`` CLI
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
ssh_extra_args:
description: Extra exclusive to the 'ssh' CLI
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
retries:
# constant: ANSIBLE_SSH_RETRIES
description: Number of attempts to connect.
default: 3
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
          - If no user is supplied, Ansible will let the ssh client binary choose the user as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
pipelining:
default: ANSIBLE_PIPELINING
description:
- Pipelining reduces the number of SSH operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- This can result in a very significant performance improvement when enabled.
- However this conflicts with privilege escalation (become).
For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
#- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
#- section: ssh_connection
# key: pipelining
type: boolean
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
control_path:
description:
- This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the `%(directory)s` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
scp_if_ssh:
default: smart
description:
- "Prefered method to use when transfering files over ssh"
- When set to smart, Ansible will try them until one succeeds or they all fail
- If set to True, it will force 'scp', if False it will use 'sftp'
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
| - {key: usetty, section: ssh_connection} | 968 | lcc_e | python | null | 372cbea864b598e206205434a3c9016ac96913fb9e29fd56 |
|
# -*- coding: utf-8 -*-
##
## This file is part of Harvesting Kit.
## Copyright (C) 2013, 2014, 2015 CERN.
##
## Harvesting Kit is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Harvesting Kit is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
import sys
import time
from os import pardir
from os.path import (join,
dirname,
basename)
try:
from invenio.errorlib import register_exception
except ImportError:
register_exception = lambda a=1, b=2: True
from harvestingkit.minidom_utils import (get_value_in_tag,
xml_to_text)
from harvestingkit.utils import (format_arxiv_id,
add_nations_field)
from harvestingkit.bibrecord import (
record_add_field,
create_record,
record_xml_output,
)
from xml.dom.minidom import parse
class JATSParser(object):
def __init__(self, tag_to_remove=None, extract_nations=False):
self.references = None
self.tag_to_remove = tag_to_remove
self.extract_nations = extract_nations
def get_article(self, path):
return parse(open(path))
def get_title(self, xml):
try:
return get_value_in_tag(xml, "article-title", tag_to_remove=self.tag_to_remove)
except Exception:
print >> sys.stderr, "Can't find title"
def get_issn(self, xml):
issns = xml.getElementsByTagName('issn')
ret = None
for issn in issns:
if issn.getAttribute('date-type').encode('utf-8') == 'epub' or issn.getAttribute('pub-type').encode('utf-8') == 'epub':
if issn.getAttribute("pub-type").encode('utf-8'):
ret = issn.getAttribute("pub-type").encode('utf-8')
else:
ret = issn.getAttribute("date-type").encode('utf-8')
if not ret and issns:
ret = xml_to_text(issns[0])
return ret
def get_date(self, xml):
dates = xml.getElementsByTagName('pub-date')
ret = None
for date in dates:
if date.getAttribute('date-type').encode('utf-8') == 'epub' or date.getAttribute('pub-type').encode('utf-8') == 'epub':
ret = get_value_in_tag(date, 'year')
if not ret and dates:
return dates[0]
else:
return ret
def get_publication_information(self, xml):
jid = get_value_in_tag(xml, "journal-title")
journal = ""
if "European Physical Journal" in jid:
journal = "EPJC"
try:
art = xml.getElementsByTagName('article-meta')[0]
except IndexError as err:
register_exception()
print >> sys.stderr, "ERROR: XML corrupted: %s" % err
pass
except Exception as err:
register_exception()
print >> sys.stderr, "ERROR: Exception captured: %s" % err
pass
issn = self.get_issn(art)
volume = get_value_in_tag(art, "volume")
issue = get_value_in_tag(art, "issue")
year = self.get_date(art)
first_page = get_value_in_tag(art, "fpage")
last_page = get_value_in_tag(art, "lpage")
doi = self.get_doi(art)
return (journal, issn, volume, issue, first_page, last_page, year, doi)
def get_doi(self, xml):
ids = xml.getElementsByTagName('article-id')
ret = ""
for i in ids:
if i.getAttribute('pub-id-type').encode('utf-8') == 'doi':
ret = xml_to_text(i)
if not ret:
print >> sys.stdout, "Can't find DOI."
return ret
def _get_orcid(self, xml_author):
try:
contrib_id = xml_author.getElementsByTagName('contrib-id')[0]
if contrib_id.getAttribute('contrib-id-type') == 'orcid':
orcid_raw = xml_to_text(contrib_id)
pattern = '\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d[\d|X]'
return re.search(pattern, orcid_raw).group()
except (IndexError, AttributeError):
return None
def get_authors(self, xml):
authors = []
for author in xml.getElementsByTagName("contrib"):
# Springer puts colaborations in additional "contrib" tag so to
# avoid having fake author with all affiliations we skip "contrib"
# tag with "contrib" subtags.
if author.getElementsByTagName("contrib"):
continue
tmp = {}
surname = get_value_in_tag(author, "surname")
if surname:
tmp["surname"] = surname
given_name = get_value_in_tag(author, "given-names")
if given_name:
tmp["given_name"] = given_name.replace('\n', ' ')
if not surname and not given_name:
tmp["name"] = get_value_in_tag(author, "string-name")
# It's not there yet
orcid = self._get_orcid(author)
if orcid:
tmp["orcid"] = 'ORCID:{0}'.format(orcid)
# cross_refs = author.getElementsByTagName("ce:cross-ref")
# if cross_refs:
# tmp["cross_ref"] = []
# for cross_ref in cross_refs:
# tmp["cross_ref"].append(cross_ref.getAttribute("refid").encode('utf-8'))
tmp["affiliations_ids"] = []
tmp["contact_ids"] = []
xrefs = author.getElementsByTagName("xref")
for x in xrefs:
if x.getAttribute('ref-type').encode('utf-8') == 'aff':
tmp["affiliations_ids"].extend([a.encode('utf-8') for a in x.getAttribute('rid').split()])
if x.getAttribute('ref-type').encode('utf-8') == 'corresp':
tmp["contact_ids"].extend([a.encode('utf-8') for a in x.getAttribute('rid').split()])
authors.append(tmp)
affiliations = {}
for affiliation in xml.getElementsByTagName("aff"):
aff_id = affiliation.getAttribute("id").encode('utf-8')
# removes numbering in from affiliations
text = re.sub(r'^(\d+,\ ?)', "", xml_to_text(affiliation, delimiter=", "))
affiliations[aff_id] = text
emails = {}
for contact in xml.getElementsByTagName("corresp"):
contact_id = contact.getAttribute("id").encode('utf-8')
if contact.getElementsByTagName('email'):
text = xml_to_text(contact.getElementsByTagName('email')[0])
emails[contact_id] = text
        implicit_affiliations = True
for author in authors:
matching_ref = [ref for ref in author.get("affiliations_ids") if ref in affiliations]
if matching_ref:
                implicit_affiliations = False
author["affiliation"] = []
for i in xrange(0, len(matching_ref)):
author["affiliation"].append(affiliations[matching_ref[i]])
matching_contact = [cont for cont in author.get('contact_ids') if cont in emails]
if matching_contact:
author["email"] = emails[matching_contact[0]]
        if implicit_affiliations and len(affiliations) > 1:
            print >> sys.stderr, "Implicit affiliations are used, but there is more than one affiliation: %s" % affiliations
        if implicit_affiliations and len(affiliations) >= 1:
for author in authors:
author["affiliation"] = []
for aff in affiliations.values():
author["affiliation"].append(aff)
return authors
def get_abstract(self, xml):
try:
return get_value_in_tag(xml, "abstract", tag_to_remove=self.tag_to_remove).replace("Abstract", "", 1)
except Exception:
print >> sys.stderr, "Can't find abstract"
def get_copyright(self, xml, logger=None):
tags = ["copyright-holder", "copyright-statement"]
for tag in tags:
            if tag == "copyright-holder":
ret = get_value_in_tag(xml, tag)
if not ret:
if logger:
logger.info("Can't find copyright, trying different tag.")
print >> sys.stderr, "Can't find copyright, trying different tag."
else:
return ret
else:
ret = get_value_in_tag(xml, tag)
if not ret:
if logger:
logger.info("Can't find copyright")
print >> sys.stderr, "Can't find copyright"
else:
ret = ret.split('.')
return ret[0]
def get_keywords(self, xml):
try:
kwd_groups = xml.getElementsByTagName('kwd-group')
pacs = []
other = []
for kwd_group in kwd_groups:
if kwd_group.getAttribute('kwd-group-type').encode('utf-8') == "pacs":
pacs = [xml_to_text(keyword, tag_to_remove=self.tag_to_remove) for keyword in kwd_group.getElementsByTagName("kwd")]
else:
other = [xml_to_text(keyword, tag_to_remove=self.tag_to_remove) for keyword in kwd_group.getElementsByTagName("kwd")]
return {"pacs": pacs, "other": other}
except Exception:
print >> sys.stderr, "Can't find keywords"
def get_ref_link(self, xml, name):
links = xml.getElementsByTagName('ext-link')
ret = None
for link in links:
if name in link.getAttribute("xlink:href").encode('utf-8'):
ret = xml_to_text(link).strip()
if not ret:
links = xml.getElementsByTagName('elocation-id')
for link in links:
if name in link.getAttribute("content-type").encode('utf-8'):
ret = xml_to_text(link).strip()
return ret
def get_page_count(self, xml):
counts = xml.getElementsByTagName("counts")
if counts:
page_count = counts[0].getElementsByTagName("page-count")
if page_count:
return page_count[0].getAttribute("count").encode('utf-8')
else:
return None
else:
return None
def get_publication_date(self, xml, logger=None):
date_xmls = xml.getElementsByTagName('pub-date')
day = None
month = None
year = None
if date_xmls:
for date_xml in date_xmls:
if date_xml.hasAttribute('pub-type'):
if date_xml.getAttribute('pub-type') == "epub":
day = get_value_in_tag(date_xml, "day")
month = get_value_in_tag(date_xml, "month")
year = get_value_in_tag(date_xml, "year")
if not year:
day = get_value_in_tag(date_xml, "day")
month = get_value_in_tag(date_xml, "month")
year = get_value_in_tag(date_xml, "year")
if logger:
logger.info('%s-%s-%s' % (year, month, day))
return '%s-%s-%s' % (year, month, day)
else:
print >> sys.stderr, "Can't find publication date. Using 'today'."
if logger:
logger.info("Can't find publication date. Using 'today'.")
return time.strftime('%Y-%m-%d')
def get_references(self, xml):
references = []
for reference in xml.getElementsByTagName("ref"):
plain_text = None
try:
ref_type = reference.getElementsByTagName('mixed-citation')[0]
ref_type = ref_type.getAttribute('publication-type').encode('utf-8')
except:
ref_type = reference.getElementsByTagName('citation')[0]
ref_type = ref_type.getAttribute('publication-type').encode('utf-8')
label = get_value_in_tag(reference, "label").strip('.')
authors = []
for author in reference.getElementsByTagName("name"):
given_name = get_value_in_tag(author, "given-names")
surname = get_value_in_tag(author, "surname")
if given_name:
name = "%s, %s" % (surname, given_name)
else:
name = surname
if name.strip().split() == []:
name = get_value_in_tag(author, "string-name")
authors.append(name)
doi_tag = reference.getElementsByTagName("pub-id")
doi = ""
for tag in doi_tag:
if tag.getAttribute("pub-id-type") == "doi":
doi = xml_to_text(tag)
issue = get_value_in_tag(reference, "issue")
page = get_value_in_tag(reference, "fpage")
page_last = get_value_in_tag(reference, "lpage")
title = get_value_in_tag(reference, "source")
volume = get_value_in_tag(reference, "volume")
year = get_value_in_tag(reference, "year")
ext_link = format_arxiv_id(self.get_ref_link(reference, "arxiv"))
if ref_type != 'journal':
try:
plain_text = get_value_in_tag(reference,
"mixed-citation",
tag_to_remove=self.tag_to_remove)
except:
plain_text = get_value_in_tag(reference,
"citation",
tag_to_remove=self.tag_to_remove)
references.append((label, authors, doi,
issue, page, page_last,
title, volume, year,
ext_link, plain_text))
self.references = references
def get_record(self, f_path, publisher=None, collection=None, logger=None):
xml = self.get_article(f_path)
rec = create_record()
title = self.get_title(xml)
if title:
record_add_field(rec, '245', subfields=[('a', title)])
record_add_field(rec, '260', subfields=[('c', self.get_publication_date(xml, logger))])
journal, issn, volume, issue, first_page, last_page, year, doi = self.get_publication_information(xml)
if logger:
logger.info("Creating record: %s %s" % (join(f_path, pardir), doi))
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi), ('2', 'DOI')])
authors = self.get_authors(xml)
first_author = True
for author in authors:
if author.get('surname'):
subfields = [('a', '%s, %s' % (author.get('surname'), author.get('given_name') or author.get('initials', '')))]
else:
subfields = [('a', '%s' % (author.get('name', '')))]
if 'orcid' in author:
subfields.append(('j', author['orcid']))
if 'affiliation' in author:
for aff in author["affiliation"]:
subfields.append(('v', aff))
if self.extract_nations:
add_nations_field(subfields)
if author.get('email'):
subfields.append(('m', author['email']))
if first_author:
record_add_field(rec, '100', subfields=subfields)
first_author = False
else:
record_add_field(rec, '700', subfields=subfields)
page_count = self.get_page_count(xml)
if page_count:
| record_add_field(rec, '300', subfields=[('a', page_count)]) | 1,283 | lcc_e | python | null | 86663145e9262154f0a3a437a533c6207187300f593522f9 |
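# --- Illustrative sketch (not part of the original module) ---
# Converting a single JATS XML file with the parser above. The file path,
# publisher and collection values are assumptions for demonstration; in the
# upstream implementation get_record() returns the MARCXML for the record.
def _example_convert_article(path='article.xml'):
    parser = JATSParser(extract_nations=False)
    return parser.get_record(path, publisher='Springer', collection='HEP')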