# Python source files extracted from the sciencebeam_parser 0.1.8 package (PyPI).
# The path of each file is noted in a comment at the end of that file.
from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
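# Data generator for the header model: emits one feature set per layout token,
# closely mirroring the GROBID header feature set (token text, prefixes and
# suffixes, block/line status, font, capitalisation and digit status, plus
# dummy placeholders for features that are not currently computed).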
class HeaderDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_block_status_with_blockend_for_single_token(),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_alignment_status(),
token_features.get_token_font_status(),
token_features.get_token_font_size_feature(),
token_features.get_str_is_bold(),
token_features.get_str_is_italic(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_email(),
token_features.get_dummy_str_is_http(),
token_features.get_punctuation_type_feature(),
token_features.get_str_is_largest_font_size(),
            # dummy feature due to a bug in GROBID (issue #795)
            token_features.get_dummy_str_is_smallest_font_size(),
            # due to the bug, the value would usually be larger than the mean
            token_features.get_dummy_str_is_larger_than_average_font_size('1'),
token_features.get_dummy_label()
        ])

# End of file: sciencebeam_parser/models/header/data.py (sciencebeam_parser-0.1.8, PyPI)
import logging
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticAddressLine,
SemanticAffiliationAddress,
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticCountry,
SemanticDepartment,
SemanticInstitution,
SemanticLaboratory,
SemanticMarker,
SemanticNote,
SemanticPostBox,
SemanticPostCode,
SemanticRegion,
SemanticSettlement
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import (
SimpleModelSemanticExtractor,
get_regex_cleaned_layout_block_with_prefix_suffix
)
LOGGER = logging.getLogger(__name__)
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<institution>': SemanticInstitution,
'<department>': SemanticDepartment,
'<laboratory>': SemanticLaboratory,
'<addrLine>': SemanticAddressLine,
'<postCode>': SemanticPostCode,
'<postBox>': SemanticPostBox,
'<region>': SemanticRegion,
'<settlement>': SemanticSettlement,
'<country>': SemanticCountry
}
CLEAN_REGEX_BY_TAG: Mapping[str, str] = {
'<country>': r'(.*[^.]).*'
}
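# Converts labelled affiliation/address entity blocks into semantic content.
# A new SemanticAffiliationAddress is started at each '<marker>' label (or when
# a second institution is encountered); other labels are mapped via
# SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG, with optional regex-based cleaning
# (e.g. trailing dots are stripped from the country).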
class AffiliationAddressSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('aff'))
aff: Optional[SemanticAffiliationAddress] = None
for name, layout_block in entity_tokens:
if name == '<marker>':
if aff:
yield aff
aff = SemanticAffiliationAddress(content_id=next(ids_iterator, '?'))
aff.add_content(SemanticMarker(layout_block=layout_block))
continue
prefix_block, cleaned_block, suffix_block = (
get_regex_cleaned_layout_block_with_prefix_suffix(
layout_block,
CLEAN_REGEX_BY_TAG.get(name)
)
)
semantic_content = self.get_semantic_content_for_entity_name(
name, cleaned_block
)
if (
aff is not None
and isinstance(semantic_content, SemanticInstitution)
and aff.has_type(SemanticInstitution)
):
yield aff
aff = None
if not aff:
if isinstance(semantic_content, SemanticNote):
yield semantic_content
continue
aff = SemanticAffiliationAddress(content_id=next(ids_iterator, '?'))
if prefix_block:
aff.add_content(SemanticNote(layout_block=prefix_block, note_type=f'{name}-prefix'))
aff.add_content(semantic_content)
if suffix_block:
aff.add_content(SemanticNote(layout_block=suffix_block, note_type=f'{name}-suffix'))
if aff:
            yield aff

# End of file: sciencebeam_parser/models/affiliation_address/extract.py (sciencebeam_parser-0.1.8, PyPI)
import os
import logging
from time import monotonic
from typing import Dict, Iterable, Mapping, Optional, Sequence, Set
import PIL.Image
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.document.semantic_document import SemanticGraphic
from sciencebeam_parser.document.layout_document import (
DEFAULT_LAYOUT_PAGE_META,
LayoutDocument,
LayoutGraphic,
LayoutPage,
LayoutPageCoordinates
)
from sciencebeam_parser.cv_models.cv_model import ComputerVisionModel
from sciencebeam_parser.processors.document_page_image import DocumentPageImage
from sciencebeam_parser.processors.graphic_provider import (
DocumentGraphicProvider,
get_semantic_graphic_for_layout_graphic,
get_semantic_graphic_list_for_layout_graphic_list
)
LOGGER = logging.getLogger(__name__)
def get_cropped_image(image: PIL.Image.Image, bounding_box: BoundingBox) -> PIL.Image.Image:
return image.crop((
bounding_box.x,
bounding_box.y,
bounding_box.right,
bounding_box.bottom
))
def get_bounding_box_intersection_area_ratio(
bounding_box_1: BoundingBox,
bounding_box_2: BoundingBox,
empty_ratio: float = 0.0
) -> float:
max_area = max(bounding_box_1.area, bounding_box_2.area)
if not max_area:
return empty_ratio
intersection_area = bounding_box_1.intersection(bounding_box_2).area
return intersection_area / max_area
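# Returns the page graphic whose bounding box overlaps the given bounding box
# the most (intersection area relative to the larger of the two areas),
# or None if the best overlap ratio is below the threshold.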
def get_layout_graphic_with_similar_coordinates(
page_graphics: Sequence[LayoutGraphic],
bounding_box: BoundingBox,
threshold: float = 0.80,
    ignored_graphic_types: Optional[Set[str]] = None
) -> Optional[LayoutGraphic]:
sorted_area_intersection_bounding_boxes = sorted((
(
get_bounding_box_intersection_area_ratio(
bounding_box,
graphic.coordinates.bounding_box
),
graphic
)
for graphic in page_graphics
if graphic.coordinates and (
not ignored_graphic_types or graphic.graphic_type not in ignored_graphic_types
)
), key=lambda t: -t[0])
if not sorted_area_intersection_bounding_boxes:
return None
LOGGER.debug(
'sorted_area_intersection_bounding_boxes: %r',
sorted_area_intersection_bounding_boxes
)
best_area_ratio, best_matching_graphic = sorted_area_intersection_bounding_boxes[0]
if best_area_ratio < threshold:
LOGGER.debug('best_area_ratio below threshold: %.3f < %.3f', best_area_ratio, threshold)
return None
return best_matching_graphic
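# Graphic provider backed by a computer vision model: figures and tables are
# detected on rendered page images; where a detection overlaps an existing
# layout graphic, that graphic is reused, otherwise the detected region is
# cropped from the page image (when asset extraction is enabled).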
class ComputerVisionDocumentGraphicProvider(DocumentGraphicProvider):
def __init__(
self,
computer_vision_model: ComputerVisionModel,
page_image_iterable: Iterable[DocumentPageImage],
temp_dir: str
):
super().__init__()
self.computer_vision_model = computer_vision_model
self.page_image_iterable = page_image_iterable
self.temp_dir = temp_dir
        # Ignore SVG graphics for now: they are also ignored when matching graphics,
        # and an SVG image may not be standalone (it may require the surrounding
        # text to be complete).
self.ignored_graphic_types = {'svg'}
def iter_semantic_graphic_for_image( # pylint: disable=too-many-locals
self,
image: PIL.Image.Image,
extract_graphic_assets: bool,
page_number: int,
page: Optional[LayoutPage]
) -> Iterable[SemanticGraphic]:
LOGGER.debug('image size: %d x %d', image.width, image.height)
page_meta = page.meta if page is not None else DEFAULT_LAYOUT_PAGE_META
page_coordinates = (
page.meta.coordinates if page is not None else None
)
page_graphics = (
page.graphics if page is not None else []
)
cv_start = monotonic()
cv_result = self.computer_vision_model.predict_single(image)
cv_end = monotonic()
cv_instances = cv_result.get_instances_by_type_names(['Figure', 'Table'])
cv_type_name_and_coordinates_list = [
(instance.get_type_name(), instance.get_bounding_box())
for instance in cv_instances
]
LOGGER.info(
(
'cv result, took=%.3fs, page_number=%d, image_size=%dx%d'
', cv_type_name_and_coordinates_list=%r'
),
cv_end - cv_start,
page_number,
image.width,
image.height,
cv_type_name_and_coordinates_list
)
count_by_type_name_map: Dict[str, int] = {}
for type_name, cv_coordinates in cv_type_name_and_coordinates_list:
lower_type_name = type_name.lower()
count_by_type_name_map[type_name] = count_by_type_name_map.get(type_name, 0) + 1
item_number = count_by_type_name_map[type_name]
local_image_path: Optional[str] = None
relative_image_path: Optional[str] = None
scaled_item_coordinates = cv_coordinates
if page_coordinates:
scaled_item_coordinates = (
cv_coordinates
.scale_by(
page_coordinates.width / image.width,
page_coordinates.height / image.height
)
)
matching_layout_graphic = get_layout_graphic_with_similar_coordinates(
page_graphics=page_graphics,
bounding_box=scaled_item_coordinates,
ignored_graphic_types=self.ignored_graphic_types
)
if matching_layout_graphic is not None:
yield get_semantic_graphic_for_layout_graphic(
matching_layout_graphic,
extract_graphic_assets=extract_graphic_assets
)
continue
if extract_graphic_assets:
local_image_path = os.path.join(
self.temp_dir, f'{lower_type_name}-{page_number}-{item_number}.png'
)
relative_image_path = os.path.basename(local_image_path)
cropped_image = get_cropped_image(image, cv_coordinates)
cropped_image.save(local_image_path)
layout_graphic = LayoutGraphic(
coordinates=LayoutPageCoordinates(
x=scaled_item_coordinates.x,
y=scaled_item_coordinates.y,
width=scaled_item_coordinates.width,
height=scaled_item_coordinates.height,
page_number=page_number
),
page_meta=page_meta,
graphic_type=f'cv-{lower_type_name}',
local_file_path=local_image_path
)
semantic_graphic = SemanticGraphic(
layout_graphic=layout_graphic,
relative_path=relative_image_path
)
yield semantic_graphic
def get_page_by_page_number_map(
self,
layout_document: LayoutDocument
) -> Mapping[int, Optional[LayoutPage]]:
return {
page.meta.page_number: page
for page in layout_document.pages
}
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
page_by_page_number_map = self.get_page_by_page_number_map(
layout_document
)
LOGGER.debug(
'cv model: page_by_page_number_map=%r',
page_by_page_number_map
)
has_cv_semantic_graphic: bool = False
for page_image in self.page_image_iterable:
LOGGER.debug('page_image: %r', page_image)
page_number = page_image.page_number
with PIL.Image.open(page_image.page_image_path) as image:
for semantic_graphic in self.iter_semantic_graphic_for_image(
image,
extract_graphic_assets=extract_graphic_assets,
page_number=page_number,
page=page_by_page_number_map.get(page_number)
):
has_cv_semantic_graphic = True
yield semantic_graphic
if not has_cv_semantic_graphic:
LOGGER.info('no graphics detected using cv model, falling back to regular graphics')
yield from get_semantic_graphic_list_for_layout_graphic_list(
layout_document.iter_all_graphics(),
extract_graphic_assets=extract_graphic_assets
            )

# End of file: sciencebeam_parser/processors/cv_graphic_provider.py (sciencebeam_parser-0.1.8, PyPI)
import functools
import logging
import os
from abc import ABC, abstractmethod
from typing import Counter, Iterable, List, Optional, Sequence
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument,
LayoutGraphic,
LayoutPage,
LayoutPageCoordinates,
LayoutToken
)
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper, SemanticGraphic
LOGGER = logging.getLogger(__name__)
class DocumentGraphicProvider(ABC):
@abstractmethod
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
pass
def get_semantic_graphic_for_layout_graphic(
layout_graphic: LayoutGraphic,
extract_graphic_assets: bool
) -> SemanticGraphic:
relative_path: Optional[str] = None
if layout_graphic.local_file_path and extract_graphic_assets:
relative_path = os.path.basename(layout_graphic.local_file_path)
return SemanticGraphic(
layout_graphic=layout_graphic,
relative_path=relative_path
)
def get_semantic_graphic_list_for_layout_graphic_list(
layout_graphic_iterable: Iterable[LayoutGraphic],
extract_graphic_assets: bool
) -> List[SemanticGraphic]:
return [
get_semantic_graphic_for_layout_graphic(
layout_graphic,
extract_graphic_assets=extract_graphic_assets
)
for layout_graphic in layout_graphic_iterable
if layout_graphic.coordinates
]
def get_page_numbers_for_semantic_content_list(
semantic_content_list: Sequence[SemanticContentWrapper]
) -> Sequence[int]:
return sorted({
coordinates.page_number
for semantic_content in semantic_content_list
for coordinates in semantic_content.merged_block.get_merged_coordinates_list()
})
def get_all_page_numbers_of_layout_document(
layout_document: LayoutDocument
) -> Sequence[int]:
return sorted({
page.meta.page_number
for page in layout_document.pages
})
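# Candidate pages for graphic matching are the pages the semantic content
# appears on, plus the immediately following page where it exists in the
# document (graphics may be placed on the page after the referencing content).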
def get_graphic_matching_candidate_page_numbers_for_semantic_content_list(
semantic_content_list: Sequence[SemanticContentWrapper],
layout_document: Optional[LayoutDocument] = None
) -> Sequence[int]:
page_numbers = get_page_numbers_for_semantic_content_list(semantic_content_list)
if layout_document:
document_page_numbers = set(get_all_page_numbers_of_layout_document(layout_document))
page_numbers = sorted(
set(page_numbers).union({
page_number + 1
for page_number in page_numbers
if page_number + 1 in document_page_numbers
})
)
return page_numbers
def get_page_numbers_with_uncommon_page_dimension(
layout_document: LayoutDocument
) -> Sequence[int]:
page_dimension_counter = Counter((
page.meta.coordinates.bounding_box
for page in layout_document.pages
if page.meta and page.meta.coordinates
))
LOGGER.debug('page_dimension_counter: %r', page_dimension_counter)
if len(page_dimension_counter) < 2:
return []
most_common_page_dimension = page_dimension_counter.most_common(1)[0][0]
LOGGER.debug('most_common_page_dimension: %r', most_common_page_dimension)
return sorted({
page.meta.page_number
for page in layout_document.pages
if (
page.meta
and page.meta.coordinates
and page.meta.coordinates.bounding_box != most_common_page_dimension
)
})
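# A page counts as "mostly bitmap graphics" if at least one non-SVG graphic
# covers more than half of the page area.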
def is_page_with_mostly_bitmap_graphics(
layout_page: LayoutPage
) -> bool:
if not layout_page.meta or not layout_page.meta.coordinates:
LOGGER.debug('page has no coordinates')
return False
page_area = layout_page.meta.coordinates.bounding_box.area
if not page_area:
LOGGER.debug('page has no area')
return False
bitmap_graphics_with_area_ratio = [
(graphic, graphic.coordinates.bounding_box.area / page_area)
for graphic in layout_page.graphics
if (
graphic.graphic_type != 'svg'
and graphic.coordinates
)
]
LOGGER.debug('bitmap_graphics_with_area_ratio: %r', bitmap_graphics_with_area_ratio)
if not bitmap_graphics_with_area_ratio:
LOGGER.debug('no bitmap images')
return False
accepted_bitmap_graphics = [
bitmap_graphics
for bitmap_graphics, area_ratio in bitmap_graphics_with_area_ratio
if area_ratio > 0.5
]
if not accepted_bitmap_graphics:
        LOGGER.debug('no sufficiently large bitmap images')
return False
return True
def get_page_numbers_with_mostly_bitmap_graphics(
layout_document: LayoutDocument
) -> Sequence[int]:
return [
page.meta.page_number
for page in layout_document.pages
if (
page.meta
and is_page_with_mostly_bitmap_graphics(page)
)
]
def are_page_coordinates_within_bounding_box(
page_coordinates: Optional[LayoutPageCoordinates],
bounding_box: BoundingBox,
min_area_ratio: float = 0.5
) -> bool:
if not page_coordinates:
return False
item_bounding_box = page_coordinates.bounding_box
item_bounding_box_area = item_bounding_box.area
if not item_bounding_box_area:
return False
intersection_bounding_box = item_bounding_box.intersection(bounding_box)
if not intersection_bounding_box:
return False
if intersection_bounding_box.area / item_bounding_box_area < min_area_ratio:
return False
return True
def is_layout_token_within_bounding_box(layout_token: LayoutToken, **kwargs) -> bool:
return are_page_coordinates_within_bounding_box(layout_token.coordinates, **kwargs)
def is_layout_graphic_within_bounding_box(layout_graphic: LayoutGraphic, **kwargs) -> bool:
return are_page_coordinates_within_bounding_box(layout_graphic.coordinates, **kwargs)
def _remove_tokens_within_bounding_box_flatmap_fn(
layout_token: LayoutToken,
**kwargs
) -> List[LayoutToken]:
if not is_layout_token_within_bounding_box(layout_token, **kwargs):
return [layout_token]
return []
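# Replaces the graphics overlapping the semantic graphic's bounding box with
# the semantic graphic itself; if it is the only semantic graphic on the page,
# all page tokens are attached as its related block, and overlapping text
# tokens are optionally removed.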
def get_layout_page_with_text_or_graphic_replaced_by_graphic(
layout_page: LayoutPage,
semantic_graphic: SemanticGraphic,
is_only_semantic_graphic_on_page: bool,
is_replace_overlapping_text: bool
) -> LayoutPage:
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
assert layout_graphic.coordinates
graphic_bounding_box = layout_graphic.coordinates.bounding_box
if is_only_semantic_graphic_on_page:
layout_graphic = layout_graphic._replace(
related_block=LayoutBlock.for_tokens(list(layout_page.iter_all_tokens()))
)
modified_layout_page = (
layout_page.replace(
graphics=[
_layout_graphic
for _layout_graphic in layout_page.graphics
if not is_layout_graphic_within_bounding_box(
_layout_graphic,
bounding_box=graphic_bounding_box
)
] + [layout_graphic]
)
)
if is_replace_overlapping_text:
modified_layout_page = (
modified_layout_page
.flat_map_layout_tokens(functools.partial(
_remove_tokens_within_bounding_box_flatmap_fn,
bounding_box=graphic_bounding_box
)).remove_empty_blocks()
)
return modified_layout_page
def get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic],
is_replace_overlapping_text: bool
) -> LayoutDocument:
page_by_page_number = {
page.meta.page_number: page
for page in layout_document.pages
if page.meta
}
LOGGER.debug('page_by_page_number.keys: %r', page_by_page_number.keys())
has_changes = False
semantic_graphics_list = list(semantic_graphics)
semantic_graphic_count_by_page = Counter((
semantic_graphic.layout_graphic.coordinates.page_number
for semantic_graphic in semantic_graphics_list
if (
semantic_graphic.layout_graphic
and semantic_graphic.layout_graphic.coordinates
)
))
for semantic_graphic in semantic_graphics_list:
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
if not layout_graphic.coordinates:
continue
page_number = layout_graphic.coordinates.page_number
page_by_page_number[page_number] = (
get_layout_page_with_text_or_graphic_replaced_by_graphic(
page_by_page_number[page_number],
semantic_graphic,
is_only_semantic_graphic_on_page=(
semantic_graphic_count_by_page[page_number] < 2
),
is_replace_overlapping_text=is_replace_overlapping_text
)
)
has_changes = True
if not has_changes:
return layout_document
pages = [
(
page_by_page_number[page.meta.page_number]
if page.meta
else page
)
for page in layout_document.pages
]
return layout_document.replace(pages=pages)
def get_layout_document_with_text_and_graphics_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic]
) -> LayoutDocument:
return get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document,
semantic_graphics=semantic_graphics,
is_replace_overlapping_text=True
)
def get_layout_document_with_graphics_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic]
) -> LayoutDocument:
return get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document,
semantic_graphics=semantic_graphics,
is_replace_overlapping_text=False
)
class SimpleDocumentGraphicProvider(DocumentGraphicProvider):
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
return get_semantic_graphic_list_for_layout_graphic_list(
layout_document.iter_all_graphics(),
extract_graphic_assets=extract_graphic_assets
        )

# End of file: sciencebeam_parser/processors/graphic_provider.py (sciencebeam_parser-0.1.8, PyPI)
from typing import NamedTuple, Set
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.processors.document_page_image import (
DEFAULT_PDF_RENDER_DPI
)
from sciencebeam_parser.processors.graphic_matching import DEFAULT_MAX_GRAPHIC_DISTANCE
class RequestFieldNames:
    """
    "Abstract" field names that should be independent of the model architecture.
    """
TITLE = 'title'
ABSTRACT = 'abstract'
AUTHORS = 'authors'
AFFILIATIONS = 'affiliations'
REFERENCES = 'references'
FRONT_FIELDS = {
RequestFieldNames.TITLE,
RequestFieldNames.ABSTRACT,
RequestFieldNames.AUTHORS,
RequestFieldNames.AFFILIATIONS
}
class FullTextProcessorConfig(NamedTuple):
extract_front: bool = True
extract_authors: bool = True
extract_affiliations: bool = True
extract_body_sections: bool = True
extract_acknowledgements: bool = True
extract_back_sections: bool = True
extract_references: bool = True
extract_citation_fields: bool = True
extract_citation_authors: bool = True
extract_citation_editors: bool = False
extract_figure_fields: bool = True
extract_table_fields: bool = True
merge_raw_authors: bool = False
extract_graphic_bounding_boxes: bool = True
extract_graphic_assets: bool = False
use_cv_model: bool = False
cv_render_dpi: float = DEFAULT_PDF_RENDER_DPI
use_ocr_model: bool = False
replace_text_by_cv_graphic: bool = False
max_graphic_distance: float = DEFAULT_MAX_GRAPHIC_DISTANCE
@staticmethod
def from_app_config(app_config: AppConfig) -> 'FullTextProcessorConfig':
return FullTextProcessorConfig()._replace(
**app_config.get('processors', {}).get('fulltext', {})
)
def get_for_requested_field_names(
self,
request_field_names: Set[str]
) -> 'FullTextProcessorConfig':
if not request_field_names:
return self
remaining_field_names = request_field_names - FRONT_FIELDS - {RequestFieldNames.REFERENCES}
if remaining_field_names:
return self
extract_front = bool(FRONT_FIELDS & request_field_names)
extract_authors = RequestFieldNames.AUTHORS in request_field_names
extract_affiliations = RequestFieldNames.AFFILIATIONS in request_field_names
extract_references = RequestFieldNames.REFERENCES in request_field_names
return self._replace( # pylint: disable=no-member
extract_front=extract_front,
extract_authors=extract_authors,
extract_affiliations=extract_affiliations,
extract_body_sections=False,
extract_acknowledgements=False,
extract_back_sections=False,
extract_references=extract_references,
extract_graphic_bounding_boxes=False
)
def get_for_header_document(self) -> 'FullTextProcessorConfig':
        return self.get_for_requested_field_names(FRONT_FIELDS)

# End of file: sciencebeam_parser/processors/fulltext/config.py (sciencebeam_parser-0.1.8, PyPI)
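# Illustrative usage sketch (added for clarity; not part of the package source).
# It only uses FullTextProcessorConfig and RequestFieldNames from config.py
# above: requesting only front-matter fields disables body, back-section,
# reference and graphics extraction.
_example_config = FullTextProcessorConfig()
_front_only_config = _example_config.get_for_requested_field_names(
    {RequestFieldNames.TITLE, RequestFieldNames.ABSTRACT}
)
assert _front_only_config.extract_front
assert not _front_only_config.extract_body_sections
assert not _front_only_config.extract_references
assert not _front_only_config.extract_graphic_bounding_boxes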
import logging
import multiprocessing
from typing import (
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
Type,
Union
)
from sciencebeam_parser.models.data import AppFeaturesContext, DEFAULT_APP_FEATURES_CONTEXT
from sciencebeam_parser.models.model import LayoutDocumentLabelResult, Model
from sciencebeam_parser.cv_models.cv_model import ComputerVisionModel
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticAffiliationAddress,
SemanticAuthor,
SemanticCitation,
SemanticContentWrapper,
SemanticDocument,
SemanticEditor,
SemanticFigure,
SemanticFigureCitation,
SemanticGraphic,
SemanticInvalidReference,
SemanticLabel,
SemanticMixedContentWrapper,
SemanticMixedNote,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawEditors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable,
SemanticReference,
SemanticReferenceCitation,
SemanticReferenceList,
SemanticSection,
SemanticSectionTypes,
SemanticTable,
SemanticTableCitation,
T_SemanticContentWrapper,
T_SemanticName,
T_SemanticRawNameList
)
from sciencebeam_parser.document.tei_document import TeiDocument, get_tei_for_semantic_document
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.models.segmentation.model import SegmentationModel
from sciencebeam_parser.models.header.model import HeaderModel
from sciencebeam_parser.models.name.model import NameModel
from sciencebeam_parser.models.affiliation_address.model import AffiliationAddressModel
from sciencebeam_parser.models.fulltext.model import FullTextModel
from sciencebeam_parser.models.figure.model import FigureModel
from sciencebeam_parser.models.table.model import TableModel
from sciencebeam_parser.models.reference_segmenter.model import ReferenceSegmenterModel
from sciencebeam_parser.models.citation.model import CitationModel
from sciencebeam_parser.processors.ref_matching import (
ChainedContentIdMatcher,
ContentIdMatcher,
PartialContentIdMatcher,
SimpleContentIdMatcher
)
from sciencebeam_parser.processors.document_page_image import (
iter_pdf_document_page_images
)
from sciencebeam_parser.processors.graphic_matching import (
BoundingBoxDistanceGraphicMatcher,
ChainedGraphicMatcher,
GraphicMatcher,
GraphicRelatedBlockTextGraphicMatcher,
OpticalCharacterRecognitionGraphicMatcher
)
from sciencebeam_parser.processors.graphic_provider import (
DocumentGraphicProvider,
SimpleDocumentGraphicProvider,
get_graphic_matching_candidate_page_numbers_for_semantic_content_list,
get_layout_document_with_graphics_replaced_by_graphics,
get_layout_document_with_text_and_graphics_replaced_by_graphics,
get_page_numbers_with_mostly_bitmap_graphics,
get_page_numbers_with_uncommon_page_dimension
)
from sciencebeam_parser.processors.fulltext.config import (
FullTextProcessorConfig
)
LOGGER = logging.getLogger(__name__)
class FullTextProcessorDocumentContext(NamedTuple):
pdf_path: Optional[str] = None
temp_dir: Optional[str] = None
def get_cv_document_graphic_provider(
cv_model: ComputerVisionModel,
context: FullTextProcessorDocumentContext,
page_numbers: Optional[Sequence[int]],
cv_render_dpi: float
) -> DocumentGraphicProvider:
from sciencebeam_parser.processors.cv_graphic_provider import ( # noqa pylint: disable=import-outside-toplevel
ComputerVisionDocumentGraphicProvider
)
assert context.pdf_path
assert context.temp_dir
return ComputerVisionDocumentGraphicProvider(
cv_model,
iter_pdf_document_page_images(
pdf_path=context.pdf_path,
output_dir=context.temp_dir,
page_numbers=page_numbers,
dpi=cv_render_dpi,
thread_count=multiprocessing.cpu_count()
),
temp_dir=context.temp_dir
)
class FullTextProcessor:
def __init__(
self,
fulltext_models: FullTextModels,
config: Optional[FullTextProcessorConfig] = None,
app_features_context: AppFeaturesContext = DEFAULT_APP_FEATURES_CONTEXT
) -> None:
self.fulltext_models = fulltext_models
self.app_features_context = app_features_context
if not config:
config = FullTextProcessorConfig()
self.config = config
@property
def segmentation_model(self) -> SegmentationModel:
return self.fulltext_models.segmentation_model
@property
def header_model(self) -> HeaderModel:
return self.fulltext_models.header_model
@property
def affiliation_address_model(self) -> AffiliationAddressModel:
return self.fulltext_models.affiliation_address_model
@property
def name_header_model(self) -> NameModel:
return self.fulltext_models.name_header_model
@property
def name_citation_model(self) -> NameModel:
return self.fulltext_models.name_citation_model
@property
def fulltext_model(self) -> FullTextModel:
return self.fulltext_models.fulltext_model
@property
def figure_model(self) -> FigureModel:
return self.fulltext_models.figure_model
@property
def table_model(self) -> TableModel:
return self.fulltext_models.table_model
@property
def reference_segmenter_model(self) -> ReferenceSegmenterModel:
return self.fulltext_models.reference_segmenter_model
@property
def citation_model(self) -> CitationModel:
return self.fulltext_models.citation_model
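    # Main entry point: the segmentation model is applied first; the resulting
    # segments are then passed to the header, fulltext, figure, table,
    # reference and name models (depending on the configuration flags) and the
    # results are assembled into a SemanticDocument.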
def get_semantic_document_for_layout_document(
self,
layout_document: LayoutDocument,
context: Optional[FullTextProcessorDocumentContext] = None
) -> SemanticDocument:
if context is None:
context = FullTextProcessorDocumentContext()
layout_document = self._preprocess_layout_graphics(
layout_document,
context=context
)
segmentation_label_result = self.segmentation_model.get_label_layout_document_result(
layout_document,
app_features_context=self.app_features_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
document = SemanticDocument()
if self.config.extract_front:
self._process_header_layout_document(
header_layout_document=header_layout_document,
semantic_document=document
)
if self.config.extract_body_sections:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.body_section,
segmentation_label_result,
'<body>',
SemanticSectionTypes.OTHER
)
if self.config.extract_acknowledgements:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.back_section,
segmentation_label_result,
'<acknowledgement>',
SemanticSectionTypes.ACKNOWLEDGEMENT
)
if self.config.extract_back_sections:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.back_section,
segmentation_label_result,
'<annex>',
SemanticSectionTypes.OTHER
)
if self.config.extract_references:
self._extract_raw_references_from_segmentation(
semantic_document=document,
segmentation_label_result=segmentation_label_result
)
if self.config.extract_citation_fields:
self._extract_reference_fields_from_raw_references(
semantic_document=document
)
if self.config.extract_citation_authors or self.config.extract_citation_editors:
self._extract_reference_name_lists_from_raw_references(
semantic_document=document
)
references = list(document.iter_by_type_recursively(SemanticReference))
ref_citations = list(document.iter_by_type_recursively(SemanticReferenceCitation))
self._assign_content_ids(references, iter(iter_ids('b')))
self._assign_target_content_ids(ref_citations, ChainedContentIdMatcher([
SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(references, SemanticLabel)
),
PartialContentIdMatcher(
self._get_semantic_content_text_by_content_id(
references, SemanticRawReferenceText
)
)
]))
if self.config.extract_figure_fields:
self._extract_figure_fields_from_raw_figures(semantic_document=document)
figures = list(document.iter_by_type_recursively(SemanticFigure))
figure_citations = list(document.iter_by_type_recursively(SemanticFigureCitation))
self._assign_content_ids(figures, iter(iter_ids('fig_')))
self._assign_target_content_ids(figure_citations, SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(figures, SemanticLabel)
))
if self.config.extract_table_fields:
self._extract_table_fields_from_raw_tables(semantic_document=document)
tables = list(document.iter_by_type_recursively(SemanticTable))
table_citations = list(document.iter_by_type_recursively(SemanticTableCitation))
self._assign_content_ids(tables, iter(iter_ids('tab_')))
self._assign_target_content_ids(table_citations, SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(tables, SemanticLabel)
))
if self.config.extract_graphic_bounding_boxes:
self._process_graphics(
document=document,
layout_document=layout_document,
context=context
)
return document
def _process_header_layout_document(
self,
header_layout_document: LayoutDocument,
semantic_document: SemanticDocument
):
LOGGER.debug('header_layout_document: %s', header_layout_document)
if not header_layout_document.pages:
return
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
entity_blocks = self.header_model.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
)
self.header_model.update_semantic_document_with_entity_blocks(
semantic_document, entity_blocks
)
if self.config.extract_authors:
self._process_raw_authors(semantic_document.front)
if self.config.extract_affiliations:
self._process_raw_affiliations(semantic_document)
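    # When the CV model is enabled, pages with uncommon dimensions (that are not
    # already dominated by bitmap graphics) are scanned for graphics; detected
    # graphics replace the corresponding layout graphics and, optionally, the
    # overlapping text.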
def _preprocess_layout_graphics(
self,
layout_document: LayoutDocument,
context: FullTextProcessorDocumentContext
) -> LayoutDocument:
if not self.config.use_cv_model:
return layout_document
candidate_page_numbers = sorted(
set(get_page_numbers_with_uncommon_page_dimension(layout_document))
- set(get_page_numbers_with_mostly_bitmap_graphics(layout_document))
)
LOGGER.debug('candidate_page_numbers: %r', candidate_page_numbers)
if not candidate_page_numbers:
return layout_document
document_graphic_provider = self._get_document_graphic_provider(
context=context,
page_numbers=candidate_page_numbers
)
semantic_graphics = list(
document_graphic_provider.iter_semantic_graphic_for_layout_document(
layout_document,
extract_graphic_assets=self.config.extract_graphic_assets
)
)
if not semantic_graphics:
LOGGER.info('no semantic graphics found on pages %r', candidate_page_numbers)
return layout_document
if not self.config.replace_text_by_cv_graphic:
return get_layout_document_with_graphics_replaced_by_graphics(
layout_document,
semantic_graphics
)
return get_layout_document_with_text_and_graphics_replaced_by_graphics(
layout_document,
semantic_graphics
)
def _process_graphics(
self,
document: SemanticDocument,
layout_document: LayoutDocument,
context: FullTextProcessorDocumentContext
):
unmatched_graphics_container = SemanticMixedNote(note_type='unmatched_graphics')
candidate_semantic_content_list = list(
document.iter_by_types_recursively((SemanticFigure, SemanticTable,))
)
self._match_graphic_elements(
semantic_graphic_list=list(
self._get_document_graphic_provider(
context=context,
page_numbers=(
get_graphic_matching_candidate_page_numbers_for_semantic_content_list(
candidate_semantic_content_list,
layout_document=layout_document
)
)
).iter_semantic_graphic_for_layout_document(
layout_document,
extract_graphic_assets=self.config.extract_graphic_assets
)
),
candidate_semantic_content_list=candidate_semantic_content_list,
unmatched_graphics_container=unmatched_graphics_container
)
if not unmatched_graphics_container.is_empty():
LOGGER.debug('unmatched_graphics_container: %r', unmatched_graphics_container)
document.back_section.add_content(unmatched_graphics_container)
else:
LOGGER.debug('no unmatched graphics')
def _get_document_graphic_provider(
self,
context: FullTextProcessorDocumentContext,
page_numbers: Optional[Sequence[int]]
) -> DocumentGraphicProvider:
if self.config.use_cv_model:
assert self.fulltext_models.cv_model is not None
return get_cv_document_graphic_provider(
cv_model=self.fulltext_models.cv_model,
context=context,
page_numbers=page_numbers,
cv_render_dpi=self.config.cv_render_dpi
)
return SimpleDocumentGraphicProvider()
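    # Graphics are matched to figure/table candidates by bounding box distance
    # first, then by the text of related blocks, and optionally via OCR;
    # graphics that cannot be matched are collected in the provided container.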
def _match_graphic_elements(
self,
semantic_graphic_list: Sequence[SemanticGraphic],
candidate_semantic_content_list: Sequence[SemanticContentWrapper],
unmatched_graphics_container: SemanticMixedContentWrapper
):
_graphic_matchers: List[GraphicMatcher] = [
BoundingBoxDistanceGraphicMatcher(
max_distance=self.config.max_graphic_distance
),
GraphicRelatedBlockTextGraphicMatcher()
]
if self.config.use_ocr_model:
assert self.fulltext_models.ocr_model
_graphic_matchers.append(
OpticalCharacterRecognitionGraphicMatcher(
ocr_model=self.fulltext_models.ocr_model
)
)
graphic_matcher = ChainedGraphicMatcher(_graphic_matchers)
graphic_match_result = graphic_matcher.get_graphic_matches(
semantic_graphic_list=semantic_graphic_list,
candidate_semantic_content_list=candidate_semantic_content_list
)
for graphic_match in graphic_match_result.graphic_matches:
if isinstance(graphic_match.candidate_semantic_content, SemanticMixedContentWrapper):
graphic_match.candidate_semantic_content.add_content(
graphic_match.semantic_graphic
)
LOGGER.info('unmatched_graphics: %r', graphic_match_result.unmatched_graphics)
for unmatched_graphic in graphic_match_result.unmatched_graphics:
unmatched_graphics_container.add_content(unmatched_graphic)
def _assign_content_ids(
self,
semantic_content_iterable: Iterable[SemanticMixedContentWrapper],
content_id_iterator: Iterator[str]
):
for semantic_content in semantic_content_iterable:
semantic_content.content_id = next(content_id_iterator)
def _get_semantic_content_text_by_content_id(
self,
semantic_content_iterable: Iterable[SemanticMixedContentWrapper],
type_: Type[SemanticContentWrapper]
) -> Mapping[str, str]:
d = {}
for semantic_content in semantic_content_iterable:
if not semantic_content.content_id:
continue
text = semantic_content.get_text_by_type(type_)
if not text:
continue
d[semantic_content.content_id] = text
return d
def _assign_target_content_ids(
self,
semantic_content_iterable: Iterable[SemanticCitation],
content_id_matcher: ContentIdMatcher
):
for citation in semantic_content_iterable:
content_id = content_id_matcher.get_id_by_text(citation.get_text())
if content_id:
citation.target_content_id = content_id
def _process_raw_authors(self, semantic_parent: SemanticMixedContentWrapper):
result_content: List[SemanticContentWrapper] = []
raw_authors: List[SemanticRawAuthors] = []
for semantic_content in semantic_parent:
if isinstance(semantic_content, SemanticRawAuthors):
raw_authors.append(semantic_content)
continue
result_content.append(semantic_content)
if raw_authors:
if self.config.merge_raw_authors:
raw_authors_layout_documents = [
LayoutDocument.for_blocks([
block
for raw_author in raw_authors
for block in raw_author.iter_blocks()
])
]
else:
raw_authors_layout_documents = [
LayoutDocument.for_blocks(list(raw_author.iter_blocks()))
for raw_author in raw_authors
]
labeled_layout_tokens_list = self.name_header_model.predict_labels_for_layout_documents(
raw_authors_layout_documents,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens_list (author): %r', labeled_layout_tokens_list)
authors_iterable = (
author
for labeled_layout_tokens in labeled_layout_tokens_list
for author in (
self.name_header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)
)
for author in authors_iterable:
result_content.append(author)
semantic_parent.mixed_content = result_content
def _process_raw_affiliations(self, semantic_document: SemanticDocument):
result_content: List[SemanticContentWrapper] = []
raw_aff_address_list: List[SemanticRawAffiliationAddress] = []
for semantic_content in semantic_document.front:
if isinstance(semantic_content, SemanticRawAffiliationAddress):
raw_aff_address_list.append(semantic_content)
continue
result_content.append(semantic_content)
if raw_aff_address_list:
raw_aff_layout_documents = [
LayoutDocument.for_blocks(list(raw_aff_or_address.iter_blocks()))
for raw_aff_or_address in raw_aff_address_list
]
labeled_layout_tokens_list = (
self.affiliation_address_model
.predict_labels_for_layout_documents(
raw_aff_layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list (aff): %r', labeled_layout_tokens_list)
aff_iterable = (
aff
for labeled_layout_tokens in labeled_layout_tokens_list
for aff in (
self.affiliation_address_model
.iter_semantic_content_for_labeled_layout_tokens(labeled_layout_tokens)
)
)
for aff in aff_iterable:
result_content.append(aff)
semantic_document.front.mixed_content = result_content
self._assign_content_ids(
semantic_document.front.iter_by_type(SemanticAffiliationAddress),
iter(iter_ids('aff'))
)
def _extract_raw_references_from_segmentation(
self,
semantic_document: SemanticDocument,
segmentation_label_result: LayoutDocumentLabelResult
):
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
LOGGER.debug('references_layout_document: %s', references_layout_document)
if not references_layout_document:
return
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_content_iterable = (
self.reference_segmenter_model
.iter_semantic_content_for_labeled_layout_tokens(labeled_layout_tokens)
)
reference_list = SemanticReferenceList(list(semantic_content_iterable))
semantic_document.back_section.add_content(reference_list)
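    # Each raw reference is parsed with the citation model; the resulting
    # SemanticReference (or SemanticInvalidReference) is yielded in the same
    # order as the input raw references.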
def _iter_parse_semantic_references(
self,
semantic_raw_references: List[SemanticRawReference]
) -> Iterable[Union[SemanticReference, SemanticInvalidReference]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_reference.merged_block])
for semantic_raw_reference in semantic_raw_references
]
labeled_layout_tokens_list = (
self.citation_model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_reference in zip(
labeled_layout_tokens_list, semantic_raw_references
):
semantic_content_iterable = (
self.citation_model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens,
semantic_raw_reference=semantic_raw_reference
)
)
ref: Optional[Union[SemanticReference, SemanticInvalidReference]] = None
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, (SemanticReference, SemanticInvalidReference)):
ref = semantic_content
if not ref:
raise AssertionError('no semantic reference extracted')
yield ref
def _extract_reference_fields_from_raw_references(
self,
semantic_document: SemanticDocument
):
reference_lists = list(semantic_document.back_section.iter_by_type(
SemanticReferenceList
))
semantic_raw_references = [
raw_reference
for reference_list in reference_lists
for raw_reference in reference_list.iter_by_type(SemanticRawReference)
]
semantic_references = list(self._iter_parse_semantic_references(
semantic_raw_references
))
LOGGER.debug('semantic_references: %r', semantic_references)
semantic_reference_by_semantic_raw_reference_id = {
id(semantic_raw_reference): semantic_reference
for semantic_raw_reference, semantic_reference in zip(
semantic_raw_references, semantic_references
)
}
LOGGER.debug(
'semantic_reference_by_semantic_raw_reference_id keys: %s',
semantic_reference_by_semantic_raw_reference_id.keys()
)
for reference_list in reference_lists:
updated_content: List[SemanticContentWrapper] = []
for semantic_content in reference_list:
if isinstance(semantic_content, SemanticRawReference):
semantic_reference = semantic_reference_by_semantic_raw_reference_id[
id(semantic_content)
]
updated_content.append(semantic_reference)
continue
updated_content.append(semantic_content)
reference_list.mixed_content = updated_content
def _iter_parse_semantic_name_lists(
self,
semantic_raw_name_lists: Sequence[T_SemanticRawNameList],
name_type: Type[T_SemanticName]
) -> Iterable[Tuple[T_SemanticRawNameList, List[SemanticContentWrapper]]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_name_list.merged_block])
for semantic_raw_name_list in semantic_raw_name_lists
]
labeled_layout_tokens_list = (
self.name_citation_model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_name_list in zip(
labeled_layout_tokens_list, semantic_raw_name_lists
):
semantic_content_iterable = (
self.name_citation_model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens,
name_type=name_type
)
)
yield semantic_raw_name_list, list(semantic_content_iterable)
def _extract_reference_name_lists_from_raw_references(
self,
semantic_document: SemanticDocument
):
reference_lists = list(semantic_document.back_section.iter_by_type(
SemanticReferenceList
))
ref_list = [
ref
for reference_list in reference_lists
for ref in reference_list.iter_by_type(SemanticReference)
]
if self.config.extract_citation_authors:
raw_authors = [
raw_author
for ref in ref_list
for raw_author in ref.iter_by_type(SemanticRawAuthors)
]
else:
raw_authors = []
if self.config.extract_citation_editors:
raw_editors = [
raw_author
for ref in ref_list
for raw_author in ref.iter_by_type(SemanticRawEditors)
]
else:
raw_editors = []
content_list_by_raw_author_id = {
id(raw_author): content_list
for raw_author, content_list in (
self._iter_parse_semantic_name_lists(raw_authors, name_type=SemanticAuthor)
)
}
        content_list_by_raw_editor_id = {
            id(raw_editor): content_list
            for raw_editor, content_list in (
                self._iter_parse_semantic_name_lists(raw_editors, name_type=SemanticEditor)
            )
        }
LOGGER.debug(
'content_list_by_raw_author_id keys: %s',
content_list_by_raw_author_id.keys()
)
LOGGER.debug(
'content_list_by_raw_editor_id keys: %s',
content_list_by_raw_editor_id.keys()
)
for reference_list in reference_lists:
for semantic_content in reference_list:
if isinstance(semantic_content, SemanticReference):
if self.config.extract_citation_authors:
semantic_content.flat_map_inplace_by_type(
SemanticRawAuthors,
lambda raw_author: content_list_by_raw_author_id[
id(raw_author)
]
)
if self.config.extract_citation_editors:
semantic_content.flat_map_inplace_by_type(
SemanticRawEditors,
lambda raw_editor: content_list_by_raw_editor_id[
id(raw_editor)
]
)
def _iter_parse_semantic_content_lists(
self,
semantic_raw_content_lists: Sequence[T_SemanticContentWrapper],
model: Model
) -> Iterable[Tuple[T_SemanticContentWrapper, List[SemanticContentWrapper]]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_name_list.merged_block])
for semantic_raw_name_list in semantic_raw_content_lists
]
labeled_layout_tokens_list = (
model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_name_list in zip(
labeled_layout_tokens_list, semantic_raw_content_lists
):
semantic_content_iterable = (
model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)
yield semantic_raw_name_list, list(semantic_content_iterable)
def _extract_semantic_content_from_raw_content(
self,
semantic_document: SemanticDocument,
semantic_type: Type[T_SemanticContentWrapper],
model: Model
):
parents = [
parent
for root in [
semantic_document.body_section,
semantic_document.back_section
]
for parent in root.iter_parent_by_semantic_type_recursively(
semantic_type
)
]
raw_content_lists = [
raw_content
for parent in parents
for raw_content in parent.iter_by_type(semantic_type)
]
content_list_by_raw_content_id = {
id(raw_content): content_list
for raw_content, content_list in (
self._iter_parse_semantic_content_lists(
raw_content_lists,
model
)
)
}
LOGGER.debug(
'content_list_by_raw_content_id keys: %s',
content_list_by_raw_content_id.keys()
)
for parent in parents:
parent.flat_map_inplace_by_type(
semantic_type,
lambda raw_content: content_list_by_raw_content_id[
id(raw_content)
]
)
def _extract_figure_fields_from_raw_figures(
self,
semantic_document: SemanticDocument
):
self._extract_semantic_content_from_raw_content(
semantic_document,
SemanticRawFigure,
self.figure_model
)
def _extract_table_fields_from_raw_tables(
self,
semantic_document: SemanticDocument
):
self._extract_semantic_content_from_raw_content(
semantic_document,
SemanticRawTable,
self.table_model
)
def _update_semantic_section_using_segmentation_result_and_fulltext_model(
self,
semantic_section: SemanticSection,
segmentation_label_result: LayoutDocumentLabelResult,
segmentation_tag: str,
section_type: str
):
layout_document = segmentation_label_result.get_filtered_document_by_label(
segmentation_tag
).remove_empty_blocks()
self._update_semantic_section_using_layout_document_and_fulltext_model(
semantic_section,
layout_document,
section_name=segmentation_tag,
section_type=section_type
)
def _update_semantic_section_using_layout_document_and_fulltext_model(
self,
semantic_section: SemanticSection,
layout_document: LayoutDocument,
section_name: str,
section_type: str
):
LOGGER.debug('layout_document (%r): %s', section_name, layout_document)
if not layout_document.pages:
return
labeled_layout_tokens = self.fulltext_model.predict_labels_for_layout_document(
layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens (%r): %r', section_name, labeled_layout_tokens)
entity_blocks = self.fulltext_model.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
)
self.fulltext_model.update_section_with_entity_blocks(
semantic_section,
entity_blocks,
section_type=section_type
)
def get_tei_document_for_layout_document(
self,
layout_document: LayoutDocument
) -> TeiDocument:
return get_tei_for_semantic_document(
self.get_semantic_document_for_layout_document(
layout_document
)
        )

# End of file: sciencebeam_parser/processors/fulltext/processor.py (sciencebeam_parser-0.1.8, PyPI)
import logging
import os
from contextlib import ExitStack
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from time import monotonic
from typing import List, Optional, Set
from zipfile import ZipFile
from lxml import etree
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.config.config import AppConfig, get_download_dir
from sciencebeam_parser.external.pdfalto.wrapper import PdfAltoWrapper
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
from sciencebeam_parser.external.wapiti.wrapper import LazyWapitiBinaryWrapper
from sciencebeam_parser.lookup.loader import load_lookup_from_config
from sciencebeam_parser.models.data import AppFeaturesContext
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticDocument,
SemanticGraphic
)
from sciencebeam_parser.document.tei_document import get_tei_for_semantic_document
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.resources.xslt import TEI_TO_JATS_XSLT_FILE
from sciencebeam_parser.transformers.doc_converter_wrapper import DocConverterWrapper
from sciencebeam_parser.transformers.xslt import XsltTransformerWrapper
from sciencebeam_parser.utils.lazy import LazyLoaded
from sciencebeam_parser.utils.media_types import (
MediaTypes
)
from sciencebeam_parser.utils.text import normalize_text
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.processors.fulltext.api import (
FullTextProcessor,
FullTextProcessorConfig,
FullTextProcessorDocumentContext,
load_models
)
LOGGER = logging.getLogger(__name__)
TEMP_ALTO_XML_FILENAME = 'temp.lxml'
DOC_TO_PDF_SUPPORTED_MEDIA_TYPES = {
MediaTypes.DOCX,
MediaTypes.DOTX,
MediaTypes.DOC,
MediaTypes.RTF
}
JATS_MEDIA_TYPES = {MediaTypes.JATS_XML, MediaTypes.JATS_ZIP}
ASSET_ZIP_MEDIA_TYPES = {MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP}
def normalize_and_tokenize_text(text: str) -> List[str]:
return get_tokenized_tokens(
normalize_text(text),
keep_whitespace=True
)
def normalize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return (
layout_document
.retokenize(tokenize_fn=normalize_and_tokenize_text)
.remove_empty_blocks(**kwargs)
)
def load_app_features_context(
config: AppConfig,
download_manager: DownloadManager
):
return AppFeaturesContext(
country_lookup=load_lookup_from_config(
config.get('lookup', {}).get('country'),
download_manager=download_manager
),
first_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('first_name'),
download_manager=download_manager
),
last_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('last_name'),
download_manager=download_manager
)
)
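# Bundles the generated XML together with any extracted graphic assets into a
# single ZIP file; graphic assets require extract_graphic_assets to have been
# enabled so that relative paths and local files are available.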
def create_asset_zip_for_semantic_document(
zip_filename: str,
semantic_document: SemanticDocument,
relative_xml_filename: str,
local_xml_filename: str
):
semantic_graphic_list = list(semantic_document.iter_by_type_recursively(
SemanticGraphic
))
LOGGER.debug('semantic_graphic_list: %r', semantic_graphic_list)
with ZipFile(zip_filename, 'w') as zip_file:
zip_file.write(
local_xml_filename,
relative_xml_filename
)
for semantic_graphic in semantic_graphic_list:
assert semantic_graphic.relative_path, \
"graphic relative_path missing, ensure extract_graphic_assets was enabled"
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
assert layout_graphic.local_file_path
zip_file.write(
layout_graphic.local_file_path,
semantic_graphic.relative_path
)
LOGGER.debug('response_content (bytes): %d', Path(zip_filename).stat().st_size)
def get_xml_tree(xml_root: etree.ElementBase) -> etree._ElementTree:
if isinstance(xml_root, etree._ElementTree): # pylint: disable=protected-access
        # Note: _XSLTResultTree extends _ElementTree
return xml_root
return etree.ElementTree(xml_root)
def serialize_xml_to_file(
xml_root: etree.ElementBase,
filename: str
):
get_xml_tree(xml_root).write(
filename,
encoding='utf-8',
pretty_print=False
)
@dataclass
class DocumentRequestParameters:
first_page: Optional[int] = None
last_page: Optional[int] = None
class ScienceBeamParserError(RuntimeError):
pass
class BadRequestScienceBeamParserError(ScienceBeamParserError):
pass
class UnsupportedRequestMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class UnsupportedResponseMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class ScienceBeamBaseParser:
def __init__(self, config: AppConfig):
self.config = config
self.download_manager = DownloadManager(
download_dir=get_download_dir(config)
)
self.pdfalto_wrapper = PdfAltoWrapper(
self.download_manager.download_if_url(config['pdfalto']['path'])
)
self.pdfalto_wrapper.ensure_executable()
self.app_context = AppContext(
app_config=config,
download_manager=self.download_manager,
lazy_wapiti_binary_wrapper=LazyWapitiBinaryWrapper(
install_url=config.get('wapiti', {}).get('install_source'),
download_manager=self.download_manager
)
)
self.fulltext_processor_config = FullTextProcessorConfig.from_app_config(app_config=config)
self.fulltext_models = load_models(
config,
app_context=self.app_context,
fulltext_processor_config=self.fulltext_processor_config
)
if config.get('preload_on_startup'):
self.fulltext_models.preload()
self.app_features_context = load_app_features_context(
config,
download_manager=self.download_manager
)
tei_to_jats_config = config.get('xslt', {}).get('tei_to_jats', {})
self.tei_to_jats_xslt_transformer = XsltTransformerWrapper.from_template_file(
TEI_TO_JATS_XSLT_FILE,
xslt_template_parameters=tei_to_jats_config.get('parameters', {})
)
self.doc_to_pdf_enabled = config.get('doc_to_pdf', {}).get('enabled', True)
self.doc_to_pdf_convert_parameters = config.get('doc_to_pdf', {}).get('convert', {})
self.doc_converter_wrapper = DocConverterWrapper(
**config.get('doc_to_pdf', {}).get('listener', {})
)
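# A session holds the per-request state (temporary directory, full text
# processor configuration and document request parameters) and cleans up any
# temporary resources via an ExitStack when closed.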
class ScienceBeamParserBaseSession:
def __init__(
self,
parser: 'ScienceBeamParser',
temp_dir: Optional[str] = None,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None,
document_request_parameters: Optional[DocumentRequestParameters] = None
):
self.parser = parser
self.exit_stack = ExitStack()
self._temp_dir: Optional[str] = temp_dir
if fulltext_processor_config is None:
fulltext_processor_config = parser.fulltext_processor_config
self.fulltext_processor_config = fulltext_processor_config
if document_request_parameters is None:
document_request_parameters = DocumentRequestParameters()
self.document_request_parameters = document_request_parameters
def __enter__(self) -> 'ScienceBeamParserBaseSession':
return self
def close(self):
self.exit_stack.close()
def __exit__(self, exc, value, tb):
self.close()
@property
def temp_dir(self) -> str:
if not self._temp_dir:
temp_dir_context = TemporaryDirectory( # pylint: disable=consider-using-with
suffix='-sb-parser'
)
self.exit_stack.push(temp_dir_context)
self._temp_dir = temp_dir_context.__enter__()
return self._temp_dir
@property
def temp_path(self) -> Path:
return Path(self.temp_dir)
class _ScienceBeamParserSessionDerivative:
def __init__(
self,
session: 'ScienceBeamParserBaseSession'
):
self.session = session
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def temp_dir(self) -> str:
return self.session.temp_dir
@property
def temp_path(self) -> Path:
return self.session.temp_path
class ScienceBeamParserSessionParsedSemanticDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
semantic_document: SemanticDocument
):
super().__init__(session=session)
self.semantic_document = semantic_document
@property
def tei_to_jats_xslt_transformer(self) -> XsltTransformerWrapper:
return self.parser.tei_to_jats_xslt_transformer
def _get_tei_to_jats_xml_root(self, xml_root: etree.ElementBase) -> etree.ElementBase:
start = monotonic()
xml_root = self.tei_to_jats_xslt_transformer(xml_root)
end = monotonic()
LOGGER.info('tei to jats, took=%.3fs', end - start)
return xml_root
def _serialize_xml_to_file(
self,
xml_root: etree.ElementBase,
filename: str
) -> str:
start = monotonic()
serialize_xml_to_file(xml_root, filename=filename)
end = monotonic()
LOGGER.info('serializing xml, took=%.3fs', end - start)
return filename
def get_supported_response_media_type(self) -> Set[str]:
return {
MediaTypes.TEI_XML,
MediaTypes.TEI_ZIP,
MediaTypes.JATS_XML,
MediaTypes.JATS_ZIP
}
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type not in self.get_supported_response_media_type():
raise UnsupportedResponseMediaTypeScienceBeamParserError()
tei_document = get_tei_for_semantic_document(
self.semantic_document
)
xml_root = tei_document.root
relative_xml_filename = 'tei.xml'
if response_media_type in JATS_MEDIA_TYPES:
xml_root = self._get_tei_to_jats_xml_root(xml_root)
relative_xml_filename = 'jats.xml'
local_xml_filename = os.path.join(self.temp_dir, relative_xml_filename)
self._serialize_xml_to_file(xml_root, local_xml_filename)
LOGGER.debug('local_xml_filename: %r', local_xml_filename)
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
zip_filename = os.path.join(self.temp_dir, 'results.zip')
create_asset_zip_for_semantic_document(
zip_filename,
semantic_document=self.semantic_document,
local_xml_filename=local_xml_filename,
relative_xml_filename=relative_xml_filename
)
return zip_filename
return local_xml_filename
class ScienceBeamParserSessionParsedLayoutDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
layout_document: LayoutDocument,
pdf_path: str
):
super().__init__(session=session)
self.layout_document = layout_document
self.pdf_path = pdf_path
@property
def fulltext_models(self) -> FullTextModels:
return self.parser.fulltext_models
@property
def app_features_context(self) -> AppFeaturesContext:
return self.parser.app_features_context
def _get_semantic_document(
self,
fulltext_processor: FullTextProcessor
) -> SemanticDocument:
context = FullTextProcessorDocumentContext(
pdf_path=self.pdf_path,
temp_dir=self.temp_dir
)
semantic_document = (
fulltext_processor
.get_semantic_document_for_layout_document(
self.layout_document,
context=context
)
)
return semantic_document
def get_parsed_semantic_document(
self,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None
) -> ScienceBeamParserSessionParsedSemanticDocument:
if fulltext_processor_config is None:
fulltext_processor_config = self.session.fulltext_processor_config
fulltext_processor = FullTextProcessor(
self.fulltext_models,
app_features_context=self.app_features_context,
config=fulltext_processor_config
)
return ScienceBeamParserSessionParsedSemanticDocument(
self.session,
self._get_semantic_document(fulltext_processor)
)
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.pdf_path
fulltext_processor_config = self.session.fulltext_processor_config
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
fulltext_processor_config = (
fulltext_processor_config
._replace(
extract_graphic_assets=True,
extract_graphic_bounding_boxes=True
)
)
assert fulltext_processor_config.extract_graphic_assets, \
"extract_graphic_assets required for asset zip"
return (
self.get_parsed_semantic_document(
fulltext_processor_config
).get_local_file_for_response_media_type(
response_media_type
)
)
class ScienceBeamParserSessionSource(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
source_path: str,
source_media_type: str
):
super().__init__(session=session)
self.source_path = source_path
self.source_media_type = source_media_type
self.lazy_pdf_path = LazyLoaded[str](self._get_or_convert_to_pdf_path)
self.lazy_alto_xml_path = LazyLoaded[str](self._parse_to_alto_xml)
self.lazy_parsed_layout_document = LazyLoaded[
ScienceBeamParserSessionParsedLayoutDocument
](self._parse_to_parsed_layout_document)
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def doc_to_pdf_enabled(self) -> bool:
return self.parser.doc_to_pdf_enabled
@property
def doc_converter_wrapper(self) -> DocConverterWrapper:
return self.parser.doc_converter_wrapper
@property
def doc_to_pdf_convert_parameters(self) -> dict:
return self.parser.doc_to_pdf_convert_parameters
@property
def pdfalto_wrapper(self) -> PdfAltoWrapper:
return self.parser.pdfalto_wrapper
@property
def document_request_parameters(self) -> DocumentRequestParameters:
return self.session.document_request_parameters
def _get_or_convert_to_pdf_path(
self
) -> str:
LOGGER.info(
'media_type=%r (filename=%r)',
self.source_media_type,
self.source_path
)
if self.source_media_type in DOC_TO_PDF_SUPPORTED_MEDIA_TYPES:
if not self.doc_to_pdf_enabled:
LOGGER.info('doc to pdf not enabled')
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'doc to pdf not enabled'
)
target_temp_file = self.doc_converter_wrapper.convert(
self.source_path,
**self.doc_to_pdf_convert_parameters
)
return target_temp_file
if self.source_media_type != MediaTypes.PDF:
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'unsupported media type: %r' % self.source_media_type
)
return self.source_path
def _parse_to_alto_xml(self) -> str:
output_path = os.path.join(self.temp_dir, TEMP_ALTO_XML_FILENAME)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(
str(self.lazy_pdf_path.get()),
str(output_path),
first_page=self.document_request_parameters.first_page,
last_page=self.document_request_parameters.last_page
)
return output_path
def _parse_to_parsed_layout_document(
self
) -> ScienceBeamParserSessionParsedLayoutDocument:
pdf_path = self.lazy_pdf_path.get()
root = etree.parse(self.lazy_alto_xml_path.get())
layout_document = normalize_layout_document(
parse_alto_root(root),
preserve_empty_pages=True
)
return ScienceBeamParserSessionParsedLayoutDocument(
self.session,
layout_document=layout_document,
pdf_path=pdf_path
)
def get_parsed_layout_document(self) -> ScienceBeamParserSessionParsedLayoutDocument:
return self.lazy_parsed_layout_document.get()
def get_layout_document(self) -> LayoutDocument:
return self.get_parsed_layout_document().layout_document
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.lazy_pdf_path.get()
if response_media_type == MediaTypes.ALTO_XML:
return self.lazy_alto_xml_path.get()
return self.get_parsed_layout_document().get_local_file_for_response_media_type(
response_media_type
)
class ScienceBeamParserSession(ScienceBeamParserBaseSession):
def __enter__(self) -> 'ScienceBeamParserSession':
super().__enter__()
return self
def get_source(
self,
source_path: str,
source_media_type: str
) -> ScienceBeamParserSessionSource:
return ScienceBeamParserSessionSource(
self,
source_path=source_path,
source_media_type=source_media_type
)
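# Illustrative usage sketch of the high-level parser API (comments only, not part
# of the library itself); it assumes a local 'example.pdf' exists and that
# AppConfig, DEFAULT_CONFIG_FILE and MediaTypes are imported in the calling code:
#     config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE)
#     parser = ScienceBeamParser.from_config(config)
#     with parser.get_new_session() as session:
#         source = session.get_source('example.pdf', MediaTypes.PDF)
#         tei_xml_path = source.get_local_file_for_response_media_type(
#             MediaTypes.TEI_XML
#         )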
class ScienceBeamParser(ScienceBeamBaseParser):
@staticmethod
def from_config(config: AppConfig) -> 'ScienceBeamParser':
return ScienceBeamParser(config)
def get_new_session(self, **kwargs) -> ScienceBeamParserSession:
return ScienceBeamParserSession(self, **kwargs) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/app/parser.py | 0.703142 | 0.154951 | parser.py | pypi |
import mimetypes
from typing import Optional, Sequence
class MediaTypes:
"""
Media Types used by ScienceBeam Parser.
Where possible, these correspond to official media types.
In some instances, no official media type is defined yet.
"""
PDF = 'application/pdf'
DOC = 'application/msword'
DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
DOTX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.template'
RTF = 'application/rtf'
XML = 'application/xml'
ZIP = 'application/zip'
TEI_XML = 'application/tei+xml'
JATS_XML = 'application/vnd.jats+xml'
TEI_ZIP = 'application/tei+xml+zip'
JATS_ZIP = 'application/vnd.jats+xml+zip'
ALTO_XML = 'application/vnd.alto+xml'
JSON = 'application/json'
OCTET_STREAM = 'application/octet-stream'
WILDCARD_MEDIA_TYPE = '*/*'
MEDIA_TYPE_SUFFIX_MAP = {
# fixed mime type suffix map (which may be incorrectly defined in Python 3.5)
MediaTypes.DOC: '.doc',
# additional types
MediaTypes.TEI_XML: '.tei.xml',
MediaTypes.JATS_XML: '.jats.xml',
MediaTypes.TEI_ZIP: '.tei.zip',
MediaTypes.JATS_ZIP: '.jats.zip'
}
def guess_extension_for_media_type(media_type: str) -> Optional[str]:
ext = MEDIA_TYPE_SUFFIX_MAP.get(media_type)
if not ext:
ext = mimetypes.guess_extension(media_type)
return ext
def guess_media_type_for_filename(filename: str) -> Optional[str]:
return mimetypes.guess_type(filename)[0]
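# Illustrative examples (comments only), based on the suffix map above and
# Python's standard mimetypes module:
#     guess_extension_for_media_type(MediaTypes.TEI_XML)   # -> '.tei.xml'
#     guess_extension_for_media_type(MediaTypes.PDF)       # -> '.pdf'
#     guess_media_type_for_filename('example.pdf')         # -> 'application/pdf'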
def get_first_matching_media_type(
accept_media_types: Sequence[str],
available_media_types: Sequence[str]
) -> Optional[str]:
if not available_media_types:
return None
if not accept_media_types:
return available_media_types[0]
for accept_media_type in accept_media_types:
if accept_media_type == WILDCARD_MEDIA_TYPE:
return available_media_types[0]
for available_media_type in available_media_types:
if accept_media_type == available_media_type:
return available_media_type
return None | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/media_types.py | 0.699152 | 0.204501 | media_types.py | pypi |
import logging
import re
from itertools import zip_longest
from typing import Mapping, NamedTuple, Optional, Sequence, Tuple, Union
from lxml import etree
from lxml.builder import ElementMaker
LOGGER = logging.getLogger(__name__)
class TagExpression(NamedTuple):
tag: str
attrib: Mapping[str, str]
def create_node(self, *args, element_maker: ElementMaker):
try:
return element_maker(self.tag, self.attrib, *args)
except ValueError as exc:
raise ValueError(
'failed to create node with tag=%r, attrib=%r due to %s' % (
self.tag, self.attrib, exc
)
) from exc
def parse_tag_expression(tag_expression: str) -> TagExpression:
match = re.match(r'^([^\[]+)(\[@?([^=]+)="(.+)"\])?$', tag_expression)
if not match:
raise ValueError('invalid tag expression: %s' % tag_expression)
LOGGER.debug('match: %s', match.groups())
tag_name = match.group(1)
if match.group(2):
attrib = {match.group(3): match.group(4)}
else:
attrib = {}
return TagExpression(tag=tag_name, attrib=attrib)
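# Illustrative examples (comments only), showing how tag expressions with an
# optional attribute predicate are parsed:
#     parse_tag_expression('title')
#         # -> TagExpression(tag='title', attrib={})
#     parse_tag_expression('note[@type="other"]')
#         # -> TagExpression(tag='note', attrib={'type': 'other'})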
def _get_last_child_or_none(element: etree.ElementBase) -> Optional[etree.ElementBase]:
try:
return element[-1]
except IndexError:
return None
def _append_text(element: etree.ElementBase, text: Optional[str]) -> None:
if not text:
return
last_child = _get_last_child_or_none(element)
if last_child is not None and last_child.tail:
last_child.tail = last_child.tail + '' + text
elif last_child is not None:
last_child.tail = text
elif element.text:
element.text = element.text + '' + text
else:
element.text = text
def _get_common_path(path1: Sequence[str], path2: Sequence[str]) -> Sequence[str]:
if path1 == path2:
return path1
common_path = []
for path1_element, path2_element in zip_longest(path1, path2):
if path1_element != path2_element:
break
common_path.append(path1_element)
return common_path
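# Illustrative examples (comments only), showing the longest common prefix of
# two element paths:
#     _get_common_path(['text', 'front'], ['text', 'body'])   # -> ['text']
#     _get_common_path(['text'], ['text', 'front'])           # -> ['text']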
def _get_element_at_path(
current_element: etree.ElementBase,
current_path: Sequence[str],
required_path: Sequence[str],
element_maker: ElementMaker
) -> Tuple[etree.ElementBase, Sequence[str]]:
if required_path != current_path:
common_path = _get_common_path(current_path, required_path)
LOGGER.debug(
'required element path: %s -> %s (common path: %s)',
current_path, required_path, common_path
)
for _ in range(len(current_path) - len(common_path)):
current_element = current_element.getparent()
current_path = list(common_path)
for path_fragment in required_path[len(common_path):]:
try:
parsed_path_fragment = parse_tag_expression(path_fragment)
child = parsed_path_fragment.create_node(
element_maker=element_maker
)
except ValueError as exc:
raise ValueError('failed to create node for %r due to %s' % (
path_fragment, exc
)) from exc
current_element.append(child)
current_element = child
current_path.append(path_fragment)
return current_element, current_path
class XmlTreeWriter:
def __init__(
self,
parent: etree.ElementBase,
element_maker: ElementMaker
):
self.current_element = parent
self.current_path: Sequence[str] = []
self.element_maker = element_maker
@property
def root(self) -> etree.ElementBase:
return self.current_element.getroottree().getroot()
def append_text(self, text: str):
_append_text(self.current_element, text)
def append(self, element_or_text: Union[etree.ElementBase, str]):
if isinstance(element_or_text, str):
self.append_text(element_or_text)
else:
self.current_element.append(element_or_text)
def append_all(self, *element_or_text_list: Sequence[Union[etree.ElementBase, str]]):
for element_or_text in element_or_text_list:
self.append(element_or_text)
def require_path(self, required_path: Sequence[str]):
self.current_element, self.current_path = _get_element_at_path(
self.current_element, self.current_path,
required_path,
element_maker=self.element_maker
)
def require_path_or_below(self, required_path: Sequence[str]):
self.require_path(
_get_common_path(self.current_path, required_path)
) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/xml_writer.py | 0.72331 | 0.179064 | xml_writer.py | pypi |
import os
import codecs
from contextlib import contextmanager
from typing import Iterable, Sequence
from urllib.parse import urlparse
import fsspec
from sciencebeam_trainer_delft.utils.io import (
auto_uploading_output_file as _auto_uploading_output_file,
is_external_location,
open_file
)
def get_file_system_protocol_for_url(url: str) -> fsspec.AbstractFileSystem:
parsed_url = urlparse(url)
return parsed_url.scheme or 'file'
def get_file_system_for_url(url: str) -> fsspec.AbstractFileSystem:
return fsspec.filesystem(get_file_system_protocol_for_url(url))
def get_file_system_protocols(
fs: fsspec.AbstractFileSystem
) -> Sequence[str]:
return (fs.protocol,) if isinstance(fs.protocol, str) else fs.protocol
def get_file_system_default_protocol(
fs: fsspec.AbstractFileSystem
) -> str:
return get_file_system_protocols(fs)[0]
def get_fully_qualified_path_for_protocol_and_path(
protocol: str,
path: str
) -> str:
if 'file' in protocol:
return path
return f'{protocol}://{path}'
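# Illustrative examples (comments only):
#     get_file_system_protocol_for_url('gs://bucket/file.xml')   # -> 'gs'
#     get_file_system_protocol_for_url('/tmp/file.xml')          # -> 'file'
#     get_fully_qualified_path_for_protocol_and_path('gs', 'bucket/file.xml')
#         # -> 'gs://bucket/file.xml'
#     get_fully_qualified_path_for_protocol_and_path('file', '/tmp/file.xml')
#         # -> '/tmp/file.xml'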
def iter_fully_qualified_paths_for_protocol_and_paths(
protocol: str,
paths: Iterable[str]
) -> Iterable[str]:
return (
get_fully_qualified_path_for_protocol_and_path(protocol, path)
for path in paths
)
def get_fully_qualified_path_for_fs_and_path(
fs: fsspec.AbstractFileSystem,
path: str
) -> str:
return get_fully_qualified_path_for_protocol_and_path(
get_file_system_default_protocol(fs),
path
)
def glob(
glob_pattern: str
) -> Sequence[str]:
protocol = get_file_system_protocol_for_url(glob_pattern)
fs: fsspec.AbstractFileSystem = fsspec.filesystem(protocol)
return list(iter_fully_qualified_paths_for_protocol_and_paths(
protocol,
fs.glob(glob_pattern)
))
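# Illustrative example (comments only); 'my-bucket' is a hypothetical bucket name:
#     glob('gs://my-bucket/data/*.tei.xml')
#         # -> ['gs://my-bucket/data/doc1.tei.xml', ...] (fully qualified paths)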
def makedirs(
path: str,
exist_ok: bool = False
):
get_file_system_for_url(path).makedirs(path, exist_ok=exist_ok)
@contextmanager
def auto_uploading_binary_output_file(filepath: str, **kwargs):
if not is_external_location(filepath):
# Note: the upstream implementation doesn't currently auto-compress local files
file_dirname = os.path.dirname(filepath)
if file_dirname:
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open_file(filepath, mode='wb', **kwargs) as fp:
yield fp
return
with _auto_uploading_output_file(filepath, 'wb', **kwargs) as fp:
yield fp
@contextmanager
def auto_uploading_text_output_file(filepath: str, encoding: str, **kwargs):
with auto_uploading_binary_output_file(filepath, **kwargs) as fp:
yield codecs.getwriter(encoding)(fp)
def auto_uploading_output_file(filepath: str, mode: str, encoding: str = 'utf-8', **kwargs):
if mode == 'w':
return auto_uploading_text_output_file(filepath, encoding=encoding, **kwargs)
if mode == 'wb':
return auto_uploading_binary_output_file(filepath, **kwargs)
raise ValueError('invalid mode: %r' % mode)
def write_bytes(filepath: str, data: bytes, **kwargs):
with auto_uploading_output_file(filepath, mode='wb', **kwargs) as fp:
fp.write(data)
def write_text(filepath: str, text: str, encoding: str, **kwargs):
# Note: the upstream implementation doesn't support encoding with compression
write_bytes(
filepath,
codecs.encode(text, encoding=encoding),
**kwargs
) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/io.py | 0.659076 | 0.151467 | io.py | pypi |
import argparse
import logging
import os
from typing import Iterable, List, Optional, Sequence, Tuple
from lxml import etree
from sciencebeam_trainer_delft.utils.io import (
auto_download_input_file
)
from sciencebeam_trainer_delft.sequence_labelling.reader import (
load_data_crf_lines
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
iter_format_tag_result
)
from sciencebeam_parser.utils.io import (
auto_uploading_output_file,
glob
)
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument
)
from sciencebeam_parser.models.data import (
DocumentFeaturesContext,
LabeledLayoutToken,
ModelDataGenerator
)
from sciencebeam_parser.models.training_data import TrainingTeiParser
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.app.parser import ScienceBeamParser
LOGGER = logging.getLogger(__name__)
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
'ScienceBeam Parser: Generate DELFT Training Data'
)
parser.add_argument(
'--model-name',
type=str,
required=True
)
parser.add_argument(
'--tei-source-path',
type=str,
required=True
)
parser.add_argument(
'--raw-source-path',
type=str,
required=False
)
parser.add_argument(
'--delft-output-path',
type=str,
required=True
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug logging'
)
return parser.parse_args(argv)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
if tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
if tag.startswith('I-'):
# inside
return '' + tag[2:]
return tag
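# Illustrative examples (comments only):
#     translate_tags_IOB_to_grobid('B-<title>')   # -> 'I-<title>'
#     translate_tags_IOB_to_grobid('I-<title>')   # -> '<title>'
#     translate_tags_IOB_to_grobid('O')           # -> '<other>'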
def translate_tag_result_tags_IOB_to_grobid(
tag_result: Sequence[Sequence[Tuple[str, str]]]
) -> List[List[Tuple[str, str]]]:
return [
[
(token_text, translate_tags_IOB_to_grobid(tag))
for token_text, tag in doc_tag_result
]
for doc_tag_result in tag_result
]
def get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list: Sequence[Sequence[LabeledLayoutToken]]
) -> List[List[Tuple[str, str]]]:
return [
[
(
labeled_layout_token.layout_token.text,
labeled_layout_token.label
)
for labeled_layout_token in labeled_layout_tokens
]
for labeled_layout_tokens in labeled_layout_tokens_list
]
def get_raw_file_for_tei_file(
tei_file: str,
raw_source_path: str
) -> str:
compression_suffix = ''
if tei_file.endswith('.gz'):
compression_suffix = '.gz'
tei_file = tei_file[:-len(compression_suffix)]
tei_suffix = '.tei.xml'
assert tei_file.endswith(tei_suffix)
return os.path.join(
raw_source_path,
os.path.basename(tei_file[:-len(tei_suffix)] + compression_suffix)
)
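# Illustrative examples (comments only), using hypothetical paths:
#     get_raw_file_for_tei_file('/data/tei/doc1.tei.xml.gz', '/data/raw')
#         # -> '/data/raw/doc1.gz'
#     get_raw_file_for_tei_file('/data/tei/doc1.tei.xml', '/data/raw')
#         # -> '/data/raw/doc1'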
def get_raw_file_list_for_tei_file_list(
tei_file_list: Iterable[str],
raw_source_path: str
) -> Sequence[str]:
return [
get_raw_file_for_tei_file(tei_file, raw_source_path=raw_source_path)
for tei_file in tei_file_list
]
def get_training_tei_parser_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> TrainingTeiParser:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
try:
training_tei_parser = model.get_training_tei_parser()
assert training_tei_parser is not None
return training_tei_parser
except NotImplementedError as exc:
training_tei_parser = None
raise RuntimeError('unsupported model: %r' % model_name) from exc
def get_data_generator_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> ModelDataGenerator:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
return model.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=sciencebeam_parser.app_features_context
)
)
def iter_generate_delft_training_data_lines_for_document( # pylint: disable=too-many-locals
tei_file: str,
raw_file: Optional[str],
training_tei_parser: TrainingTeiParser,
data_generator: ModelDataGenerator
) -> Iterable[str]:
with auto_download_input_file(
tei_file,
auto_decompress=True
) as local_tei_file:
tei_root = etree.parse(local_tei_file).getroot()
labeled_layout_tokens_list = (
training_tei_parser.parse_training_tei_to_labeled_layout_tokens_list(
tei_root
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
translated_tag_result = translate_tag_result_tags_IOB_to_grobid(
get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list
)
)
LOGGER.debug('translated_tag_result: %r', translated_tag_result)
if raw_file:
with auto_download_input_file(
raw_file,
auto_decompress=True
) as local_raw_file:
with open(local_raw_file, 'r', encoding='utf-8') as raw_fp:
texts, features = load_data_crf_lines(
raw_fp
)
assert len(texts) == len(translated_tag_result)
for doc_tokens, doc_tag_result in zip(texts, translated_tag_result):
assert len(doc_tokens) == len(doc_tag_result)
else:
layout_documents = [
LayoutDocument.for_blocks([
LayoutBlock.for_tokens([
labeled_layout_token.layout_token
for labeled_layout_token in labeled_layout_tokens
])
])
for labeled_layout_tokens in labeled_layout_tokens_list
]
LOGGER.debug('layout_documents: %r', layout_documents)
data_line_iterable = list(data_generator.iter_data_lines_for_layout_documents(
layout_documents
))
_texts, features = load_data_crf_lines(data_line_iterable)
LOGGER.debug('features: %r', features)
yield from iter_format_tag_result(
tag_result=translated_tag_result,
output_format=TagOutputFormats.DATA,
texts=None,
features=features
)
def generate_delft_training_data(
model_name: str,
tei_source_path: str,
raw_source_path: str,
delft_output_path: str,
sciencebeam_parser: ScienceBeamParser
):
training_tei_parser = get_training_tei_parser_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
data_generator = get_data_generator_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
LOGGER.debug('tei_source_path: %r', tei_source_path)
tei_file_list = glob(tei_source_path)
if not tei_file_list:
raise RuntimeError('no files found for file pattern %r' % tei_source_path)
LOGGER.info('tei_file_list: %r', tei_file_list)
if raw_source_path:
raw_file_list: Sequence[Optional[str]] = get_raw_file_list_for_tei_file_list(
tei_file_list,
raw_source_path=raw_source_path
)
else:
raw_file_list = [None] * len(tei_file_list)
LOGGER.info('raw_file_list: %r', raw_file_list)
LOGGER.info('writing to : %r', delft_output_path)
with auto_uploading_output_file(
delft_output_path,
mode='w',
encoding='utf-8',
) as data_fp:
for document_index, (tei_file, raw_file) in enumerate(zip(tei_file_list, raw_file_list)):
if document_index > 0:
data_fp.write('\n\n')
data_fp.writelines(iter_generate_delft_training_data_lines_for_document(
tei_file=tei_file,
raw_file=raw_file,
training_tei_parser=training_tei_parser,
data_generator=data_generator
))
def run(args: argparse.Namespace):
LOGGER.info('args: %r', args)
config = AppConfig.load_yaml(
DEFAULT_CONFIG_FILE
)
sciencebeam_parser = ScienceBeamParser.from_config(config)
generate_delft_training_data(
model_name=args.model_name,
tei_source_path=args.tei_source_path,
raw_source_path=args.raw_source_path,
delft_output_path=args.delft_output_path,
sciencebeam_parser=sciencebeam_parser
)
def main(argv: Optional[List[str]] = None):
LOGGER.debug('argv: %r', argv)
args = parse_args(argv)
if args.debug:
for name in [__name__, 'sciencebeam_parser', 'sciencebeam_trainer_delft']:
logging.getLogger(name).setLevel('DEBUG')
run(args)
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main() | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/training/cli/generate_delft_data.py | 0.68215 | 0.190253 | generate_delft_data.py | pypi |
import logging
from typing import Any, Mapping, Optional, Union
from lxml import etree
LOGGER = logging.getLogger(__name__)
T_XSLT_Input = Union[etree.ElementBase, etree.ElementTree]
class XsltTransformerWrapper:
def __init__(
self,
xslt_template: str,
xslt_template_parameters: Optional[Mapping[str, Any]] = None
):
self.xslt_template = xslt_template
if xslt_template_parameters is None:
xslt_template_parameters = {}
self.xslt_template_parameters = xslt_template_parameters
self.__transformer: Optional[etree.XSLT] = None
# validate the XSLT stylesheet
etree.fromstring(self.xslt_template)
@staticmethod
def from_template_string(xslt_template: str, **kwargs) -> 'XsltTransformerWrapper':
return XsltTransformerWrapper(xslt_template, **kwargs)
@staticmethod
def from_template_file(xslt_template_file: str, **kwargs) -> 'XsltTransformerWrapper':
return XsltTransformerWrapper.from_template_string(
etree.tostring(etree.parse(xslt_template_file)),
**kwargs
)
def _get_transformer(self) -> etree.XSLT:
if self.__transformer is None:
# The transform function cannot be pickled and needs to be loaded lazily
transform = etree.XSLT(
etree.fromstring(self.xslt_template)
)
self.__transformer = transform
return self.__transformer
def __call__(
self,
xslt_input: T_XSLT_Input,
xslt_template_parameters: Optional[Mapping[str, Any]] = None
):
xslt_template_parameters = {
**self.xslt_template_parameters,
**(xslt_template_parameters or {})
}
LOGGER.debug(
'xslt_input: %r (xslt_template_parameters=%r)',
xslt_input, xslt_template_parameters
)
_xslt_transformer = self._get_transformer()
return _xslt_transformer(
xslt_input,
**{
key: etree.XSLT.strparam(value)
for key, value in xslt_template_parameters.items()
}
) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/transformers/xslt.py | 0.848659 | 0.244386 | xslt.py | pypi |
# ScienceBeam Trainer DeLFT
Work in-progress..
A thin(ish) wrapper around [DeLFT](https://github.com/kermitt2/delft) to enable training in the cloud.
Some of the main features:
- resources (model, data etc.) can be loaded from remote sources, currently:
- HTTP (`https://`, `http://`)
- Google Storage (`gs://`)
- resources can be saved to remote buckets, currently:
- Google Storage (`gs://`)
- on-demand embedding download
- Docker container(s)
- Support for Wapiti models
## Prerequisites
- Python 3
When using [pyenv](https://github.com/pyenv/pyenv),
you may need `libsqlite3-dev` and to have Python installed with the `--enable-shared` flag.
For example:
```bash
apt-get install libsqlite3-dev
```
```bash
PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install --force 3.7.9
```
## Example Notebooks
- [train-header.ipynb](notebooks/train-header.ipynb) ([open in colab](https://colab.research.google.com/github/elifesciences/sciencebeam-trainer-delft/blob/develop/notebooks/train-header.ipynb))
## GROBID Docker Image with DeLFT
The Docker image `elifesciences/sciencebeam-trainer-delft-grobid_unstable`
can be used in place of the main GROBID image.
It includes DeLFT (currently with CPU support only).
There are several ways to change the configuration or override models.
### Override Models using Docker Image
The `OVERRIDE_MODELS` or `OVERRIDE_MODEL_*` environment variables allow models to be overridden. Both environment variables are equivalent: `OVERRIDE_MODELS` is meant for overriding multiple models via a single environment variable (separated by `|`), whereas `OVERRIDE_MODEL_*` can be used to specify each model separately.
```bash
docker run --rm \
--env "OVERRIDE_MODELS=segmentation=/path/to/segmentation-model|header=/path/to/header-model" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
or:
```bash
docker run --rm \
--env "OVERRIDE_MODEL_1=segmentation=/path/to/segmentation-model" \
--env "OVERRIDE_MODEL_2=header=/path/to/header-model" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
e.g.:
```bash
docker run --rm \
--env "OVERRIDE_MODEL_1=header=https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/delft-grobid-header-biorxiv-no-word-embedding-2020-05-05.tar.gz" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
This functionality is mainly intended for loading models from a compressed file or bucket, such as Google Storage or S3 (you may also need to mount the relevant credentials).
## GROBID Trainer CLI
The GROBID Trainer CLI is the equivalent of [DeLFT's grobidTagger](https://github.com/kermitt2/delft/blob/master/grobidTagger.py). It is the main interface for interacting with this project.
To get a list of all of the available parameters:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer --help
```
### Using Docker Image
```bash
docker run --rm elifesciences/sciencebeam-trainer-delft_unstable \
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer --help
```
### Train Sub Command
Training a model involves many parameters. The following example shows how to run the training; the parameter values are illustrative rather than recommended.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
An example command using more configurable parameters:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--char-embedding-size="11" \
--char-lstm-units="12" \
--char-input-mask-zero \
--char-input-dropout="0.3" \
--char-lstm-dropout="0.3" \
--max-char-length="13" \
--word-lstm-units="14" \
--dropout="0.1" \
--recurrent-dropout="0.2" \
--max-epoch="50"
```
### Train Eval Sub Command
The `train_eval` sub command combines the `train` and `eval` commands. It reserves a slice of the input data for the evaluation.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
If you rather want to provide separate evaluation data:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--eval-limit="100" \
--eval-max-sequence-length="100" \
--eval-input-window-stride="90" \
--early-stopping-patience="3" \
--max-epoch="50"
```
You can also train without using word embedding:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Train with layout features
Layout features are additional features provided with each token, e.g. whether it's the start of the line.
The model needs to support using such features. The following models do:
- `BidLSTM_CRF_FEATURES`
- `CustomBidLSTM_CRF`
- `CustomBidLSTM_CRF_FEATURES`
The features are generally provided in the training data. Some of the features are not suitable as model input because they have too many distinct values (e.g. variations of the token itself). The features to use should be specified via `--features-indices`. The `input_info` sub command can help identify useful feature ranges (based on the count of unique values).
Example commands:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="BidLSTM_CRF_FEATURES" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="5" \
--features-lstm-units="7" \
--early-stopping-patience="10" \
--max-epoch="50"
```
or
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF_FEATURES" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="5" \
--features-lstm-units="7" \
--early-stopping-patience="10" \
--max-epoch="50"
```
or
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="0" \
--features-lstm-units="0" \
--early-stopping-patience="10" \
--max-epoch="50"
```
By default features are assumed to be categorical.
But features may also be [continuous](https://en.wikipedia.org/wiki/Continuous_or_discrete_variable).
Those values can be specified via the `--continuous-features-indices` parameter.
In that case they will automatically be part of the `features` and do not need to be specified separately.
Continuous features will get [min-max scaled](https://en.wikipedia.org/wiki/Feature_scaling).
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-segmentation.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF" \
--use-features \
--features-indices="6-11" \
--continuous-features-indices="22,23,26" \
--features-embedding-size="0" \
--features-lstm-units="0" \
--early-stopping-patience="10" \
--max-epoch="50"
```
### Training with additional text features
Layout features may also contain additional token or text features.
For example, the default GROBID *segmentation* model uses one data row for the whole line, with the first token of the line being the main token and the second token being the first feature (index `0`).
Train with additional token features:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--additional-token-feature-indices="0" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-segmentation.train.gz" \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
Additionally, a ScienceBeam modification of the GROBID *segmentation* model also contains a text feature containing the whole line (further details below).
Train with text features (using three tokens for word embeddings):
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--text-feature-indices="32" \
--concatenated-embeddings-token-count="3" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--limit="100" \
--eval-limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
In the [referenced training data](https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz), the last feature (`32`) represents the whole line (using non-breaking spaces instead of spaces). To use the model with GROBID, that [feature would need to be enabled](https://github.com/elifesciences/grobid/pull/25).
The same text feature also allows us to explore whether the model would perform better
if each token within the text feature was a separate token (data row).
In that case one would specify `--unroll-text-feature-index` with the token index of the text feature
that should get re-tokenized and "unrolled". The features and labels will get copied.
Another feature will get added with the *line status* (`LINESTART`, `LINEIN`, `LINEEND`) - feature index `33` in the example below.
Where the label has a beginning prefix (`B-`), it will get converted to an inside prefix (`I-`) for the remaining tokens
(see [IOB format](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging))).
At prediction time, the model will receive the "unrolled" data, whereas the original data will get returned,
with the majority label for that line (majority without prefix; a beginning prefix will be used if the label has changed).
Example:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--unroll-text-feature-index="32" \
--use-features \
--feature-indices="6-11,33" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--limit="100" \
--eval-batch-size="1" \
--eval-limit="10" \
--eval-max-sequence-length="100" \
--early-stopping-patience="10" \
--max-epoch="50"
```
To inspect the unrolled predictions further, it is also possible to use the `tag` sub command with
`--tag-transformed`.
That flag will only make a difference for models already trained using the aforementioned
`--unroll-text-feature-index` parameter.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--tag-transformed \
--batch-size="16" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--model-path="data/models/sequenceLabelling/grobid-segmentation" \
--limit="2" \
--tag-output-format="data_unidiff" \
--tag-output-path="/tmp/test.diff"
```
### Resume training
Sometimes it can be useful to continue training a model.
For example, if an exception was thrown after epoch 42, you could continue training from the last checkpoint.
Or you may want to fine-tune an existing model by training it on new data.
Note: the model configuration will be loaded from the checkpoint.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--resume-train-model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--initial-epoch="10" \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--resume-train-model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--initial-epoch="10" \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--eval-limit="100" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Auto-resume training
As detailed in the previous section "Resume training",
there are situations where resuming training can be useful.
In particular, when the training process itself is automatically restarted,
it is usually preferable to resume training rather than start from
the beginning. By adding the `--auto-resume` flag, the training will resume from
the last saved checkpoint. Not surprisingly, saving checkpoints needs to be enabled as well.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--auto-resume \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--checkpoint="./data/checkpoints/header-model" \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Transfer learning (experimental)
A limited form of transfer learning is also possible by copying selected layers from a previously trained model. e.g.:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm|word_lstm=word_lstm|word_lstm_dense=word_lstm_dense" \
--transfer-copy-preprocessor-fields="vocab_char,feature_preprocessor" \
--transfer-freeze-layers="char_embeddings,char_lstm,word_lstm" \
--batch-size="16" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.test.gz" \
--eval-limit="100" \
--max-sequence-length="1000" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--word-lstm-units="200" \
--use-features \
--feature-indices="9-25" \
--max-epoch="50"
```
Or transfer character weights from a different GROBID model:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
affiliation-address \
train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm" \
--transfer-copy-preprocessor-fields="vocab_char" \
--transfer-freeze-layers="char_embeddings,char_lstm" \
--batch-size="32" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.test.gz" \
--eval-limit="100" \
--max-sequence-length="100" \
--eval-batch-size="5" \
--early-stopping-patience="5" \
--word-lstm-units="20" \
--max-epoch="50"
```
### Training very long sequences
Some training sequences can be very long and may exceed the available memory. This is in particular an issue at training time.
Some approaches to deal with the issue are described below.
#### Truncate the sequences to a maximum length
By passing in `--max-sequence-length`, sequences are truncated.
In that case the model will not be trained on any data beyond the max sequence length.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
#### Training using [truncated BPTT](https://en.wikipedia.org/wiki/Backpropagation_through_time#Pseudocode) (Backpropagation through time)
This requires the LSTMs to be *stateful* (the state from the previous batch is passed on to the next). The `--stateful` flag should be passed in, and `--input-window-stride` should be the same as `--max-sequence-length`.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="100" \
--stateful \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
Unfortunately the current implementation is very slow and training time might increase significantly.
#### Training using window slices
The alternative to the above is to not use *stateful* LSTMs but still pass in the input data using sliding windows.
To do that, do not pass `--stateful`, but use an `--input-window-stride` which is equal to or less than `--max-sequence-length`.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="50" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
This will not allow the LSTM to capture long-term dependencies beyond the max sequence length, but it will allow it to see all of the data, in chunks. Therefore the max sequence length should be large enough; how large is enough depends on the model.
### Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
eval \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="10" \
--quiet
```
The evaluation output format can be changed to `json` using the `--eval-output-format` option.
The result can also be saved to a file using `--eval-output-path`.
### Tag Sub Command
The `tag` sub command supports multiple output formats (`--tag-output-format`):
- `json`: more detailed tagging output
- `data`: data output with features, but with the label replaced by the predicted label
- `text`: not really a tag output as it just outputs the input text
- `xml`: uses predicted labels as XML elements
- `xml_diff`: same as `xml` but it is showing a diff between expected and predicted results
The output will be written to the path specified via `--tag-output-path` if present. Otherwise it will be written to *stdout*.
#### XML Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="xml" \
--quiet
```
With the result:
```xml
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 񮽙 2n chessboard : starting from some arbitrary tiling , pick a 2 񮽙 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the 񮽙rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
</xml>
```
#### XML Diff Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="xml_diff" \
--quiet
```
With the result (the second document contains differences):
```xml
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
<p>
<title>Translucent Sums : A Foundation for Higher - Order Module Systems</title>
<author>Mark Lillibridge</author>
<date>May , 1997</date>
- <pubnum>- - 95 -</pubnum>
+ <pubnum>- - 95 - of</pubnum>
? +++
- <affiliation>of</affiliation>
</p>
</xml>
```
#### DATA Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="data" \
--quiet \
| head -5
```
With the result:
```text
Markov markov M Ma Mar Mark v ov kov rkov BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Chain chain C Ch Cha Chai n in ain hain BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Algorithms algorithms A Al Alg Algo s ms hms thms BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Planar planar P Pl Pla Plan r ar nar anar BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
```
#### DATA Unidiff Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="data_unidiff" \
--tag-output-path="/tmp/test.diff"
```
The output can be viewed using a specialised tool (such as [Kompare](https://en.wikipedia.org/wiki/Kompare)).
Example [unidiff](https://en.wikipedia.org/wiki/Diff#Unified_format) result:
```diff
--- header_document_000002.expected
+++ header_document_000002.actual
@@ -1,21 +1,21 @@
Translucent translucent T Tr Tra Tran t nt ent cent BLOCKSTART LINESTART NEWFONT HIGHERFONT 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Sums sums S Su Sum Sums s ms ums Sums BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
: : : : : : : : : : BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 0 PUNCT 0 0 I-<title>
A a A A A A A A A A BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Foundation foundation F Fo Fou Foun n on ion tion BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 1 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
...
- - - - - - - - - - BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
95 95 9 95 95 95 5 95 95 95 BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
-of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<affiliation>
+of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
```
#### Text Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="text" \
--quiet
```
With the result:
```text
Markov Chain Algorithms for Planar Lattice Structures Michael Luby y Dana Randall z Alistair Sinclair Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .
```
### Wapiti Sub Commands
The Wapiti sub commands allow you to train, evaluate and tag using Wapiti models, following a similar process to the sub commands for the other DL model(s) above.
Currently you would need to either install [Wapiti](https://wapiti.limsi.fr/) and make the `wapiti` command available on the path, or use the `--wapiti-install-source` switch to download and install a version from source.
#### Wapiti Train Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--wapiti-install-source=https://github.com/kermitt2/Wapiti/archive/5f9a52351fddf21916008daa4becd41d56e7f608.tar.gz \
--output="data/models" \
--limit="100" \
--max-epoch="10"
```
#### Wapiti Train Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--output="data/models" \
--limit="100" \
--max-epoch="10"
```
#### Wapiti Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--quiet
```
#### Wapiti Tag Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_tag \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--limit="1" \
--tag-output-format="xml_diff" \
--quiet
```
### Input Info Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
input_info \
--quiet \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz
```
Result:
```text
number of input sequences: 2538
sequence lengths: {'q.00': 1, 'q.25': 61.0, 'q.50': 178.0, 'q.75': 300.75, 'q1.0': 6606}
token lengths: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}
number of features: 31
inconsistent feature length counts: {31: 536893, 30: 12855}
examples with feature length=31:
die D Di Die Die e ie Die Die BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0
abscheidung A Ab Abs Absc g ng ung dung BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
strömender s st str strö r er der nder BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
examples with feature length=30:
gudina G Gu Gud Gudi a na ina dina BLOCKSTART LINESTART LINEINDENT NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
et e et et et t et et et BLOCKIN LINEIN LINEINDENT NEWFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
al a al al al l al al al BLOCKIN LINEIN LINEINDENT SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 1 0 0 0 0 0 0 NOPUNCT 0 0
feature value lengths: {0: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}, 1: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 2: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 3: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 4: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 5: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 6: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 7: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 8: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 9: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 10}, 10: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 9}, 11: {'q.00': 7, 'q.25': 8.0, 'q.50': 8.0, 'q.75': 8.0, 'q1.0': 8}, 12: {'q.00': 9, 'q.25': 12.0, 'q.50': 12.0, 'q.75': 12.0, 'q1.0': 12}, 13: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 14: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 15: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 16: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 7}, 17: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 14}, 18: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 19: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 20: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 21: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 22: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 23: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 24: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 25: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 26: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 27: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 28: {'q.00': 3, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 11}, 29: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 30: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}}
feature counts: {0: 1000, 1: 247, 2: 1000, 3: 1000, 4: 1000, 5: 265, 6: 1000, 7: 1000, 8: 1000, 9: 3, 10: 3, 11: 2, 12: 3, 13: 2, 14: 2, 15: 2, 16: 3, 17: 3, 18: 2, 19: 2, 20: 2, 21: 1, 22: 1, 23: 2, 24: 2, 25: 1, 26: 2, 27: 2, 28: 8, 29: 1, 30: 1}
suggested feature indices: 9-30
label counts: {'B-<title>': 2363, 'I-<title>': 24481, 'B-<author>': 2241, 'I-<author>': 25830, 'B-<reference>': 414, 'I-<reference>': 10121, 'B-<submission>': 409, 'I-<submission>': 3729, 'B-<abstract>': 1528, 'I-<abstract>': 269983, 'B-<affiliation>': 2782, 'I-<affiliation>': 23886, 'B-<address>': 2330, 'I-<address>': 13963, 'B-<date>': 658, 'I-<date>': 2204, 'B-<grant>': 105, 'I-<grant>': 4509, 'B-<email>': 891, 'I-<email>': 7796, 'B-<keyword>': 424, 'I-<keyword>': 7804, 'B-<entitle>': 24, 'I-<entitle>': 421, 'B-<pubnum>': 421, 'I-<pubnum>': 3755, 'B-<note>': 1823, 'I-<note>': 26033, 'B-<copyright>': 281, 'I-<copyright>': 5152, 'B-<date-submission>': 29, 'I-<date-submission>': 166, 'B-<intro>': 439, 'I-<intro>': 96944, 'B-<web>': 187, 'I-<web>': 3162, 'B-<phone>': 71, 'I-<phone>': 710, 'B-<dedication>': 22, 'I-<dedication>': 243, 'B-<degree>': 59, 'I-<degree>': 1355}
```
### Other CLI Parameters
#### `--log-file`
Specifying a log file will save the logging output to that file (the file can also be gzipped by adding the `.gz` extension). This is mainly intended for cloud usage; locally you could also use `tee` for that.
If the specified file is a remote file, it will be uploaded when the program finishes (logs are not streamed).
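For example, a minimal sketch combining the switch with the tagging example from above (assuming `--log-file` can be combined with the `tag` sub command; the log file path is illustrative only):
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    tag \
    --input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
    --model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
    --limit="2" \
    --tag-output-format="text" \
    --log-file="/tmp/sciencebeam-trainer.log.gz"
```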
#### `--notification-url`
For a long-running training process (`train` and `train_eval`, or `wapiti_train` and `wapiti_train_eval`), it is possible to get notified via a webhook URL
(e.g. [Slack](https://api.slack.com/messaging/webhooks) or [Mattermost](https://docs.mattermost.com/developer/webhooks-incoming.html)).
In that case, a message will be sent when the training completes or when an error occurs (although not all errors may be caught).
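For example, a sketch adding the switch to the Wapiti train example from above (the webhook URL is a placeholder):
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    header wapiti_train \
    --input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
    --wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
    --output="data/models" \
    --limit="100" \
    --max-epoch="10" \
    --notification-url="https://hooks.example.org/your-webhook-id"
```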
### Environment Variables
Environment variables can be useful when you are not directly interacting with the CLI, e.g. when the library is used via GROBID.
The following environment variables can be specified (see the example below the table):
| Name | Default | Description
| ---- | ------- | -----------
| `SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH` | *None* | The maximum sequence length to use, e.g. when tagging.
| `SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE` | *None* | The window stride to use (if any). If the model is stateless, this could be set to the maximum sequence length. Otherwise this could be set to a value below the maximum sequence length; the difference will be the overlapping window. If no window stride is specified, the sequence will be truncated at the maximum sequence length.
| `SCIENCEBEAM_DELFT_BATCH_SIZE` | `10` | The batch size to use.
| `SCIENCEBEAM_DELFT_STATEFUL` | *None* (*False*) | Whether to enable stateful mode. This may only work with a batch size of `1`. Note: the stateful mode is currently very slow.
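For example, a minimal sketch of setting some of these variables in the environment of the process using the library (the values shown are illustrative only):
```bash
export SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH="500"
export SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE="500"
export SCIENCEBEAM_DELFT_BATCH_SIZE="16"
export SCIENCEBEAM_DELFT_STATEFUL="false"
```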
## Training in Google's AI Platform
You can train a model using Google's [AI Platform](https://cloud.google.com/ai-platform/). e.g.
```bash
gcloud beta ai-platform jobs submit training \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
--stream-logs \
--module-name sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
--package-path sciencebeam_trainer_delft \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
```
Or use the project's wrapper script, which provides some default values:
```bash
./gcloud-ai-platform-submit.sh \
--job-prefix "my_job_prefix" \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
```
(Alternatively, you can train for free using Google Colab; see the Example Notebooks above.)
## Text Classification
### Train Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
train \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv"
```
### Eval Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
eval \
--model-path="data/models/textClassification/toxic" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
```
### Predict Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
predict \
--model-path="data/models/textClassification/toxic" \
--predict-input-limit=100 \
--predict-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--predict-output="./data/toxic_test_predictions.tsv"
```
### Train Eval Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
train_eval \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
```
## Checkpoints CLI
The checkpoints CLI tool gives you a summary of the saved checkpoints. Checkpoints are optionally saved during training; they allow you to resume model training or to further evaluate performance at individual checkpoints. Usually training will stop after the F1 score hasn't improved for a number of epochs, so the last checkpoint may not be the best one.
The checkpoints tool will sort the checkpoints by F1 score and show the top *n* (`limit`) checkpoints.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints --help
```
### Checkpoints Text Output
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=text
```
```text
best checkpoints:
00039: 0.5877923107411811 (/path/to/checkpoints/epoch-00039) (last)
00036: 0.5899450117831894 (/path/to/checkpoints/epoch-00036)
00034: 0.591387179996031 (/path/to/checkpoints/epoch-00034) (best)
```
### Checkpoints JSON Output
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=json
```
```json
[
{
"loss": 40.520591011530236,
"f1": 0.5877923107411811,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 39,
"path": "/path/to/checkpoints/epoch-00039",
"is_last": true,
"is_best": false
},
{
"loss": 44.48661111276361,
"f1": 0.5899450117831894,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 36,
"path": "/path/to/checkpoints/epoch-00036",
"is_last": false,
"is_best": false
},
{
"loss": 47.80826501711393,
"f1": 0.591387179996031,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 34,
"path": "/path/to/checkpoints/epoch-00034",
"is_last": false,
"is_best": true
}
]
```
from collections import Counter, defaultdict, OrderedDict
from typing import Dict, Iterable, List
import numpy as np
def iter_flat_batch_tokens(batch_tokens: List[List[str]]):
return (
token
for doc_tokens in batch_tokens
for token in doc_tokens
)
def iter_flat_features(features: np.ndarray):
return (
features_vector
for features_doc in features
for features_vector in features_doc
)
def get_quantiles(values: Iterable[float]) -> Dict[str, float]:
arr = np.array(list(values))
return OrderedDict([
('q.00', np.quantile(arr, 0)),
('q.25', np.quantile(arr, 0.25)),
('q.50', np.quantile(arr, 0.50)),
('q.75', np.quantile(arr, 0.75)),
('q1.0', np.quantile(arr, 1))
])
def get_quantiles_feature_value_length_by_index(features: np.ndarray):
return dict(enumerate(map(
lambda feature_values: get_quantiles(map(len, feature_values)),
zip(*iter_flat_features(features))
)))
def get_feature_value_counts_by_index(features: np.ndarray, max_feature_values: int = 1000):
feature_value_counts_by_index: Dict[int, Counter] = defaultdict(Counter)
for feature_vector in iter_flat_features(features):
for index, value in enumerate(feature_vector):
feature_value_counts = feature_value_counts_by_index[index]
if (
len(feature_value_counts) >= max_feature_values
and value not in feature_value_counts):
continue
feature_value_counts[value] += 1
return feature_value_counts_by_index
def get_feature_counts(features: np.ndarray):
feature_value_counts_by_index = get_feature_value_counts_by_index(features)
return OrderedDict([
(index, len(feature_value_counts_by_index[index]))
for index in sorted(feature_value_counts_by_index.keys())
])
def get_suggested_feature_indices(feature_counts: Dict[int, int], threshold: int = 12):
return [
index
for index in sorted(feature_counts.keys())
if feature_counts[index] <= threshold
]
def format_dict(d: dict) -> str:
return '{' + ', '.join([
'%s: %s' % (
repr(key),
format_dict(value) if isinstance(value, dict) else repr(value)
)
for key, value in d.items()
]) + '}'
def iter_index_groups(indices: List[int]) -> Iterable[List[int]]:
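    """Group consecutive indices into lists, e.g. [9, 10, 11, 20] -> [[9, 10, 11], [20]]."""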
group: List[int] = []
for index in indices:
if not group or group[-1] + 1 == index:
group.append(index)
continue
        yield group
        group = [index]
if group:
yield group
def iter_formatted_index_groups(indices: List[int]) -> Iterable[str]:
for group in iter_index_groups(indices):
if len(group) == 1:
yield str(group[0])
continue
yield '%s-%s' % (group[0], group[-1])
def format_indices(indices: List[int]) -> str:
    return ','.join(list(iter_formatted_index_groups(indices)))
import logging
import os
import time
from functools import partial
from typing import Callable, Iterable, List, Optional, Tuple, Union, cast
import numpy as np
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.preprocess import WordPreprocessor, FeaturesPreprocessor
from delft.sequenceLabelling.wrapper import Sequence as _Sequence
from delft.sequenceLabelling.config import TrainingConfig as DelftTrainingConfig
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.numpy import concatenate_or_none
from sciencebeam_trainer_delft.utils.misc import str_to_bool
from sciencebeam_trainer_delft.sequence_labelling.tools.install_models import (
copy_directory_with_source_meta
)
from sciencebeam_trainer_delft.embedding import Embeddings, EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig, TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.data_generator import (
DataGenerator,
iter_batch_text_list,
get_concatenated_embeddings_token_count
)
from sciencebeam_trainer_delft.sequence_labelling.trainer import (
Scorer,
Trainer
)
from sciencebeam_trainer_delft.sequence_labelling.models import (
is_model_stateful,
get_model,
updated_implicit_model_config_props
)
from sciencebeam_trainer_delft.sequence_labelling.preprocess import (
T_FeaturesPreprocessor,
FeaturesPreprocessor as ScienceBeamFeaturesPreprocessor,
faster_preprocessor_fit
)
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelSaver, ModelLoader
from sciencebeam_trainer_delft.sequence_labelling.tagger import Tagger
from sciencebeam_trainer_delft.sequence_labelling.evaluation import ClassificationResult
from sciencebeam_trainer_delft.sequence_labelling.debug import get_tag_debug_reporter_if_enabled
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
get_checkpoints_json,
get_last_checkpoint_url
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
TransferLearningConfig,
TransferLearningSource,
freeze_model_layers
)
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform import (
DummyDatasetTransformer
)
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform.unroll_transform import (
UnrollingTextFeatureDatasetTransformer
)
LOGGER = logging.getLogger(__name__)
DEFAUT_MODEL_PATH = 'data/models/sequenceLabelling/'
DEFAULT_EMBEDDINGS_PATH = './embedding-registry.json'
DEFAUT_BATCH_SIZE = 10
class EnvironmentVariables:
# environment variables are mainly intended for GROBID, as we can't pass in arguments
MAX_SEQUENCE_LENGTH = 'SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH'
INPUT_WINDOW_STRIDE = 'SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE'
BATCH_SIZE = 'SCIENCEBEAM_DELFT_BATCH_SIZE'
STATEFUL = 'SCIENCEBEAM_DELFT_STATEFUL'
def get_typed_env(
key: str,
type_fn: Callable[[str], T],
default_value: Optional[T] = None
) -> Optional[T]:
    value_str = os.getenv(key)
    if not value_str:
        return default_value
    return type_fn(value_str)
def get_default_max_sequence_length() -> Optional[int]:
return get_typed_env(EnvironmentVariables.MAX_SEQUENCE_LENGTH, int, default_value=None)
def get_default_input_window_stride() -> Optional[int]:
return get_typed_env(EnvironmentVariables.INPUT_WINDOW_STRIDE, int, default_value=None)
def get_default_batch_size() -> Optional[int]:
return get_typed_env(EnvironmentVariables.BATCH_SIZE, int, default_value=DEFAUT_BATCH_SIZE)
def get_default_stateful() -> Optional[bool]:
return get_typed_env(
EnvironmentVariables.STATEFUL,
str_to_bool,
default_value=None
)
def get_features_preprocessor(
model_config: ModelConfig,
features: np.array = None) -> T_FeaturesPreprocessor:
if not model_config.use_features:
LOGGER.info('features not enabled')
return None
if features is None:
LOGGER.info('no features available')
return None
if model_config.use_features_indices_input:
LOGGER.info(
'using feature indices as input, features_indices=%s, features_vocab_size=%s',
model_config.features_indices, model_config.features_vocabulary_size
)
return FeaturesPreprocessor(
features_indices=model_config.features_indices,
features_vocabulary_size=model_config.features_vocabulary_size
)
LOGGER.info(
'using feature indices=%s', model_config.features_indices
)
return ScienceBeamFeaturesPreprocessor(
features_indices=model_config.features_indices,
continuous_features_indices=model_config.continuous_features_indices
)
def get_preprocessor(
model_config: ModelConfig,
features: np.array = None) -> WordPreprocessor:
feature_preprocessor = get_features_preprocessor(model_config, features=features)
return WordPreprocessor(
max_char_length=model_config.max_char_length,
feature_preprocessor=feature_preprocessor
)
def prepare_preprocessor(
X, y,
model_config: ModelConfig,
features: Optional[List[List[List[str]]]] = None
):
preprocessor = get_preprocessor(model_config, features=features)
batch_text_list_iterable = iter_batch_text_list(
X, features,
additional_token_feature_indices=model_config.additional_token_feature_indices,
text_feature_indices=model_config.text_feature_indices
)
if isinstance(preprocessor, WordPreprocessor):
LOGGER.info('fitting preprocessor (faster)')
faster_preprocessor_fit(preprocessor, batch_text_list_iterable, y)
else:
LOGGER.info('fitting preprocessor (default)')
preprocessor.fit(batch_text_list_iterable, y)
if model_config.use_features and features is not None:
LOGGER.info('fitting features preprocessor')
preprocessor.fit_features(features)
if model_config.features_indices != preprocessor.feature_preprocessor.features_indices:
LOGGER.info('revised features_indices: %s', model_config.features_indices)
model_config.features_indices = preprocessor.feature_preprocessor.features_indices
model_config.features_map_to_index = preprocessor.feature_preprocessor.features_map_to_index
LOGGER.info('done fitting preprocessor')
return preprocessor
def get_model_directory(model_name: str, dir_path: str = None):
return os.path.join(dir_path or DEFAUT_MODEL_PATH, model_name)
class Sequence(_Sequence):
def __init__(
self, *args,
use_features: bool = False,
features_indices: List[int] = None,
features_embedding_size: int = None,
multiprocessing: bool = False,
embedding_registry_path: str = None,
embedding_manager: EmbeddingManager = None,
config_props: dict = None,
training_props: dict = None,
max_sequence_length: int = None,
input_window_stride: int = None,
eval_max_sequence_length: int = None,
eval_input_window_stride: int = None,
batch_size: int = None,
eval_batch_size: int = None,
stateful: bool = None,
transfer_learning_config: TransferLearningConfig = None,
tag_transformed: bool = False,
**kwargs):
# initialise logging if not already initialised
logging.basicConfig(level='INFO')
LOGGER.debug('Sequence, args=%s, kwargs=%s', args, kwargs)
self.embedding_registry_path = embedding_registry_path or DEFAULT_EMBEDDINGS_PATH
if embedding_manager is None:
embedding_manager = EmbeddingManager(
path=self.embedding_registry_path,
download_manager=DownloadManager()
)
self.download_manager = embedding_manager.download_manager
self.embedding_manager = embedding_manager
self.embeddings: Optional[Embeddings] = None
if not batch_size:
batch_size = get_default_batch_size()
if not max_sequence_length:
max_sequence_length = get_default_max_sequence_length()
self.max_sequence_length = max_sequence_length
if not input_window_stride:
input_window_stride = get_default_input_window_stride()
self.input_window_stride = input_window_stride
self.eval_max_sequence_length = eval_max_sequence_length
self.eval_input_window_stride = eval_input_window_stride
self.eval_batch_size = eval_batch_size
self.model_path: Optional[str] = None
if stateful is None:
# use a stateful model, if supported
stateful = get_default_stateful()
self.stateful = stateful
self.transfer_learning_config = transfer_learning_config
self.dataset_transformer_factory = DummyDatasetTransformer
self.tag_transformed = tag_transformed
super().__init__(
*args,
max_sequence_length=max_sequence_length,
batch_size=batch_size,
**kwargs
)
LOGGER.debug('use_features=%s', use_features)
self.model_config: ModelConfig = ModelConfig(
**{ # type: ignore
**vars(self.model_config),
**(config_props or {}),
'features_indices': features_indices,
'features_embedding_size': features_embedding_size
},
use_features=use_features
)
self.update_model_config_word_embedding_size()
updated_implicit_model_config_props(self.model_config)
self.update_dataset_transformer_factor()
self.training_config: TrainingConfig = TrainingConfig(
**vars(cast(DelftTrainingConfig, self.training_config)),
**(training_props or {})
)
LOGGER.info('training_config: %s', vars(self.training_config))
self.multiprocessing = multiprocessing
self.tag_debug_reporter = get_tag_debug_reporter_if_enabled()
self._load_exception = None
self.p: Optional[WordPreprocessor] = None
self.model: Optional[BaseModel] = None
self.models: List[BaseModel] = []
def update_model_config_word_embedding_size(self):
if self.embeddings:
token_count = get_concatenated_embeddings_token_count(
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
additional_token_feature_indices=(
self.model_config.additional_token_feature_indices
)
)
self.model_config.word_embedding_size = (
self.embeddings.embed_size * token_count
)
def update_dataset_transformer_factor(self):
self.dataset_transformer_factory = DummyDatasetTransformer
if self.model_config.unroll_text_feature_index is not None:
LOGGER.info(
'using unrolling text feature dataset transformer, index=%s',
self.model_config.unroll_text_feature_index
)
self.dataset_transformer_factory = partial(
UnrollingTextFeatureDatasetTransformer,
self.model_config.unroll_text_feature_index,
used_features_indices=self.model_config.features_indices
)
def clear_embedding_cache(self):
if not self.embeddings:
return
if self.embeddings.use_ELMo:
self.embeddings.clean_ELMo_cache()
if self.embeddings.use_BERT:
self.embeddings.clean_BERT_cache()
def train( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid=None, y_valid=None,
features_train: np.array = None,
features_valid: np.array = None):
# TBD if valid is None, segment train to get one
        dataset_transformer = self.dataset_transformer_factory()
        x_train, y_train, features_train = dataset_transformer.fit_transform(
            x_train, y_train, features_train
        )
        if x_valid is not None:
            x_valid, y_valid, features_valid = dataset_transformer.fit_transform(
                x_valid, y_valid, features_valid
            )
x_all = np.concatenate((x_train, x_valid), axis=0)
y_all = np.concatenate((y_train, y_valid), axis=0)
features_all = concatenate_or_none((features_train, features_valid), axis=0)
transfer_learning_source: Optional[TransferLearningSource] = None
if self.p is None or self.model is None:
transfer_learning_source = TransferLearningSource.from_config(
self.transfer_learning_config,
download_manager=self.download_manager
)
if self.p is None:
if transfer_learning_source:
self.p = transfer_learning_source.copy_preprocessor_if_enabled()
if self.p is None:
self.p = prepare_preprocessor(
x_all, y_all,
features=features_all,
model_config=self.model_config
)
if transfer_learning_source:
transfer_learning_source.apply_preprocessor(target_preprocessor=self.p)
self.model_config.char_vocab_size = len(self.p.vocab_char)
self.model_config.case_vocab_size = len(self.p.vocab_case)
if self.model_config.use_features and features_train is not None:
LOGGER.info('x_train.shape: %s', x_train.shape)
LOGGER.info('features_train.shape: %s', features_train.shape)
sample_transformed_features = self.p.transform_features(features_train[:1])
try:
if isinstance(sample_transformed_features, tuple):
sample_transformed_features = sample_transformed_features[0]
LOGGER.info(
'sample_transformed_features.shape: %s',
sample_transformed_features.shape
)
self.model_config.max_feature_size = sample_transformed_features.shape[-1]
LOGGER.info('max_feature_size: %s', self.model_config.max_feature_size)
except Exception: # pylint: disable=broad-except
LOGGER.info('features do not implement shape, set max_feature_size manually')
if self.model is None:
self.model = get_model(self.model_config, self.p, len(self.p.vocab_tag))
if transfer_learning_source:
transfer_learning_source.apply_weights(target_model=self.model)
if self.transfer_learning_config:
freeze_model_layers(self.model, self.transfer_learning_config.freeze_layers)
trainer = Trainer(
self.model,
self.models,
self.embeddings,
self.model_config,
training_config=self.training_config,
model_saver=self.get_model_saver(),
multiprocessing=self.multiprocessing,
checkpoint_path=self.log_dir,
preprocessor=self.p
)
trainer.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
self.clear_embedding_cache()
def get_model_saver(self):
return ModelSaver(
preprocessor=self.p,
model_config=self.model_config
)
def train_nfold( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid=None, y_valid=None, fold_number=10,
features_train: np.array = None,
features_valid: np.array = None):
if x_valid is not None and y_valid is not None:
x_all = np.concatenate((x_train, x_valid), axis=0)
y_all = np.concatenate((y_train, y_valid), axis=0)
features_all = concatenate_or_none((features_train, features_valid), axis=0)
self.p = prepare_preprocessor(
x_all, y_all,
features=features_all,
model_config=self.model_config
)
else:
self.p = prepare_preprocessor(
x_train, y_train,
features=features_train,
model_config=self.model_config
)
self.model_config.char_vocab_size = len(self.p.vocab_char)
self.model_config.case_vocab_size = len(self.p.vocab_case)
self.p.return_lengths = True
self.models = []
for _ in range(0, fold_number):
model = get_model(self.model_config, self.p, len(self.p.vocab_tag))
self.models.append(model)
trainer = Trainer(
self.model,
self.models,
self.embeddings,
self.model_config,
training_config=self.training_config,
model_saver=self.get_model_saver(),
checkpoint_path=self.log_dir,
preprocessor=self.p
)
trainer.train_nfold(
x_train, y_train,
x_valid, y_valid,
features_train=features_train,
features_valid=features_valid
)
if self.embeddings:
if self.embeddings.use_ELMo:
self.embeddings.clean_ELMo_cache()
if self.embeddings.use_BERT:
self.embeddings.clean_BERT_cache()
def eval( # pylint: disable=arguments-differ
self, x_test, y_test, features: np.array = None):
should_eval_nfold = (
self.model_config.fold_number > 1
and self.models
and len(self.models) == self.model_config.fold_number
)
if should_eval_nfold:
self.eval_nfold(x_test, y_test, features=features)
else:
self.eval_single(x_test, y_test, features=features)
def create_eval_data_generator(self, *args, **kwargs) -> DataGenerator:
return DataGenerator( # type: ignore
*args,
batch_size=(
self.eval_batch_size
or self.training_config.batch_size
),
preprocessor=self.p,
additional_token_feature_indices=self.model_config.additional_token_feature_indices,
text_feature_indices=self.model_config.text_feature_indices,
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
char_embed_size=self.model_config.char_embedding_size,
is_deprecated_padded_batch_text_list_enabled=(
self.model_config.is_deprecated_padded_batch_text_list_enabled
),
max_sequence_length=self.eval_max_sequence_length,
embeddings=self.embeddings,
**kwargs
)
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
self._require_model()
if self.model_config.use_features and features is None:
raise ValueError('features required')
tagger = Tagger(
self.model, self.model_config, self.embeddings,
dataset_transformer_factory=self.dataset_transformer_factory,
max_sequence_length=self.eval_max_sequence_length,
input_window_stride=self.eval_input_window_stride,
preprocessor=self.p
)
tag_result = tagger.tag(
list(x_test),
output_format=None,
features=features
)
y_pred = [
[token_tag for _, token_tag in doc_pred]
for doc_pred in tag_result
]
# convert to list, get_entities is type checking for list but not ndarray
y_true = [list(true_doc) for true_doc in y_test]
return ClassificationResult(y_pred=y_pred, y_true=y_true)
def eval_single( # pylint: disable=arguments-differ
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
classification_result = self.get_evaluation_result(
x_test=x_test,
y_test=y_test,
features=features
)
print(classification_result.get_formatted_report(digits=4))
def eval_nfold( # pylint: disable=arguments-differ
self, x_test, y_test, features: np.array = None):
if self.models is not None:
total_f1 = 0
best_f1 = 0
best_index = 0
worst_f1 = 1
worst_index = 0
reports = []
total_precision = 0
total_recall = 0
for i in range(0, self.model_config.fold_number):
print(
'\n------------------------ fold %s --------------------------------------'
% i
)
# Prepare test data(steps, generator)
test_generator = self.create_eval_data_generator(
x_test, y_test,
features=features,
shuffle=False
)
# Build the evaluator and evaluate the model
scorer = Scorer(test_generator, self.p, evaluation=True)
scorer.model = self.models[i]
scorer.on_epoch_end(epoch=-1)
f1 = scorer.f1
precision = scorer.precision
recall = scorer.recall
reports.append(scorer.report)
if best_f1 < f1:
best_f1 = f1
best_index = i
if worst_f1 > f1:
worst_f1 = f1
worst_index = i
total_f1 += f1
total_precision += precision
total_recall += recall
macro_f1 = total_f1 / self.model_config.fold_number
macro_precision = total_precision / self.model_config.fold_number
macro_recall = total_recall / self.model_config.fold_number
print("\naverage over", self.model_config.fold_number, "folds")
print("\tmacro f1 =", macro_f1)
print("\tmacro precision =", macro_precision)
print("\tmacro recall =", macro_recall, "\n")
print("\n** Worst ** model scores - \n")
print(reports[worst_index])
self.model = self.models[best_index]
print("\n** Best ** model scores - \n")
print(reports[best_index])
def iter_tag(
self, texts, output_format, features=None
) -> Union[dict, Iterable[List[Tuple[str, str]]]]:
# annotate a list of sentences, return the list of annotations in the
# specified output_format
self._require_model()
if self.model_config.use_features and features is None:
raise ValueError('features required')
tagger = Tagger(
self.model, self.model_config, self.embeddings,
dataset_transformer_factory=self.dataset_transformer_factory,
max_sequence_length=self.max_sequence_length,
input_window_stride=self.input_window_stride,
preprocessor=self.p
)
LOGGER.debug('tag_transformed: %s', self.tag_transformed)
annotations: Union[dict, Iterable[List[Tuple[str, str]]]]
if output_format == 'json':
start_time = time.time()
annotations = tagger.tag(
list(texts), output_format,
features=features,
tag_transformed=self.tag_transformed
)
runtime = round(time.time() - start_time, 3)
assert isinstance(annotations, dict)
annotations["runtime"] = runtime
else:
annotations = tagger.iter_tag(
list(texts), output_format,
features=features,
tag_transformed=self.tag_transformed
)
if self.tag_debug_reporter:
if not isinstance(annotations, dict):
# the tag debug reporter only supports lists
# additionally should not consume the iterable
annotations = list(annotations)
self.tag_debug_reporter.report_tag_results(
texts=texts,
features=features,
annotations=annotations,
model_name=self._get_model_name()
)
return annotations
def tag(self, *args, **kwargs) -> Union[dict, List[List[Tuple[str, str]]]]:
iterable_or_dict = self.iter_tag(*args, **kwargs)
if isinstance(iterable_or_dict, dict):
return iterable_or_dict
return list(iterable_or_dict)
def _require_model(self):
if not self.model:
try:
raise OSError('Model not loaded: %s (previous load exception: %r)' % (
self._get_model_name(), self._load_exception
)) from self._load_exception
except Exception as exc:
LOGGER.exception('Model required but not loaded: %r', exc, exc_info=exc)
raise
def _get_model_name(self):
return self.model_config.model_name
@property
def last_checkpoint_path(self) -> Optional[str]:
if not self.log_dir:
return None
return get_last_checkpoint_url(get_checkpoints_json(self.log_dir))
@property
def model_summary_props(self) -> dict:
return {
'model_type': 'delft',
'architecture': self.model_config.model_type,
'model_config': vars(self.model_config)
}
def get_model_output_path(self, dir_path: str = None) -> str:
return get_model_directory(model_name=self.model_config.model_name, dir_path=dir_path)
def _get_model_directory(self, dir_path: str = None) -> str:
return self.get_model_output_path(dir_path=dir_path)
def get_embedding_for_model_config(self, model_config: ModelConfig):
embedding_name = model_config.embeddings_name
if not model_config.use_word_embeddings or not embedding_name:
return None
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
embeddings = Embeddings(
embedding_name,
path=self.embedding_registry_path,
use_ELMo=model_config.use_ELMo,
use_BERT=model_config.use_BERT
)
if not embeddings.embed_size > 0:
raise AssertionError(
'invalid embedding size, embeddings not loaded? %s' % embedding_name
)
return embeddings
def get_meta(self):
return {
'training_config': vars(self.training_config)
}
def save(self, dir_path=None):
# create subfolder for the model if not already exists
directory = self._get_model_directory(dir_path)
os.makedirs(directory, exist_ok=True)
self.get_model_saver().save_to(directory, model=self.model, meta=self.get_meta())
def load(self, dir_path=None):
directory = None
try:
directory = self._get_model_directory(dir_path)
self.load_from(directory)
except Exception as exc:
self._load_exception = exc
LOGGER.exception('failed to load model from %r', directory, exc_info=exc)
raise
def download_model(self, dir_path: str) -> str:
if not dir_path.endswith('.tar.gz'):
return dir_path
local_dir_path = str(self.download_manager.get_local_file(
dir_path, auto_uncompress=False
)).replace('.tar.gz', '')
copy_directory_with_source_meta(dir_path, local_dir_path)
return local_dir_path
def load_from(self, directory: str):
model_loader = ModelLoader(download_manager=self.download_manager)
directory = self.download_model(directory)
self.model_path = directory
self.p = model_loader.load_preprocessor_from_directory(directory)
self.model_config = model_loader.load_model_config_from_directory(directory)
self.model_config.batch_size = self.training_config.batch_size
if self.stateful is not None:
self.model_config.stateful = self.stateful
# load embeddings
LOGGER.info('loading embeddings: %s', self.model_config.embeddings_name)
self.embeddings = self.get_embedding_for_model_config(self.model_config)
self.update_model_config_word_embedding_size()
self.model = get_model(self.model_config, self.p, ntags=len(self.p.vocab_tag))
# update stateful flag depending on whether the model is actually stateful
# (and supports that)
self.model_config.stateful = is_model_stateful(self.model)
# load weights
model_loader.load_model_from_directory(directory, model=self.model)
        self.update_dataset_transformer_factor()
import json
import difflib
import logging
from xml.sax.saxutils import escape as xml_escape
from typing import Optional, Union, Iterable, List, Tuple
import numpy as np
from delft.sequenceLabelling.evaluation import get_entities
LOGGER = logging.getLogger(__name__)
class TagOutputFormats:
JSON = 'json'
DATA = 'data'
DATA_UNIDIFF = 'data_unidiff'
TEXT = 'text'
XML = 'xml'
XML_DIFF = 'xml_diff'
TAG_OUTPUT_FORMATS = [
TagOutputFormats.JSON,
TagOutputFormats.DATA,
TagOutputFormats.DATA_UNIDIFF,
TagOutputFormats.TEXT,
TagOutputFormats.XML,
TagOutputFormats.XML_DIFF,
]
class CustomJsonEncoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=arguments-differ, method-hidden
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def get_tag_result(texts: List[List[str]], labels: List[List[str]]):
return [
list(zip(doc_texts, doc_labels))
for doc_texts, doc_labels in zip(texts, labels)
]
def format_json_tag_result_as_json(tag_result: dict) -> str:
return json.dumps(tag_result, indent=2, cls=CustomJsonEncoder)
def format_list_tag_result_as_json(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None,
features: np.array = None,
model_name: str = None) -> str:
output_props = {
'model': model_name,
'texts': np.array(texts).tolist(),
'features': np.array(features).tolist() if features is not None else None,
'annotations': list(tag_result)
}
return json.dumps(output_props, indent=2, cls=CustomJsonEncoder)
def iter_to_data_lines(
features: np.array,
annotations: Iterable[List[Tuple[str, str]]]
) -> Iterable[str]:
for document_lindex, (line_annotations, line_features) in enumerate(
zip(annotations, features.tolist())
):
if document_lindex > 0:
yield '' # blank line separator
yield from (
' '.join([token_annoation[0]] + list(token_features) + [token_annoation[1]])
for token_annoation, token_features in zip(line_annotations, line_features)
)
def to_data_lines(*args, **kwargs) -> List[str]:
return list(iter_to_data_lines(*args, **kwargs))
def iter_format_list_tag_result_as_data(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None,
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert features is not None
data_text_iterable = iter_to_data_lines(
features=features,
annotations=tag_result
)
for line_index, data_text in enumerate(data_text_iterable):
if line_index > 0:
yield '\n'
yield data_text
def format_list_tag_result_as_data(*args, **kwargs) -> str:
return ''.join(iter_format_list_tag_result_as_data(*args, **kwargs))
def iter_simple_unidiff(
a, b, fromfile='', tofile='', lineterm='\n',
force_output: bool = False
) -> Iterable[str]:
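    # Produce a minimal unified-diff-like output for two aligned sequences of
    # lines: a single hunk covering all lines, emitting '-'/'+' line pairs where
    # the expected and actual values differ and ' ' prefixed lines where they
    # match. If there are no differences, nothing is yielded unless
    # force_output is set.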
if len(a) > len(b):
        # truncate expected, as predicted sequences may have been truncated
a = a[:len(b)]
assert len(a) == len(b)
line_count = len(a)
is_diff_list = [
value_1 != value_2
for value_1, value_2 in zip(a, b)
]
LOGGER.debug('is_diff_list: %s', is_diff_list)
diff_count = sum(is_diff_list)
if not diff_count and not force_output:
return
if fromfile:
yield f'--- {fromfile}{lineterm}'
if tofile:
yield f'+++ {tofile}{lineterm}'
removed_with_prefix = f'-{diff_count}' if diff_count else '-0'
added_with_prefix = f'+{diff_count}' if diff_count else '+0'
yield f'@@ {removed_with_prefix},{line_count} {added_with_prefix},{line_count} @@{lineterm}'
for is_diff, value_1, value_2 in zip(is_diff_list, a, b):
if is_diff:
yield f'-{value_1}'
yield f'+{value_2}'
else:
yield f' {value_1}'
def split_lines_with_line_feed(text: str, line_feed: str = '\n') -> List[str]:
# Note: similar to .splitlines(keepends=True), but always adds the line feed
return [
line + line_feed
for line in text.splitlines()
]
def iter_format_document_tag_result_as_data_unidiff(
document_tag_result: List[Tuple[str, str]],
document_expected_tag_result: List[Tuple[str, str]],
document_features: List[List[str]],
document_name: str
) -> Iterable[str]:
actual_data = format_list_tag_result_as_data(
[document_tag_result],
features=np.expand_dims(document_features, axis=0)
)
expected_data = format_list_tag_result_as_data(
[document_expected_tag_result],
features=np.expand_dims(document_features, axis=0)
)
LOGGER.debug('actual_data: %r', actual_data)
LOGGER.debug('expected_data: %r', expected_data)
yield from iter_simple_unidiff(
split_lines_with_line_feed(expected_data),
split_lines_with_line_feed(actual_data),
fromfile=f'{document_name}.expected',
tofile=f'{document_name}.actual'
)
def iter_format_document_list_tag_result_as_data_unidiff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
features: np.ndarray,
document_name_prefix: str
) -> Iterable[str]:
for document_index, document_tag_result in enumerate(tag_result):
yield from iter_format_document_tag_result_as_data_unidiff(
document_tag_result=document_tag_result,
document_expected_tag_result=expected_tag_result[document_index],
document_features=features[document_index],
document_name='%s%06d' % (document_name_prefix, 1 + document_index)
)
def iter_format_list_tag_result_as_data_unidiff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
texts: np.ndarray = None, # pylint: disable=unused-argument
features: np.ndarray = None,
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert expected_tag_result
document_name_prefix = 'document_'
if model_name:
document_name_prefix = model_name + '_' + document_name_prefix
yield from iter_format_document_list_tag_result_as_data_unidiff(
tag_result=tag_result,
expected_tag_result=expected_tag_result,
features=features,
document_name_prefix=document_name_prefix
)
def iter_to_flat_text(texts: np.array) -> Iterable[str]:
for document_index, line_tokens in enumerate(texts):
if document_index > 0:
yield '\n'
yield ' '.join(line_tokens)
def iter_format_list_tag_result_as_text(
tag_result: Iterable[List[Tuple[str, str]]], # pylint: disable=unused-argument
texts: np.array = None,
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert texts is not None
yield from iter_to_flat_text(texts=texts)
def get_xml_tag_for_annotation_label(annotation_label: str) -> str:
return annotation_label.replace('<', '').replace('>', '').split('-', maxsplit=1)[-1]
def iter_add_untagged_token_spans(
entity_chunks: Iterable[Tuple[str, int, int]],
token_count: int,
untagged_chunk_type: str = None
) -> Iterable[Tuple[Optional[str], int, int]]:
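    # Yield the given entity chunks, inserting spans of type untagged_chunk_type
    # (default None) for any token index ranges not covered by an entity chunk,
    # so that the yielded spans together cover token indices 0 to token_count - 1.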
prev_chunk_end_excl = 0
for chunk_type, chunk_start, chunk_end in entity_chunks:
if chunk_start > prev_chunk_end_excl:
yield untagged_chunk_type, prev_chunk_end_excl, (chunk_start - 1)
yield chunk_type, chunk_start, chunk_end
prev_chunk_end_excl = chunk_end + 1
if token_count > prev_chunk_end_excl:
yield untagged_chunk_type, prev_chunk_end_excl, (token_count - 1)
def iter_doc_annotations_xml_text(
doc_annotations: List[Tuple[str, str]]) -> Iterable[str]:
LOGGER.debug('doc_annotations: %s', doc_annotations)
text_tokens = [token_text for token_text, _ in doc_annotations]
token_labels = [token_label for _, token_label in doc_annotations]
entity_chunks = list(iter_add_untagged_token_spans(
get_entities(token_labels),
len(token_labels)
))
LOGGER.debug('text_tokens: %s', text_tokens)
LOGGER.debug('token_labels: %s', token_labels)
LOGGER.debug('entity_chunks: %s', entity_chunks)
return '\n'.join((
(
' <{tag}>{text}</{tag}>'.format(
tag=get_xml_tag_for_annotation_label(chunk_type),
text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
)
if chunk_type
else
' {text}'.format(
text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
)
)
for chunk_type, chunk_start, chunk_end in entity_chunks
)) + '\n'
def iter_annotations_xml_text(
annotations: Iterable[List[Tuple[str, str]]]
) -> Iterable[str]:
for doc_index, doc_annotations in enumerate(annotations):
if doc_index > 0:
yield '\n\n'
yield ' <p>\n'
yield from iter_doc_annotations_xml_text(doc_annotations)
yield ' </p>\n'
def iter_format_list_tag_result_as_xml(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
yield '<xml>\n'
yield from iter_annotations_xml_text(
annotations=tag_result
)
yield '</xml>'
def format_list_tag_result_as_xml(*args, **kwargs) -> str:
return ''.join(iter_format_list_tag_result_as_xml(*args, **kwargs))
def iter_format_list_tag_result_as_xml_diff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert expected_tag_result
actual_xml = format_list_tag_result_as_xml(tag_result)
expected_xml = format_list_tag_result_as_xml(expected_tag_result)
yield from difflib.ndiff(
expected_xml.splitlines(keepends=True),
actual_xml.splitlines(keepends=True)
)
def iter_format_list_tag_result(
*args,
output_format: str,
expected_tag_result: Optional[List[List[Tuple[str, str]]]] = None,
**kwargs) -> Iterable[str]:
if output_format == TagOutputFormats.JSON:
yield format_list_tag_result_as_json(*args, **kwargs)
return
if output_format == TagOutputFormats.DATA:
yield from iter_format_list_tag_result_as_data(*args, **kwargs)
return
if output_format == TagOutputFormats.DATA_UNIDIFF:
assert expected_tag_result
yield from iter_format_list_tag_result_as_data_unidiff( # type: ignore
*args,
expected_tag_result=expected_tag_result,
**kwargs
)
return
if output_format == TagOutputFormats.TEXT:
yield from iter_format_list_tag_result_as_text(*args, **kwargs)
return
if output_format == TagOutputFormats.XML:
yield from iter_format_list_tag_result_as_xml(*args, **kwargs)
return
if output_format == TagOutputFormats.XML_DIFF:
assert expected_tag_result
yield from iter_format_list_tag_result_as_xml_diff( # type: ignore
*args,
expected_tag_result=expected_tag_result,
**kwargs
)
return
raise ValueError('unrecognised output format: %s' % output_format)
def iter_format_tag_result(
tag_result: Union[dict, list, Iterable],
output_format: str,
expected_tag_result: Optional[List[List[Tuple[str, str]]]] = None,
texts: np.array = None,
features: np.array = None,
model_name: str = None) -> Iterable[str]:
if isinstance(tag_result, dict):
assert output_format == TagOutputFormats.JSON
yield format_json_tag_result_as_json(tag_result)
return
yield from iter_format_list_tag_result(
tag_result,
output_format=output_format,
expected_tag_result=expected_tag_result,
texts=texts,
features=features,
model_name=model_name
)
def format_tag_result(*args, **kwargs) -> str:
    return ''.join(iter_format_tag_result(*args, **kwargs))
import logging
import itertools
from functools import partial
from typing import Any, Dict, List, Iterable, Set, Tuple, Union
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import FunctionTransformer
from delft.sequenceLabelling.preprocess import (
FeaturesPreprocessor as DelftFeaturesPreprocessor,
WordPreprocessor as DelftWordPreprocessor,
PAD,
UNK
)
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.utils.progress_logger import logging_tqdm
import sciencebeam_trainer_delft.utils.compat.sklearn # noqa pylint: disable=unused-import
LOGGER = logging.getLogger(__name__)
def to_dict(
value_list_batch: List[list],
feature_indices: Set[int] = None,
exclude_features_indices: Set[int] = None
) -> Iterable[dict]:
# Note: keeping `feature_indices` name for pickle compatibility
# (also matches upstream for `to_dict`)
return (
{
index: value
for index, value in enumerate(value_list)
if (
(not feature_indices or index in feature_indices)
and (not exclude_features_indices or index not in exclude_features_indices)
)
}
for value_list in value_list_batch
)
def to_float_features(
value_list_batch: List[list],
features_indices: Set[int]
) -> Iterable[List[float]]:
return (
[
float(value)
for index, value in enumerate(value_list)
if index in features_indices
]
for value_list in value_list_batch
)
def faster_preprocessor_fit(self: DelftWordPreprocessor, X, y):
chars = {PAD: 0, UNK: 1}
tags = {PAD: 0}
if self.use_char_feature:
temp_chars = {
c
for w in set(itertools.chain(*X))
for c in w
}
sorted_chars = sorted(temp_chars)
sorted_chars_dict = {
c: idx + 2
for idx, c in enumerate(sorted_chars)
}
chars = {**chars, **sorted_chars_dict}
temp_tags = set(itertools.chain(*y))
sorted_tags = sorted(temp_tags)
sorted_tags_dict = {
tag: idx + 1
for idx, tag in enumerate(sorted_tags)
}
tags = {**tags, **sorted_tags_dict}
self.vocab_char = chars
self.vocab_tag = tags
class WordPreprocessor(DelftWordPreprocessor):
# keeping class for pickle compatibility
pass
def iter_batch(iterable: Iterable[T], n: int = 1) -> Iterable[List[T]]:
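    """Yield successive batches (lists) of up to n items from the given iterable."""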
batch = []
for item in iterable:
batch.append(item)
if len(batch) >= n:
yield batch
batch = []
if batch:
yield batch
class IterableMinMaxScaler(MinMaxScaler):
def fit(self, X, y=None):
batch_size = 1000
for batch in iter_batch(X, batch_size):
self.partial_fit(batch)
def transform(self, X):
return super().transform(list(X))
STATE_ATTRIBUTE_NAMES_BY_TYPE = {
DictVectorizer: ['feature_names_', 'vocabulary_'],
StandardScaler: ['scale_', 'mean_', 'var_', 'n_samples_seen_'],
MinMaxScaler: ['min_', 'scale_', 'data_min_', 'data_max_', 'data_range_', 'n_samples_seen_']
}
STATE_ATTRIBUTE_NAMES_BY_TYPE[IterableMinMaxScaler] = STATE_ATTRIBUTE_NAMES_BY_TYPE[MinMaxScaler]
def _iter_nested_pipeline_steps(steps: List[Tuple[str, Any]]) -> Iterable[Tuple[str, Any]]:
for step_name, step_value in steps:
yield step_name, step_value
if isinstance(step_value, Pipeline):
yield from _iter_nested_pipeline_steps(step_value.steps)
if isinstance(step_value, FeatureUnion):
yield from _iter_nested_pipeline_steps(step_value.transformer_list)
continue
def _find_step_by_name(steps: List[Tuple[str, Any]], name: str):
for step_name, step_value in _iter_nested_pipeline_steps(steps):
if step_name == name:
return step_value
raise ValueError(f'step with name {repr(name)} not found')
def _get_dict_vectorizer_state(vectorizer: DictVectorizer) -> dict:
return {
'vectorizer.feature_names': vectorizer.feature_names_,
'vectorizer.vocabulary': vectorizer.vocabulary_
}
def _get_attributes_state(obj, attribute_names: List[str]) -> dict:
result = {}
for attribute_name in attribute_names:
value = getattr(obj, attribute_name)
if isinstance(value, np.ndarray):
result[attribute_name] = value.tolist()
result[attribute_name + '.is_numpy'] = True
else:
result[attribute_name] = value
return result
def _restore_attributes_state(obj, state: Dict[str, Any]):
for attribute_name, value in state.items():
if '.' in attribute_name:
continue
if state.get(attribute_name + '.is_numpy'):
value = np.asarray(value)
setattr(obj, attribute_name, value)
def _get_pipeline_steps_state(steps: List[Tuple[str, Any]]) -> dict:
result = {}
for step_name, step_value in _iter_nested_pipeline_steps(steps):
state_attribute_names = STATE_ATTRIBUTE_NAMES_BY_TYPE.get(type(step_value))
if not state_attribute_names:
continue
result[step_name] = _get_attributes_state(step_value, state_attribute_names)
return result
def _restore_pipeline_steps_state(steps: List[Tuple[str, Any]], state: dict):
for step_name, step_value in _iter_nested_pipeline_steps(steps):
step_state = state.get(step_name)
if not step_state:
continue
_restore_attributes_state(step_value, step_state)
def _fit_transformer_with_progress_logging(
transformer: TransformerMixin,
X,
logger: logging.Logger,
message_prefix: str,
unit: str,
message_suffx: str = ': '
):
if isinstance(transformer, Pipeline):
steps = transformer.steps
if len(steps) == 1 and isinstance(steps[0][1], FeatureUnion):
feature_union = steps[0][1]
for name, union_transformer in feature_union.transformer_list:
X = logging_tqdm(
iterable=X,
logger=logger,
desc=f'{message_prefix}.{name}{message_suffx}',
unit=unit
)
union_transformer.fit(X)
return
X = logging_tqdm(iterable=X, logger=logger, desc=message_prefix + message_suffx, unit=unit)
transformer.fit(X)
class FeaturesPreprocessor(BaseEstimator, TransformerMixin):
def __init__(
self,
features_indices: Iterable[int] = None,
continuous_features_indices: Iterable[int] = None
):
self.features_indices = features_indices
self.continuous_features_indices = continuous_features_indices
self.features_map_to_index = None
self.pipeline = FeaturesPreprocessor._create_pipeline(
features_indices=features_indices,
continuous_features_indices=continuous_features_indices
)
@staticmethod
def _create_pipeline(
features_indices: Iterable[int] = None,
continuous_features_indices: Iterable[int] = None
):
features_indices_set = None
if features_indices:
features_indices_set = set(features_indices)
continuous_features_indices_set = set(
continuous_features_indices or []
)
to_dict_fn = partial(
to_dict,
feature_indices=features_indices_set,
exclude_features_indices=continuous_features_indices_set
)
pipeline = Pipeline(steps=[
('to_dict', FunctionTransformer(to_dict_fn, validate=False)),
('vectorize', DictVectorizer(sparse=False))
])
if continuous_features_indices_set:
to_float_features_fn = partial(
to_float_features,
features_indices=continuous_features_indices_set
)
continuous_features_pipeline = Pipeline(steps=[
('to_float_features', FunctionTransformer(to_float_features_fn, validate=False)),
('min_max_scalar', IterableMinMaxScaler()),
])
pipeline = Pipeline(steps=[
('union', FeatureUnion([
('continuous', continuous_features_pipeline),
('discreet', pipeline)
]))
])
LOGGER.info('pipeline=%s', pipeline)
return pipeline
@property
def vectorizer(self) -> DictVectorizer:
return _find_step_by_name(self.pipeline.steps, 'vectorize')
@property
def standard_scalar(self) -> StandardScaler:
return _find_step_by_name(self.pipeline.steps, 'standard_scalar')
def __getstate__(self):
return {
**_get_pipeline_steps_state(self.pipeline.steps),
'features_indices': self.features_indices,
'continuous_features_indices': self.continuous_features_indices
}
def __setstate__(self, state):
try:
if 'pipeline' in state:
# original pickle
return super().__setstate__(state)
self.features_indices = state['features_indices']
self.continuous_features_indices = state.get('continuous_features_indices')
self.pipeline = FeaturesPreprocessor._create_pipeline(
features_indices=self.features_indices,
continuous_features_indices=self.continuous_features_indices
)
_restore_pipeline_steps_state(self.pipeline.steps, state)
vectorizer_feature_names = state.get('vectorizer.feature_names')
vectorizer_vocabulary = state.get('vectorizer.vocabulary')
if vectorizer_feature_names is not None:
# restore deprecated state
vectorizer = self.vectorizer
vectorizer.feature_names_ = vectorizer_feature_names
vectorizer.vocabulary_ = vectorizer_vocabulary
except KeyError as exc:
raise KeyError('%r: found %s' % (exc, state.keys())) from exc
return self
def fit(self, X):
flattened_features = [
word_features
for sentence_features in X
for word_features in sentence_features
]
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('flattened_features: %s', flattened_features)
_fit_transformer_with_progress_logging(
self.pipeline,
flattened_features,
logger=LOGGER,
message_prefix='FeaturesPreprocessor.fit',
unit='token-features'
)
# self.pipeline.fit(flattened_features)
vectorizer = self.vectorizer
LOGGER.info('vectorizer.feature_names: %r', vectorizer.feature_names_)
LOGGER.info('vectorizer.vocabulary size: %r', len(vectorizer.vocabulary_))
return self
def transform(self, X, **_):
LOGGER.debug('transform, X: %s', X)
return np.asarray([
self.pipeline.transform(sentence_features)
for sentence_features in X
])
T_FeaturesPreprocessor = Union[FeaturesPreprocessor, DelftFeaturesPreprocessor]
class Preprocessor(WordPreprocessor):
# keeping class for pickle compatibility
pass | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/preprocess.py | 0.742795 | 0.309245 | preprocess.py | pypi |
import argparse
import logging
from typing import Dict, List, Optional, NamedTuple
import keras
import numpy as np
from delft.sequenceLabelling.preprocess import WordPreprocessor
from delft.sequenceLabelling.models import BaseModel
from sciencebeam_trainer_delft.utils.misc import (
parse_comma_separated_str,
parse_dict
)
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelLoader
from sciencebeam_trainer_delft.sequence_labelling.models import (
get_model
)
LOGGER = logging.getLogger(__name__)
class TransferLearningConfig(NamedTuple):
source_model_path: Optional[str] = None
copy_layers: Optional[Dict[str, str]] = None
copy_preprocessor: bool = False
copy_preprocessor_fields: Optional[List[str]] = None
freeze_layers: Optional[List[str]] = None
class TransferModelWrapper:
def __init__(self, model: BaseModel):
self.model = model
self.keras_model: keras.Model = model.model
self.keras_layers_by_name: Dict[str, keras.layers.Layer] = {
layer.name: layer
for layer in self.keras_model.layers
}
self.layer_names = set(self.keras_layers_by_name.keys())
def get_layer_weights(self, layer_name: str) -> List[np.ndarray]:
return self.keras_layers_by_name[layer_name].get_weights()
def set_layer_weights(self, layer_name: str, weights: List[np.ndarray]):
LOGGER.info('setting weights of layer: %r', layer_name)
LOGGER.debug('setting weights of layer %r to:\n%s', layer_name, weights)
self.keras_layers_by_name[layer_name].set_weights(weights)
def freeze_layer(self, layer_name: str):
LOGGER.info('freezing layer: %r', layer_name)
self.keras_layers_by_name[layer_name].trainable = False
class TransferLearningSource:
def __init__(
self,
transfer_learning_config: TransferLearningConfig,
source_model: BaseModel,
source_preprocessor: WordPreprocessor
):
self.transfer_learning_config = transfer_learning_config
self.source_model = source_model
self.source_preprocessor = source_preprocessor
@staticmethod
def from_config(
transfer_learning_config: Optional[TransferLearningConfig],
download_manager: DownloadManager = None
) -> Optional['TransferLearningSource']:
if not transfer_learning_config:
LOGGER.info('no transfer learning config specified')
return None
if not transfer_learning_config.source_model_path:
LOGGER.info('no transfer learning source model specified')
return None
LOGGER.info('transfer learning config: %s', transfer_learning_config)
model_loader = ModelLoader(download_manager=download_manager)
directory = model_loader.download_model(transfer_learning_config.source_model_path)
source_model_config = model_loader.load_model_config_from_directory(directory)
source_preprocessor = model_loader.load_preprocessor_from_directory(directory)
source_model: BaseModel = get_model(
source_model_config,
source_preprocessor,
ntags=len(source_preprocessor.vocab_tag)
)
model_loader.load_model_from_directory(directory, source_model)
return TransferLearningSource(
transfer_learning_config=transfer_learning_config,
source_model=source_model,
source_preprocessor=source_preprocessor
)
def copy_preprocessor_if_enabled(self) -> Optional[WordPreprocessor]:
if self.transfer_learning_config.copy_preprocessor:
LOGGER.info('copying preprocessor')
return self.source_preprocessor
return None
def apply_preprocessor(self, target_preprocessor: WordPreprocessor):
if not self.transfer_learning_config.copy_preprocessor_fields:
LOGGER.info('no transfer learning preprocessor fields specified')
return
for field_name in self.transfer_learning_config.copy_preprocessor_fields:
LOGGER.info('copying preprocessor field: %r', field_name)
value = getattr(self.source_preprocessor, field_name)
setattr(target_preprocessor, field_name, value)
def apply_weights(self, target_model: BaseModel):
if not self.transfer_learning_config.copy_layers:
LOGGER.info('no transfer learning source layers specified')
return
wrapped_source_model = TransferModelWrapper(self.source_model)
wrapped_target_model = TransferModelWrapper(target_model)
copy_layers_map = self.transfer_learning_config.copy_layers
requested_target_layers = copy_layers_map.keys()
requested_source_layers = copy_layers_map.values()
missing_source_layers = (
set(requested_source_layers) - set(wrapped_source_model.layer_names)
)
if missing_source_layers:
raise ValueError('missing source layers for transfer learning: %s (available: %s)' % (
missing_source_layers, wrapped_source_model.layer_names
))
missing_target_layers = (
set(requested_target_layers) - set(wrapped_target_model.layer_names)
)
if missing_target_layers:
raise ValueError('missing target layers for transfer learning: %s (available: %s)' % (
missing_target_layers, wrapped_target_model.layer_names
))
for target_layer_name, source_layer_name in copy_layers_map.items():
LOGGER.info('copying layer weights: %r -> %r', source_layer_name, target_layer_name)
try:
wrapped_target_model.set_layer_weights(
target_layer_name,
wrapped_source_model.get_layer_weights(source_layer_name)
)
except Exception as exc:
raise RuntimeError(
'failed to copy layer weights (%r -> %r) due to %r' % (
source_layer_name, target_layer_name, exc
)
) from exc
def freeze_model_layers(target_model: BaseModel, layers: Optional[List[str]]):
if not layers:
return
wrapped_target_model = TransferModelWrapper(target_model)
for layer_name in layers:
wrapped_target_model.freeze_layer(layer_name)
def add_transfer_learning_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
'--transfer-source-model-path',
type=str,
help='path to model, that learned layers or parameters should be transfered from'
)
parser.add_argument(
'--transfer-copy-layers',
type=parse_dict,
help='the layers to transfer (mapping from target to source)'
)
parser.add_argument(
'--transfer-copy-preprocessor',
action='store_true',
default=False,
help='copy the whole preprocessor'
)
parser.add_argument(
'--transfer-copy-preprocessor-fields',
type=parse_comma_separated_str,
help='the preprocessor fields to transfer (e.g. "vocab_char")'
)
parser.add_argument(
'--transfer-freeze-layers',
type=parse_comma_separated_str,
help='the layers to freeze'
)
def get_transfer_learning_config_for_parsed_args(
args: argparse.Namespace
) -> TransferLearningConfig:
return TransferLearningConfig(
source_model_path=args.transfer_source_model_path,
copy_layers=args.transfer_copy_layers,
copy_preprocessor=args.transfer_copy_preprocessor,
copy_preprocessor_fields=args.transfer_copy_preprocessor_fields,
freeze_layers=args.transfer_freeze_layers
) | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/transfer_learning.py | 0.870721 | 0.190122 | transfer_learning.py | pypi |
import logging
import re
from itertools import islice
from typing import Iterable, List, Tuple
import numpy as np
from delft.sequenceLabelling.reader import _translate_tags_grobid_to_IOB
LOGGER = logging.getLogger(__name__)
# partially copied from delft/sequenceLabelling/reader.py
def iter_load_data_and_labels_crf_lines(
lines: Iterable[str]
) -> Iterable[Tuple[List[str], List[str], List[List[str]]]]:
tokens: List[str] = []
tags: List[str] = []
features: List[List[str]] = []
for line in lines:
line = line.strip()
LOGGER.debug('line: %s', line)
if not line:
if tokens:
yield tokens, tags, features
tokens, tags, features = [], [], []
else:
pieces = re.split(' |\t', line)
token = pieces[0]
tag = pieces[len(pieces)-1]
localFeatures = pieces[1:len(pieces)-1]
tokens.append(token)
tags.append(_translate_tags_grobid_to_IOB(tag))
features.append(localFeatures)
if tokens:
yield tokens, tags, features
def iter_load_data_crf_lines(
lines: Iterable[str]
) -> Iterable[Tuple[List[str], List[List[str]]]]:
tokens: List[str] = []
features: List[List[str]] = []
for line in lines:
line = line.strip()
LOGGER.debug('line: %s', line)
if not line:
if tokens:
yield tokens, features
tokens, features = [], []
else:
pieces = re.split(' |\t', line)
token = pieces[0]
localFeatures = pieces[1:]
tokens.append(token)
features.append(localFeatures)
if tokens:
yield tokens, features
def load_data_and_labels_crf_lines(
lines: Iterable[str],
limit: int = None) -> Tuple[np.array, np.array, np.array]:
"""
Load data, features and label from a CRF matrix string
the format is as follow:
token_0 f0_0 f0_1 ... f0_n label_0
token_1 f1_0 f1_1 ... f1_n label_1
...
token_m fm_0 fm_1 ... fm_n label_m
field separator can be either space or tab
Returns:
tuple(numpy array, numpy array, numpy array): tokens, labels, features
"""
sents = []
labels = []
featureSets = []
documents = iter_load_data_and_labels_crf_lines(lines)
if limit:
LOGGER.info('limiting training data to: %s', limit)
documents = islice(documents, limit)
for tokens, tags, features in documents:
sents.append(tokens)
labels.append(tags)
featureSets.append(features)
# specifying dtype object can significantly reduce the memory consumption
# e.g. for features it could be 20 MB instead of 1 GB
return (
np.asarray(sents, dtype='object'),
np.asarray(labels, dtype='object'),
np.asarray(featureSets, dtype='object')
)
def load_data_crf_lines(
lines: Iterable[str],
limit: int = None) -> Tuple[np.array, np.array]:
"""
Load data, features (no label!) from a CRF matrix file
the format is as follow:
token_0 f0_0 f0_1 ... f0_n
token_1 f1_0 f1_1 ... f1_n
...
token_m fm_0 fm_1 ... fm_n
field separator can be either space or tab
Returns:
tuple(numpy array, numpy array): tokens, features
"""
sents = []
featureSets = []
documents = iter_load_data_crf_lines(lines)
if limit:
LOGGER.info('limiting training data to: %s', limit)
documents = islice(documents, limit)
for tokens, features in documents:
sents.append(tokens)
featureSets.append(features)
# specifying dtype object can significantly reduce the memory consumption
# e.g. for features it could be 20 MB instead of 1 GB
return (
np.asarray(sents, dtype='object'),
np.asarray(featureSets, dtype='object')
)
def load_data_and_labels_crf_file(
filepath: str,
limit: int = None) -> Tuple[np.array, np.array, np.array]:
try:
with open(filepath, 'r', encoding='utf-8') as fp:
return load_data_and_labels_crf_lines(fp, limit=limit)
except Exception as exc:
raise RuntimeError('failed to read file %r' % filepath) from exc
def load_data_crf_string(
crf_string: str,
limit: int = None) -> Tuple[np.array, np.array]:
return load_data_crf_lines(crf_string.splitlines(), limit=limit) | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/reader.py | 0.598077 | 0.345436 | reader.py | pypi |
import logging
import os
from typing import NamedTuple, Optional
import numpy as np
from delft.sequenceLabelling.evaluation import (
f1_score,
accuracy_score,
precision_score,
recall_score
)
from delft.sequenceLabelling.trainer import Trainer as _Trainer
from delft.sequenceLabelling.trainer import Scorer as _Scorer
from delft.sequenceLabelling.models import BaseModel
from sciencebeam_trainer_delft.sequence_labelling.utils.types import (
T_Batch_Tokens,
T_Batch_Features,
T_Batch_Labels
)
from sciencebeam_trainer_delft.utils.keras.callbacks import ResumableEarlyStopping
from sciencebeam_trainer_delft.sequence_labelling.evaluation import classification_report
from sciencebeam_trainer_delft.sequence_labelling.config import TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.data_generator import DataGenerator
from sciencebeam_trainer_delft.sequence_labelling.callbacks import ModelWithMetadataCheckpoint
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelSaver
LOGGER = logging.getLogger(__name__)
def get_callbacks(
model_saver: ModelSaver,
log_dir: str = None,
log_period: int = 1,
valid: tuple = (),
early_stopping: bool = True,
early_stopping_patience: int = 5,
initial_meta: Optional[dict] = None,
meta: dict = None
):
"""
Get callbacks.
Args:
log_dir (str): the destination to save logs
valid (tuple): data for validation.
early_stopping (bool): whether to use early stopping.
Returns:
list: list of callbacks
"""
callbacks = []
if valid:
callbacks.append(Scorer(*valid)) # pylint: disable=no-value-for-parameter
if early_stopping:
# Note: ensure we are not restoring weights
# as that would affect saving the model.
# The saving checkpoint need to be last,
# in order to save the state meta data of this checkpoint.
callbacks.append(ResumableEarlyStopping(
initial_meta=initial_meta,
monitor='f1',
patience=early_stopping_patience,
mode='max',
restore_best_weights=False
))
if log_dir:
epoch_dirname = 'epoch-{epoch:05d}'
assert model_saver
save_callback = ModelWithMetadataCheckpoint(
os.path.join(log_dir, epoch_dirname),
period=log_period,
model_saver=model_saver,
monitor='f1',
meta=meta
)
callbacks.append(save_callback)
return callbacks
class PredictedResults(NamedTuple):
y_pred: T_Batch_Labels
y_true: T_Batch_Labels
def get_model_results(model, valid_batches: list, preprocessor=None) -> PredictedResults:
valid_steps = len(valid_batches)
for i, (data, label) in enumerate(valid_batches):
if i == valid_steps:
break
y_true_batch = label
y_true_batch = np.argmax(y_true_batch, -1)
sequence_lengths = data[-1] # shape of (batch_size, 1)
sequence_lengths = np.reshape(sequence_lengths, (-1,))
y_pred_batch = model.predict_on_batch(data)
y_pred_batch = np.argmax(y_pred_batch, -1)
y_pred_batch = [
preprocessor.inverse_transform(y[:l]) for y, l in zip(y_pred_batch, sequence_lengths)
]
y_true_batch = [
preprocessor.inverse_transform(y[:l]) for y, l in zip(y_true_batch, sequence_lengths)
]
if i == 0:
y_pred = y_pred_batch
y_true = y_true_batch
else:
y_pred = y_pred + y_pred_batch
y_true = y_true + y_true_batch
return PredictedResults(y_pred=y_pred, y_true=y_true)
class Scorer(_Scorer):
def on_epoch_end(self, epoch: int, logs: dict = None):
prediction_results = get_model_results(
self.model, self.valid_batches, preprocessor=self.p
)
y_pred = prediction_results.y_pred
y_true = prediction_results.y_true
f1 = f1_score(y_true, y_pred)
print("\tf1 (micro): {:04.2f}".format(f1 * 100))
if self.evaluation:
self.accuracy = accuracy_score(y_true, y_pred)
self.precision = precision_score(y_true, y_pred)
self.recall = recall_score(y_true, y_pred)
self.report = classification_report(y_true, y_pred, digits=4)
print(self.report)
# save eval
if logs:
logs['f1'] = f1
self.f1 = f1
class Trainer(_Trainer):
def __init__(
self,
*args,
model_saver: ModelSaver,
training_config: TrainingConfig,
multiprocessing: bool = True,
**kwargs):
self.model_saver = model_saver
self.multiprocessing = multiprocessing
self.model: Optional[BaseModel] = None
super().__init__(*args, training_config=training_config, **kwargs)
def train( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid, y_valid,
features_train: np.array = None,
features_valid: np.array = None):
assert self.model is not None
self.model.summary()
if self.model_config.use_crf:
self.model.compile(
loss=self.model.crf.loss,
optimizer='adam'
)
else:
self.model.compile(
loss='categorical_crossentropy',
optimizer='adam'
)
self.model = self.train_model(
self.model, x_train, y_train, x_valid, y_valid,
self.training_config.max_epoch,
features_train=features_train, features_valid=features_valid
)
def get_meta(self):
training_config_meta = vars(self.training_config).copy()
try:
training_config_meta.pop('initial_meta')
except KeyError:
pass
return {
'training_config': training_config_meta
}
def create_data_generator(self, *args, name_suffix: str, **kwargs) -> DataGenerator:
return DataGenerator( # type: ignore
*args,
batch_size=self.training_config.batch_size,
input_window_stride=self.training_config.input_window_stride,
stateful=self.model_config.stateful,
preprocessor=self.preprocessor,
additional_token_feature_indices=self.model_config.additional_token_feature_indices,
text_feature_indices=self.model_config.text_feature_indices,
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
char_embed_size=self.model_config.char_embedding_size,
is_deprecated_padded_batch_text_list_enabled=(
self.model_config.is_deprecated_padded_batch_text_list_enabled
),
max_sequence_length=self.model_config.max_sequence_length,
embeddings=self.embeddings,
name='%s.%s' % (self.model_config.model_name, name_suffix),
**kwargs
)
def train_model( # pylint: disable=arguments-differ
self, local_model,
x_train, y_train,
x_valid=None, y_valid=None,
max_epoch: int = 50,
features_train: np.array = None,
features_valid: np.array = None):
""" parameter model local_model must be compiled before calling this method
this model will be returned with trained weights """
# todo: if valid set if None, create it as random segment of the shuffled train set
if self.preprocessor.return_features and features_train is None:
raise ValueError('features required')
if self.training_config.early_stop:
training_generator = self.create_data_generator(
x_train, y_train,
shuffle=True,
features=features_train,
name_suffix='training_generator'
)
validation_generator = self.create_data_generator(
x_valid, y_valid,
shuffle=False,
features=features_valid,
name_suffix='validation_generator'
)
callbacks = get_callbacks(
model_saver=self.model_saver,
log_dir=self.checkpoint_path,
log_period=self.training_config.checkpoint_epoch_interval,
early_stopping=True,
early_stopping_patience=self.training_config.patience,
initial_meta=self.training_config.initial_meta,
valid=(validation_generator, self.preprocessor),
meta=self.get_meta()
)
else:
x_train = np.concatenate((x_train, x_valid), axis=0)
y_train = np.concatenate((y_train, y_valid), axis=0)
features_all = None
if features_train is not None:
features_all = np.concatenate((features_train, features_valid), axis=0)
training_generator = self.create_data_generator(
x_train, y_train,
shuffle=True,
features=features_all,
name_suffix='training_generator'
)
callbacks = get_callbacks(
model_saver=self.model_saver,
log_dir=self.checkpoint_path,
early_stopping=False,
meta=self.get_meta()
)
nb_workers = 6
multiprocessing = self.multiprocessing
# multiple workers will not work with ELMo due to GPU memory limit (with GTX 1080Ti 11GB)
if self.embeddings and (self.embeddings.use_ELMo or self.embeddings.use_BERT):
# worker at 0 means the training will be executed in the main thread
nb_workers = 0
multiprocessing = False
# dump token context independent data for train set, done once for the training
local_model.fit_generator(
generator=training_generator,
initial_epoch=self.training_config.initial_epoch or 0,
epochs=max_epoch,
use_multiprocessing=multiprocessing,
workers=nb_workers,
callbacks=callbacks
)
return local_model
def train_nfold( # pylint: disable=arguments-differ
self,
x_train: T_Batch_Tokens,
y_train: T_Batch_Labels,
x_valid: Optional[T_Batch_Tokens] = None,
y_valid: Optional[T_Batch_Labels] = None,
features_train: Optional[T_Batch_Features] = None,
features_valid: Optional[T_Batch_Features] = None
):
""" n-fold training for the instance model
the n models are stored in self.models, and self.model left unset at this stage """
fold_count = len(self.models)
fold_size = len(x_train) // fold_count
for fold_id in range(0, fold_count):
print(
'\n------------------------ fold %s--------------------------------------'
% fold_id
)
if x_valid is None:
# segment train and valid
fold_start = fold_size * fold_id
fold_end = fold_start + fold_size
if fold_id == fold_size - 1:
fold_end = len(x_train)
train_x = np.concatenate([x_train[:fold_start], x_train[fold_end:]])
train_y = np.concatenate([y_train[:fold_start], y_train[fold_end:]])
val_x = x_train[fold_start:fold_end]
val_y = y_train[fold_start:fold_end]
if features_train is not None:
train_features = np.concatenate(
[features_train[:fold_start], features_train[fold_end:]]
)
val_features = features_train[fold_start:fold_end]
else:
train_features = None
val_features = None
else:
# reuse given segmentation
train_x = x_train
train_y = y_train
train_features = features_train
val_x = x_valid
val_y = y_valid
val_features = features_valid
foldModel = self.models[fold_id]
foldModel.summary()
if self.model_config.use_crf:
foldModel.compile(
loss=foldModel.crf.loss,
optimizer='adam'
)
else:
foldModel.compile(
loss='categorical_crossentropy',
optimizer='adam'
)
foldModel = self.train_model(
foldModel,
train_x,
train_y,
val_x,
val_y,
features_train=train_features,
features_valid=val_features,
max_epoch=self.training_config.max_epoch
)
self.models[fold_id] = foldModel | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/trainer.py | 0.873728 | 0.280898 | trainer.py | pypi |
import logging
import json
from typing import List, Type, Union
from keras.models import Model
from keras.layers.merge import Concatenate
from keras.layers import (
Dense, LSTM, Bidirectional, Embedding, Input, Dropout,
TimeDistributed
)
import delft.sequenceLabelling.wrapper
from delft.utilities.layers import ChainCRF
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.models import get_model as _get_model, BidLSTM_CRF_FEATURES
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig
LOGGER = logging.getLogger(__name__)
class CustomModel(BaseModel):
def __init__(
self, config, ntags,
require_casing: bool = False,
use_crf: bool = False,
supports_features: bool = False,
require_features_indices_input: bool = False,
stateful: bool = False):
super().__init__(config, ntags)
self.require_casing = require_casing
self.use_crf = use_crf
self.supports_features = supports_features
self.require_features_indices_input = require_features_indices_input
self.stateful = stateful
def _concatenate_inputs(inputs: list, **kwargs):
if len(inputs) == 1:
return inputs[0]
return Concatenate(**kwargs)(inputs)
# renamed copy of BidLSTM_CRF to demonstrate a custom model
class CustomBidLSTM_CRF(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
def __init__(self, config: ModelConfig, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
stateful=config.stateful
)
stateful = self.stateful
# stateful RNNs require the batch size to be passed in
input_batch_size = config.batch_size if stateful else None
model_inputs = []
lstm_inputs = []
# build input, directly feed with word embedding by the data generator
word_input = Input(
shape=(None, config.word_embedding_size),
batch_shape=(input_batch_size, None, config.word_embedding_size),
name='word_input'
)
model_inputs.append(word_input)
lstm_inputs.append(word_input)
# build character based embedding
char_input = Input(
shape=(None, config.max_char_length),
batch_shape=(input_batch_size, None, config.max_char_length),
dtype='int32',
name='char_input'
)
model_inputs.append(char_input)
if config.char_embedding_size:
assert config.char_vocab_size, 'config.char_vocab_size required'
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=config.char_input_mask_zero,
name='char_embeddings_embedding'
), name='char_embeddings')(char_input)
chars = TimeDistributed(
Bidirectional(LSTM(
config.num_char_lstm_units,
dropout=config.char_input_dropout,
recurrent_dropout=config.char_lstm_dropout,
return_sequences=False
)),
name='char_lstm'
)(char_embeddings)
lstm_inputs.append(chars)
# length of sequence not used for the moment (but used for f1 communication)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
LOGGER.debug('model, config.use_features: %s', config.use_features)
if config.use_features:
LOGGER.info('model using features')
assert config.max_feature_size > 0
features_input = Input(
batch_shape=(input_batch_size, None, config.max_feature_size),
name='features_input'
)
model_inputs.append(features_input)
features = features_input
if config.features_embedding_size:
features = TimeDistributed(Dense(
config.features_embedding_size,
name='features_embeddings_dense'
), name='features_embeddings')(features)
LOGGER.info(
'word_input=%s, chars=%s, features=%s',
word_input, chars, features
)
lstm_inputs.append(features)
x = _concatenate_inputs(lstm_inputs, name='word_lstm_input')
x = Dropout(config.dropout, name='word_lstm_input_dropout')(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout,
stateful=stateful,
), name='word_lstm')(x)
x = Dropout(config.dropout, name='word_lstm_output_dropout')(x)
x = Dense(
config.num_word_lstm_units, name='word_lstm_dense', activation='tanh'
)(x)
x = Dense(ntags, name='dense_ntags')(x)
self.crf = ChainCRF(name='crf')
pred = self.crf(x)
model_inputs.append(length_input)
self.model = Model(inputs=model_inputs, outputs=[pred])
self.config = config
# copied from
# https://github.com/kermitt2/delft/blob/d2f8390ac01779cab959f57aa6e1a8f1d2723505/
# delft/sequenceLabelling/models.py
class CustomBidLSTM_CRF_FEATURES(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling which create features
from additional orthogonal information generated by GROBID.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
name = 'CustomBidLSTM_CRF_FEATURES'
def __init__(self, config, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
require_features_indices_input=True
)
# build input, directly feed with word embedding by the data generator
word_input = Input(shape=(None, config.word_embedding_size), name='word_input')
# build character based embedding
char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=True,
name='char_embeddings'
))(char_input)
chars = TimeDistributed(Bidirectional(LSTM(
config.num_char_lstm_units,
return_sequences=False
)))(char_embeddings)
# layout features input and embeddings
features_input = Input(
shape=(None, len(config.features_indices)),
dtype='float32',
name='features_input'
)
assert config.features_vocabulary_size, "config.features_vocabulary_size required"
assert config.features_embedding_size, "config.features_embedding_size required"
# features_vocabulary_size (default 12) * number_of_features + 1
# (the zero is reserved for masking / padding)
features_embedding = TimeDistributed(
Embedding(
input_dim=config.features_vocabulary_size * len(config.features_indices) + 1,
output_dim=config.features_embedding_size,
mask_zero=True,
trainable=True,
name='features_embedding'),
name="features_embedding_td_1"
)(features_input)
assert config.features_lstm_units, "config.features_lstm_units required"
features_embedding_bd = TimeDistributed(
Bidirectional(LSTM(config.features_lstm_units, return_sequences=False)),
name="features_embedding_td_2"
)(features_embedding)
features_embedding_out = Dropout(config.dropout)(features_embedding_bd)
# length of sequence not used for the moment (but used for f1 communication)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
x = Concatenate()([word_input, chars, features_embedding_out])
x = Dropout(config.dropout)(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout
))(x)
x = Dropout(config.dropout)(x)
x = Dense(config.num_word_lstm_units, activation='tanh')(x)
x = Dense(ntags)(x)
self.crf = ChainCRF()
pred = self.crf(x)
self.model = Model(
inputs=[word_input, char_input, features_input, length_input],
outputs=[pred]
)
self.config = config
DEFAULT_MODEL_NAMES = [
'BidLSTM_CRF', 'BidLSTM_CNN', 'BidLSTM_CNN_CRF', 'BidGRU_CRF', 'BidLSTM_CRF_CASING',
BidLSTM_CRF_FEATURES.name
]
MODEL_MAP = {
'CustomBidLSTM_CRF': CustomBidLSTM_CRF,
CustomBidLSTM_CRF_FEATURES.name: CustomBidLSTM_CRF_FEATURES
}
IMPLICIT_MODEL_CONFIG_PROPS_MAP = {
BidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
),
CustomBidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
)
}
def register_model(name: str, model_class: Type[CustomModel]):
MODEL_MAP[name] = model_class
def updated_implicit_model_config_props(model_config: ModelConfig):
implicit_model_config_props = IMPLICIT_MODEL_CONFIG_PROPS_MAP.get(model_config.model_type)
if not implicit_model_config_props:
return
for key, value in implicit_model_config_props.items():
setattr(model_config, key, value)
def _create_model(
model_class: Type[CustomModel],
config: ModelConfig,
ntags=None) -> CustomModel:
return model_class(config, ntags=ntags)
def is_model_stateful(model: Union[BaseModel, CustomModel]) -> bool:
try:
return model.stateful
except AttributeError:
return False
def get_model(config, preprocessor, ntags=None):
LOGGER.info(
'get_model, config: %s, ntags=%s',
json.dumps(vars(config), indent=4),
ntags
)
model_class = MODEL_MAP.get(config.model_type)
if not model_class:
return _get_model(config, preprocessor, ntags=ntags)
model = _create_model(model_class, config, ntags=ntags)
config.use_crf = model.use_crf
preprocessor.return_casing = model.require_casing
if config.use_features and not model.supports_features:
LOGGER.warning('features enabled but not supported by model (disabling)')
config.use_features = False
preprocessor.return_features = config.use_features
return model
def get_model_names() -> List[str]:
return sorted(set(DEFAULT_MODEL_NAMES) | set(MODEL_MAP.keys()))
def patch_get_model():
delft.sequenceLabelling.wrapper.get_model = get_model | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/models.py | 0.912782 | 0.230432 | models.py | pypi |
import logging
from collections import Counter
from itertools import zip_longest
from typing import List, Optional
import numpy as np
from delft.utilities.Tokenizer import tokenizeAndFilterSimple
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform import (
DatasetTransformer
)
from sciencebeam_trainer_delft.sequence_labelling.typing import (
T_Batch_Tokens,
T_Batch_Features,
T_Batch_Labels
)
LOGGER = logging.getLogger(__name__)
NBSP = '\u00A0'
class LineStatus:
# replicate line status used in GROBID
LINESTART = 'LINESTART'
LINEIN = 'LINEIN'
LINEEND = 'LINEEND'
def strip_tag_prefix(tag: Optional[str]) -> str:
if tag and (tag.startswith('B-') or tag.startswith('I-')):
return tag[2:]
return tag or ''
def get_next_transform_token_y(token_y: str) -> str:
if token_y and token_y.startswith('B-'):
return 'I-' + token_y[2:]
return token_y
def inverse_transform_token_y(unrolled_token_y: List[str], previous_tag: Optional[str]) -> str:
tags_with_stripped_prefix = [strip_tag_prefix(tag) for tag in unrolled_token_y]
tag_counts = Counter(tags_with_stripped_prefix)
top_tag = tag_counts.most_common(1)[0][0]
LOGGER.debug('tag_counts: %s, top_tag=%r', tag_counts, top_tag)
if f'B-{top_tag}' in unrolled_token_y or f'I-{top_tag}' in unrolled_token_y:
if top_tag != strip_tag_prefix(previous_tag):
return f'B-{top_tag}'
return f'I-{top_tag}'
return top_tag
def get_line_status(
token_index: int,
line_length: int
):
if token_index == 0:
return LineStatus.LINESTART
if token_index == line_length - 1:
return LineStatus.LINEEND
return LineStatus.LINEIN
def get_transformed_features(
token_features: List[str],
unrolled_token_index: int,
unrolled_tokens_length: int,
line_status_enabled: bool = True
):
if not line_status_enabled:
return token_features
return list(token_features) + [get_line_status(unrolled_token_index, unrolled_tokens_length)]
class UnrollingTextFeatureDatasetTransformer(DatasetTransformer):
def __init__(
self,
unroll_text_feature_index: int,
used_features_indices: Optional[List[int]] = None
):
# Note: used_features_indices is used to determine, whether to add the line status
# (i.e. no need to add it if it is not used)
self.unroll_text_feature_index = unroll_text_feature_index
self.used_features_indices = used_features_indices
self._saved_x: Optional[T_Batch_Tokens] = None
self._saved_features: Optional[T_Batch_Features] = None
self._unrolled_token_lengths: Optional[List[List[int]]] = None
def tokenize(self, text: str) -> List[str]:
return tokenizeAndFilterSimple(text.replace(NBSP, ' '))
def fit_transform(
self,
x: T_Batch_Tokens,
y: Optional[T_Batch_Labels],
features: Optional[T_Batch_Features]
):
assert features is not None
x_transformed = []
_y_transformed = []
features_transformed = []
line_status_enabled: Optional[bool] = None
unrolled_token_lengths = []
for y_doc, features_doc in zip_longest(
y if y is not None else [],
features,
fillvalue=[]
):
x_doc_transformed = []
y_doc_transformed = []
features_doc_transformed = []
unrolled_token_lengths_doc = []
for features_row, y_row in zip_longest(features_doc, y_doc, fillvalue=None):
text = features_row[self.unroll_text_feature_index]
if line_status_enabled is None:
line_status_enabled = (
self.used_features_indices is not None
and len(features_row) in self.used_features_indices
)
tokens = self.tokenize(text)
assert tokens
assert y is None or y_row is not None
tokens_length = len(tokens)
for unrolled_token_index, token in enumerate(tokens):
x_doc_transformed.append(token)
y_doc_transformed.append(y_row)
features_doc_transformed.append(
get_transformed_features(
features_row,
unrolled_token_index=unrolled_token_index,
unrolled_tokens_length=tokens_length,
line_status_enabled=line_status_enabled
)
)
y_row = get_next_transform_token_y(y_row)
unrolled_token_lengths_doc.append(tokens_length)
x_transformed.append(x_doc_transformed)
_y_transformed.append(y_doc_transformed)
features_transformed.append(features_doc_transformed)
unrolled_token_lengths.append(unrolled_token_lengths_doc)
LOGGER.debug('x_transformed: %s', x_transformed)
LOGGER.debug('y_transformed: %s', _y_transformed)
LOGGER.debug('features_transformed: %s', features_transformed)
y_transformed = _y_transformed if y is not None else None
self._saved_x = x
self._saved_features = features
self._unrolled_token_lengths = unrolled_token_lengths
if isinstance(x, np.ndarray):
x_transformed = np.asarray(x_transformed, dtype='object')
if isinstance(y, np.ndarray):
y_transformed = np.asarray(y_transformed, dtype='object')
if isinstance(features, np.ndarray):
features_transformed = np.asarray(features_transformed, dtype='object')
return x_transformed, y_transformed, features_transformed
def inverse_transform(
self,
x: Optional[T_Batch_Tokens],
y: Optional[T_Batch_Labels],
features: Optional[T_Batch_Features]
):
if x is not None:
x = self._saved_x
if features is not None:
features = self._saved_features
inverse_transformed_y = None
if y is not None:
inverse_transformed_y = []
assert self._saved_x is not None
assert self._saved_features is not None
assert self._unrolled_token_lengths is not None
for x_doc, features_doc, y_doc, unrolled_token_lengths_doc in zip(
self._saved_x, self._saved_features, y, self._unrolled_token_lengths
):
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('unrolled_token_lengths_doc: %s', unrolled_token_lengths_doc)
LOGGER.debug('y_doc: %s', y_doc)
LOGGER.debug('xy_doc: %s', list(zip(x_doc, y_doc)))
index = 0
inverse_transformed_y_doc = []
previous_tag = None
for x_token, features_token, unrolled_token_length in zip(
x_doc, features_doc, unrolled_token_lengths_doc
):
if index >= len(y_doc):
# y_doc may be truncated using max sequence length
break
y_tokens = y_doc[index:index + unrolled_token_length]
if LOGGER.isEnabledFor(logging.DEBUG):
tokens = self.tokenize(features_token[self.unroll_text_feature_index])
LOGGER.debug(
'inverse transforming: indices=[%d:%d], x=%r, f=%r, tokens_y=%r',
index, index + unrolled_token_length,
x_token, features_token, list(zip_longest(tokens, y_tokens))
)
y_token = inverse_transform_token_y(y_tokens, previous_tag=previous_tag)
previous_tag = y_token
inverse_transformed_y_doc.append(y_token)
index += unrolled_token_length
inverse_transformed_y.append(inverse_transformed_y_doc)
if isinstance(y, np.ndarray):
inverse_transformed_y = np.asarray(inverse_transformed_y, dtype='object')
return x, inverse_transformed_y, features | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/dataset_transform/unroll_transform.py | 0.821868 | 0.254257 | unroll_transform.py | pypi |
import logging
import tempfile
import os
from pathlib import Path
from typing import Iterable, IO, List, Optional, Tuple
import numpy as np
from delft.sequenceLabelling.reader import (
_translate_tags_grobid_to_IOB as translate_tags_grobid_to_IOB
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import ClassificationResult
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.io import copy_file
from sciencebeam_trainer_delft.sequence_labelling.config import TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import (
WapitiModel,
WapitiWrapper,
format_feature_line
)
LOGGER = logging.getLogger(__name__)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
elif tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
elif tag.startswith('I-'):
# inside
return '' + tag[2:]
else:
return tag
def iter_doc_formatted_input_data(
x_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, f_token in zip(x_doc, features_doc):
try:
yield format_feature_line([x_token] + list(f_token))
except TypeError as error:
raise RuntimeError(
'failed to concatenate: x=<%s>, f=<%s>' % (x_token, f_token)
) from error
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_input_data(
x: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, f_doc in zip(x, features)
for line in iter_doc_formatted_input_data(x_doc, f_doc)
)
def write_wapiti_input_data(fp: IO, x: np.array, features: np.array):
fp.writelines(iter_formatted_input_data(
x, features
))
def iter_read_tagged_result(fp: IO) -> Iterable[List[Tuple[str, str]]]:
token_and_label_pairs: List[Tuple[str, str]] = []
for line in fp:
LOGGER.debug('line: %r', line)
line = line.rstrip()
if not line:
if token_and_label_pairs:
yield token_and_label_pairs
token_and_label_pairs = []
continue
values = line.replace('\t', ' ').split(' ')
if len(values) < 2:
raise ValueError('should have multiple values, but got: [%s]' % line)
token_and_label_pairs.append((
values[0],
translate_tags_grobid_to_IOB(values[-1])
))
if token_and_label_pairs:
yield token_and_label_pairs
def convert_wapiti_model_result_to_document_tagged_result(
x_doc: List[str],
wapiti_model_result: List[List[str]]) -> List[Tuple[str, str]]:
return [
(
x_token,
translate_tags_grobid_to_IOB(result_token[-1])
)
for x_token, result_token in zip(x_doc, wapiti_model_result)
]
class WapitiModelAdapter:
def __init__(self, wapiti_wrapper: WapitiWrapper, model_file_path: str, model_path: str = None):
self.wapiti_wrapper = wapiti_wrapper
self.model_file_path = model_file_path
self.model_path = model_path
self._wapiti_model: Optional[WapitiModel] = None
@property
def wapiti_model(self) -> WapitiModel:
if self._wapiti_model is not None:
return self._wapiti_model
wapiti_model = self.wapiti_wrapper.load_model(self.model_file_path)
self._wapiti_model = wapiti_model
return wapiti_model
@staticmethod
def load_from(
model_path: str,
download_manager: DownloadManager,
wapiti_binary_path: str = None) -> 'WapitiModelAdapter':
model_file_path = os.path.join(model_path, 'model.wapiti.gz')
local_model_file_path = None
try:
local_model_file_path = download_manager.download_if_url(model_file_path)
except FileNotFoundError:
pass
if not local_model_file_path or not os.path.isfile(str(local_model_file_path)):
model_file_path = os.path.splitext(model_file_path)[0]
local_model_file_path = download_manager.download_if_url(model_file_path)
LOGGER.debug('local_model_file_path: %s', local_model_file_path)
if local_model_file_path.endswith('.gz'):
local_uncompressed_file_path = os.path.splitext(local_model_file_path)[0]
copy_file(local_model_file_path, local_uncompressed_file_path, overwrite=False)
local_model_file_path = local_uncompressed_file_path
return WapitiModelAdapter(
WapitiWrapper(
wapiti_binary_path=wapiti_binary_path
),
model_file_path=local_model_file_path,
model_path=model_path
)
def _get_model_name(self) -> str:
return os.path.basename(os.path.dirname(self.model_file_path))
def iter_tag_using_model(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
# Note: this method doesn't currently seem to work reliable and needs to be investigated
# The evaluation always shows zero.
assert not output_format, 'output_format not supported'
for x_doc, f_doc in zip(x, features):
LOGGER.debug('x_doc=%s, f_doc=%s', x_doc, f_doc)
result = self.wapiti_model.label_features([
[x_token] + list(f_token)
for x_token, f_token in zip(x_doc, f_doc)
])
yield convert_wapiti_model_result_to_document_tagged_result(
x_doc,
result
)
def iter_tag_using_wrapper(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('input.data')
output_data_path = Path(temp_dir).joinpath('output.data')
with data_path.open(mode='w') as fp:
write_wapiti_input_data(
fp, x=x, features=features
)
self.wapiti_wrapper.label(
model_path=self.model_file_path,
data_path=str(data_path),
output_data_path=str(output_data_path),
output_only_labels=False
)
with output_data_path.open(mode='r') as output_data_fp:
yield from iter_read_tagged_result(output_data_fp)
def iter_tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
return self.iter_tag_using_wrapper(x, features, output_format)
def tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> List[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
return list(self.iter_tag(x, features))
def eval(self, x_test, y_test, features: np.array = None):
self.eval_single(x_test, y_test, features=features)
@property
def model_summary_props(self) -> dict:
return {
'model_type': 'wapiti'
}
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
tag_result = self.tag(x_test, features)
y_true = [
y_token
for y_doc in y_test
for y_token in y_doc
]
y_pred = [
tag_result_token[-1]
for tag_result_doc in tag_result
for tag_result_token in tag_result_doc
]
return ClassificationResult(
y_pred=y_pred,
y_true=y_true
)
def eval_single(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
classification_result = self.get_evaluation_result(
x_test=x_test,
y_test=y_test,
features=features
)
print(classification_result.get_formatted_report(digits=4))
def iter_doc_formatted_training_data(
x_doc: np.array, y_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, y_token, f_token in zip(x_doc, y_doc, features_doc):
yield format_feature_line([x_token] + f_token + [translate_tags_IOB_to_grobid(y_token)])
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_training_data(
x: np.array, y: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, y_doc, f_doc in zip(x, y, features)
for line in iter_doc_formatted_training_data(x_doc, y_doc, f_doc)
)
def write_wapiti_train_data(fp: IO, x: np.array, y: np.array, features: np.array):
fp.writelines(iter_formatted_training_data(
x, y, features
))
class WapitiModelTrainAdapter:
def __init__(
self,
model_name: str,
template_path: str,
temp_model_path: str,
max_epoch: int,
download_manager: DownloadManager,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
self.model_name = model_name
self.template_path = template_path
self.temp_model_path = temp_model_path
self.max_epoch = max_epoch
self.download_manager = download_manager
self.gzip_enabled = gzip_enabled
self.wapiti_binary_path = wapiti_binary_path
self.wapiti_train_args = wapiti_train_args
self._model_adapter: Optional[WapitiModelAdapter] = None
# additional properties to keep "compatibility" with wrapper.Sequence
self.log_dir = None
self.model_path = None
self.training_config = TrainingConfig(initial_epoch=0)
def train(
self,
x_train: np.array,
y_train: np.array,
x_valid: np.array = None,
y_valid: np.array = None,
features_train: np.array = None,
features_valid: np.array = None):
local_template_path = self.download_manager.download_if_url(self.template_path)
LOGGER.info('local_template_path: %s', local_template_path)
if not self.temp_model_path:
self.temp_model_path = '/tmp/model.wapiti'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('train.data')
with data_path.open(mode='w') as fp:
write_wapiti_train_data(
fp, x=x_train, y=y_train, features=features_train
)
if x_valid is not None:
write_wapiti_train_data(
fp, x=x_valid, y=y_valid, features=features_valid
)
WapitiWrapper(wapiti_binary_path=self.wapiti_binary_path).train(
data_path=str(data_path),
output_model_path=self.temp_model_path,
template_path=local_template_path,
max_iter=self.max_epoch,
**(self.wapiti_train_args or {})
)
LOGGER.info('wapiti model trained: %s', self.temp_model_path)
def get_model_adapter(self) -> WapitiModelAdapter:
if self._model_adapter is not None:
return self._model_adapter
assert self.temp_model_path, "temp_model_path required"
model_adapter = WapitiModelAdapter.load_from(
os.path.dirname(self.temp_model_path),
download_manager=self.download_manager,
wapiti_binary_path=self.wapiti_binary_path
)
self._model_adapter = model_adapter
return model_adapter
@property
def last_checkpoint_path(self) -> Optional[str]:
return None
@property
def model_summary_props(self) -> dict:
return self.get_model_adapter().model_summary_props
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
return self.get_model_adapter().get_evaluation_result(
x_test, y_test, features=features
)
def eval(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
self.get_model_adapter().eval(
x_test, y_test, features=features
)
def get_model_output_path(self, output_path: str = None) -> str:
assert output_path, "output_path required"
return os.path.join(output_path, self.model_name)
def save(self, output_path: str = None):
model_output_path = self.get_model_output_path(output_path)
assert self.temp_model_path, "temp_model_path required"
if not Path(self.temp_model_path).exists():
raise FileNotFoundError("temp_model_path does not exist: %s" % self.temp_model_path)
model_file_path = os.path.join(model_output_path, 'model.wapiti')
if self.gzip_enabled:
model_file_path += '.gz'
LOGGER.info('saving to %s', model_file_path)
copy_file(self.temp_model_path, model_file_path) | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/engines/wapiti_adapters.py | 0.690768 | 0.297062 | wapiti_adapters.py | pypi |
import logging
import threading
import os
import sys
from collections import Counter
from itertools import islice
from multiprocessing import cpu_count
from typing import IO, List, Iterable, Optional, cast
import subprocess
LOGGER = logging.getLogger(__name__)
DEFAULT_STOP_EPSILON_VALUE = '0.00001'
DEFAULT_STOP_WINDOW_SIZE = 20
DEFAULT_INVALID_CHARACTER_PLACEHOLDER = '?'
INVAID_CHARACTER_START_ORD = 0x6EE80
def format_feature_line(feature_line: List[str]) -> str:
return '\t'.join(feature_line)
def replace_invalid_characters(text: str, placeholder: str = DEFAULT_INVALID_CHARACTER_PLACEHOLDER):
return ''.join((
c if ord(c) < INVAID_CHARACTER_START_ORD else placeholder
for c in text
))
def lines_to_log(logger: logging.Logger, level: int, message: str, lines: Iterable[str]):
LOGGER.debug('lines: %s', lines)
for line in lines:
if isinstance(line, bytes):
line = line.decode('utf-8')
line = line.rstrip()
logger.log(level, message, line)
class WapitiModel:
def __init__(self, process: subprocess.Popen):
self.process = process
@property
def process_stdin(self) -> IO:
stdin = self.process.stdin
assert stdin
return stdin
@property
def process_stdout(self) -> IO:
stdout = self.process.stdout
assert stdout
return stdout
def iter_read_lines(self) -> Iterable[str]:
while self.process.poll() is None:
line = self.process_stdout.readline().decode('utf-8').rstrip()
LOGGER.debug('read line: %s', line)
yield line
def iter_label(self, data: str) -> Iterable[str]:
self.process_stdin.write((data + '\n\n\n').encode('utf-8'))
self.process_stdin.flush()
yield from self.iter_read_lines()
def label_lines(self, lines: List[str], clean_input: bool = False) -> List[str]:
LOGGER.debug('lines: %s', lines)
for line in lines + ['', '']:
if clean_input:
cleaned_line = replace_invalid_characters(line, placeholder='?')
else:
cleaned_line = line
try:
LOGGER.debug('writing line: %s', cleaned_line)
LOGGER.debug('line counts: %s', Counter(cleaned_line))
self.process_stdin.write(
(cleaned_line + '\n').encode('utf-8')
)
self.process_stdin.flush()
except BrokenPipeError:
LOGGER.error('failed to write line: %s', [(c, hex(ord(c))) for c in cleaned_line])
raise
self.process_stdin.flush()
labelled_lines = list(islice(self.iter_read_lines(), len(lines) + 1))
LOGGER.debug('labelled_lines: %s', labelled_lines)
return labelled_lines[:-1]
def label_raw_text(self, data: str) -> str:
return '\n'.join(self.label_lines(data.splitlines()))
def label_features(self, features: List[List[str]]) -> List[List[str]]:
lines = [
format_feature_line(feature_line)
for feature_line in features
]
return [
[
token_features[0],
labelled_line.rsplit('\t', maxsplit=1)[-1]
]
for labelled_line, token_features in zip(self.label_lines(lines), features)
]
class WapitiWrapper:
def __init__(self, wapiti_binary_path: str = None):
self.wapiti_binary_path = wapiti_binary_path or 'wapiti'
def check_available(self):
self.run_wapiti(['--version'])
def load_model(
self,
model_path: str,
output_only_labels: bool = True,
stderr_to_log_enabled: bool = True) -> WapitiModel:
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('wapiti model not found: %s' % model_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
command = [self.wapiti_binary_path] + args
LOGGER.debug('running wapiti: %s', command)
process = subprocess.Popen( # pylint: disable=consider-using-with
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=sys.stderr if not stderr_to_log_enabled else subprocess.PIPE
)
process.poll()
if stderr_to_log_enabled:
t = threading.Thread(target=lambda: lines_to_log(
LOGGER, logging.INFO, 'wapiti, stderr: %s',
cast(Iterable[str], process.stderr)
))
t.daemon = True
t.start()
return WapitiModel(process=process)
def run_wapiti(self, args: List[str]):
command = [self.wapiti_binary_path] + args
LOGGER.info('calling wapiti: %s', command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
) as process:
assert process.stdout
with process.stdout:
lines_to_log(
LOGGER,
logging.INFO,
'wapiti: %s',
cast(Iterable[str], process.stdout)
)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode,
command
)
LOGGER.debug('wapiti call succeeded')
def label(
self,
model_path: str,
data_path: str,
output_data_path: str,
output_only_labels: bool = True):
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('model file not found: %s' % model_path)
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
args.append(str(data_path))
args.append(str(output_data_path))
self.run_wapiti(args)
def train(
self,
data_path: str,
output_model_path: str,
template_path: Optional[str] = None,
max_iter: Optional[int] = None,
num_threads: Optional[int] = None,
stop_epsilon_value: Optional[str] = None,
stop_window_size: Optional[int] = None
):
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
if not num_threads:
num_threads = cpu_count()
if not stop_epsilon_value:
stop_epsilon_value = DEFAULT_STOP_EPSILON_VALUE
if not stop_window_size:
stop_window_size = DEFAULT_STOP_WINDOW_SIZE
args = ['train']
if template_path:
if not os.path.isfile(str(template_path)):
raise FileNotFoundError('template file not found: %s' % template_path)
args.append('--pattern')
args.append(str(template_path))
if max_iter:
args.append('--maxiter')
args.append(str(max_iter))
args.append('--nthread')
args.append(str(num_threads))
args.append('--stopeps')
args.append(str(stop_epsilon_value))
args.append('--stopwin')
args.append(str(stop_window_size))
args.append(str(data_path))
args.append(str(output_model_path))
        self.run_wapiti(args)

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/engines/wapiti.py
import argparse
import logging
from typing import Optional
import requests
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
ClassificationResult
)
LOGGER = logging.getLogger(__name__)
DEFAULT_TRAIN_START_MESSAGE_FORMAT = '\n'.join([
'Model training started',
'model_path: `{model_path}`',
'checkpoints_path: `{checkpoints_path}`',
'resume_train_model_path: `{resume_train_model_path}`',
'initial_epoch: `{initial_epoch}`'
])
DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`'
])
DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete, f1: `{classification_result.f1:.4f}`',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`',
'```\n{classification_result.text_formatted_report}\n```'
])
DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT = (
'Model training failed due to: `{error}`\nmodel_path: `{model_path}`'
)
def get_rendered_notification_message(message_format: str, **kwargs):
return message_format.format(**kwargs)
def get_fallback_notification_message(message_format: str, conversion_error: str, args: dict):
return 'failed to format %r due to %s (args: %s)' % (message_format, conversion_error, args)
def safe_rendered_notification_message(message_format: str, **kwargs):
try:
return get_rendered_notification_message(message_format, **kwargs)
except Exception as exc: # pylint: disable=broad-except
LOGGER.warning(
'failed to convert message due to: %s', exc, exc_info=exc
)
return get_fallback_notification_message(message_format, str(exc), kwargs)
class TrainNotificationManager:
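    """
    Sends training status notifications (start, success, error) by posting a
    JSON payload of the form {"text": "<message>"} to the configured webhook
    URL (e.g. a Slack incoming webhook). If no URL is configured, the message
    is only logged.
    """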
def __init__(
self,
notification_url: str,
notification_train_start_message: str,
notification_train_success_message: str,
notification_train_eval_success_message: str,
notification_error_message: str):
self.notification_url = notification_url
self.notification_train_start_message = notification_train_start_message
self.notification_train_success_message = notification_train_success_message
self.notification_train_eval_success_message = notification_train_eval_success_message
self.notification_error_message = notification_error_message
def send_notification(self, message_format: str, **kwargs):
message = safe_rendered_notification_message(message_format, **kwargs)
if not message or not self.notification_url:
LOGGER.info('not sending notification: %r (url: %r)', message, self.notification_url)
return
data = {
'text': message
}
LOGGER.info('sending notification: %r (url: %r)', message, self.notification_url)
requests.post(self.notification_url, json=data)
def notify_error(self, model_path: str, error: str):
self.send_notification(
self.notification_error_message,
model_path=model_path,
error=error
)
def notify_start(
self,
model_path: str,
checkpoints_path: Optional[str],
resume_train_model_path: Optional[str],
initial_epoch: int
):
self.send_notification(
self.notification_train_start_message,
model_path=model_path,
checkpoints_path=checkpoints_path,
resume_train_model_path=resume_train_model_path,
initial_epoch=initial_epoch
)
def notify_success(
self,
model_path: str,
last_checkpoint_path: str = None,
classification_result: ClassificationResult = None):
if classification_result is None:
self.send_notification(
self.notification_train_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path
)
else:
self.send_notification(
self.notification_train_eval_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path,
classification_result=classification_result
)
def add_train_notification_arguments(parser: argparse.ArgumentParser):
notification_group = parser.add_argument_group('notification')
notification_group.add_argument(
"--notification-url",
help="A URL to post to on success error (e.g. a Slack Webhook URL)"
)
notification_group.add_argument(
"--notification-train-start-message",
default=DEFAULT_TRAIN_START_MESSAGE_FORMAT,
help="Model training start notification message"
)
notification_group.add_argument(
"--notification-train-success-message",
default=DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT,
help="Model training success notification message"
)
notification_group.add_argument(
"--notification-train-eval-success-message",
default=DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT,
help="Model training and evaluation success notification message"
)
notification_group.add_argument(
"--notification-error-message",
default=DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT,
help="Model training failed notification message"
)
def get_train_notification_manager(args: argparse.Namespace) -> TrainNotificationManager:
return TrainNotificationManager(
notification_url=args.notification_url,
notification_train_start_message=args.notification_train_start_message,
notification_train_success_message=args.notification_train_success_message,
notification_train_eval_success_message=args.notification_train_eval_success_message,
notification_error_message=args.notification_error_message
)
def notify_train_start(
train_notification_manager: Optional[TrainNotificationManager] = None,
**kwargs
):
if train_notification_manager is not None:
train_notification_manager.notify_start(**kwargs)
def notify_train_success(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
train_notification_manager.notify_success(**kwargs)
def notify_train_error(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
        train_notification_manager.notify_error(**kwargs)

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/utils/train_notify.py
import logging
from pathlib import Path
from typing import List, Optional, NamedTuple, Union
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
get_checkpoints_json,
get_checkpoint_meta
)
LOGGER = logging.getLogger(__name__)
class CheckPoint(NamedTuple):
path: str
epoch: int
meta: dict
def get_sorted_checkpoint_json_list(checkpoints_json: dict) -> List[dict]:
return sorted(
checkpoints_json.get('checkpoints', []),
key=lambda checkpoint: checkpoint['path']
)
def get_checkpoint_meta_or_none(path: str) -> Optional[dict]:
try:
return get_checkpoint_meta(path)
except FileNotFoundError:
LOGGER.info('meta not found for: %r', path)
return None
def get_checkpoint_for_json(checkpoint_json: Optional[dict]) -> Optional[CheckPoint]:
if not checkpoint_json:
return None
path = checkpoint_json.get('path')
assert path
epoch = checkpoint_json.get('epoch')
assert epoch
meta = get_checkpoint_meta_or_none(path) or {}
return CheckPoint(path=path, epoch=epoch, meta=meta)
def get_last_or_none(a_list: List[T]) -> Optional[T]:
try:
return a_list[-1]
except IndexError:
return None
class CheckPoints:
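    """
    Read-only view on the checkpoints.json file in a training log directory,
    giving access to the latest checkpoint (last entry when sorted by path)
    and its meta data.
    """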
def __init__(self, log_dir: Union[str, Path]):
assert log_dir
self.log_dir = str(log_dir)
self._checkpoints_json: Optional[dict] = None
def _load_checkpoints_json(self) -> dict:
try:
return get_checkpoints_json(
self.log_dir
)
except FileNotFoundError:
return {}
@property
def checkpoints_json(self) -> dict:
if self._checkpoints_json is None:
self._checkpoints_json = self._load_checkpoints_json()
return self._checkpoints_json
@property
def latest_checkpoint(self) -> Optional[CheckPoint]:
return get_checkpoint_for_json(
get_last_or_none(
get_sorted_checkpoint_json_list(self.checkpoints_json)
)
)
@property
def latest_checkpoint_url(self) -> Optional[str]:
latest_checkpoint = self.latest_checkpoint
return latest_checkpoint.path if latest_checkpoint else None
class ResumeTrainModelParams(NamedTuple):
model_path: str
initial_epoch: int
initial_meta: dict
def get_resume_train_model_params(
log_dir: Optional[str],
auto_resume: bool = True,
resume_train_model_path: Optional[str] = None,
initial_epoch: Optional[int] = None
) -> Optional[ResumeTrainModelParams]:
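    """
    Decide how to resume training: prefer the latest checkpoint in log_dir
    when auto_resume is enabled, otherwise fall back to an explicitly
    provided resume_train_model_path; return None to train from scratch.
    """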
if auto_resume and log_dir:
latest_checkpoint = CheckPoints(log_dir=log_dir).latest_checkpoint
if latest_checkpoint:
LOGGER.info('auto resuming using latest checkpoint: %r', latest_checkpoint)
return ResumeTrainModelParams(
model_path=latest_checkpoint.path,
initial_epoch=latest_checkpoint.epoch,
initial_meta=latest_checkpoint.meta
)
if resume_train_model_path:
LOGGER.info('using passed in resume train model path: %r', resume_train_model_path)
return ResumeTrainModelParams(
model_path=resume_train_model_path,
initial_epoch=initial_epoch or 0,
initial_meta={}
)
    return None

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/utils/checkpoints.py
import argparse
import concurrent.futures
import logging
import json
import os
from collections import OrderedDict
from typing import Dict, List, Optional
from tqdm.auto import tqdm
from sciencebeam_trainer_delft.utils.io import open_file
from sciencebeam_trainer_delft.utils.cli import (
add_default_arguments,
process_default_args,
initialize_and_call_main
)
LOGGER = logging.getLogger(__name__)
class OutputFormats:
TEXT = 'text'
JSON = 'json'
ALL_OUTPUT_FORMATS = [OutputFormats.TEXT, OutputFormats.JSON]
def read_json(path: str) -> dict:
with open_file(path, mode='r') as fp:
return json.load(fp)
def get_checkpoints_json(checkpoint_path: str) -> dict:
return read_json(os.path.join(checkpoint_path, 'checkpoints.json'))
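# The checkpoints.json and per-checkpoint meta.json files are assumed to
# roughly follow this structure (only the fields accessed below -
# 'checkpoints', 'last_checkpoint', 'path', 'epoch' and 'f1' - are relied
# upon):
#
#     checkpoints.json:
#         {"checkpoints": [{"path": "...", "epoch": 1}, ...],
#          "last_checkpoint": {"path": "...", "epoch": 10}}
#     <checkpoint>/meta.json:
#         {"epoch": 1, "f1": 0.91, ...}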
def get_checkpoint_urls(checkpoints_json: dict) -> List[str]:
return sorted({
checkpoint['path']
        for checkpoint in checkpoints_json.get('checkpoints', [])
})
def get_last_checkpoint_url(checkpoints_json: dict) -> Optional[str]:
checkpoint_urls = get_checkpoint_urls(checkpoints_json)
return checkpoint_urls[-1] if checkpoint_urls else None
def get_checkpoint_meta(checkpoint_path: str) -> dict:
return read_json(os.path.join(checkpoint_path, 'meta.json'))
def get_checkpoint_meta_map(
checkpoint_urls: List[str],
max_workers: int) -> Dict[str, dict]:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(
lambda path: (path, get_checkpoint_meta(path)),
checkpoint_urls
)
return dict(tqdm(results, total=len(checkpoint_urls)))
def get_checkpoint_meta_map_sorted_by_f1(checkpoint_meta_map: Dict[str, dict]):
return OrderedDict(sorted(
checkpoint_meta_map.items(),
key=lambda item: item[1].get('f1') or 0
))
def get_checkpoint_summary_list(
checkpoint_meta_map_sorted_by_f1: Dict[str, dict],
last_checkpoint: dict,
limit: int
) -> List[dict]:
    last_checkpoint_path = last_checkpoint.get('path') if last_checkpoint else None
best_meta = list(checkpoint_meta_map_sorted_by_f1.values())[-1]
best_f1 = best_meta.get('f1')
return [
{
**meta,
'path': path,
'is_last': path == last_checkpoint_path,
'is_best': meta.get('f1') == best_f1
}
for path, meta in (
list(checkpoint_meta_map_sorted_by_f1.items())[-limit:]
)
]
def format_checkpoint_summary_as_text(
checkpoint_summary_list: List[dict]) -> str:
return 'best checkpoints:\n%s' % '\n\n'.join([
'%05d: %s (%s)%s%s' % (
int(checkpoint_summary.get('epoch', 0)),
checkpoint_summary.get('f1'),
checkpoint_summary.get('path'),
' (last)' if checkpoint_summary.get('is_last') else '',
' (best)' if checkpoint_summary.get('is_best') else ''
)
for checkpoint_summary in checkpoint_summary_list
])
def format_checkpoint_summary(
checkpoint_summary_list: List[dict],
output_format: str) -> str:
if output_format == OutputFormats.TEXT:
return format_checkpoint_summary_as_text(
checkpoint_summary_list
)
if output_format == OutputFormats.JSON:
return json.dumps(checkpoint_summary_list, indent=2)
raise ValueError('unsupported output format: %s' % output_format)
def checkpoint_summary(
checkpoint_path: str,
max_workers: int,
limit: int,
output_format: str):
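    """
    Load checkpoints.json from the given path, fetch every checkpoint's
    meta.json in parallel, rank the checkpoints by f1 and print the top
    "limit" entries in the requested output format.
    """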
LOGGER.info('checkpoint_path: %s', checkpoint_path)
checkpoints_json = get_checkpoints_json(checkpoint_path)
LOGGER.debug('checkpoints_json: %s', checkpoints_json)
checkpoint_urls = get_checkpoint_urls(checkpoints_json)
LOGGER.debug('checkpoint_urls: %s', checkpoint_urls)
last_checkpoint = checkpoints_json.get('last_checkpoint')
if last_checkpoint:
LOGGER.info('last checkpoint: %s', last_checkpoint)
if not checkpoint_urls:
raise RuntimeError('no checkpoints found')
checkpoint_meta_map = get_checkpoint_meta_map(
checkpoint_urls,
max_workers=max_workers
)
LOGGER.debug('checkpoint_meta_map: %s', checkpoint_meta_map)
checkpoint_meta_map_sorted_by_f1 = get_checkpoint_meta_map_sorted_by_f1(
checkpoint_meta_map
)
checkpoint_summary_list = get_checkpoint_summary_list(
checkpoint_meta_map_sorted_by_f1=checkpoint_meta_map_sorted_by_f1,
last_checkpoint=last_checkpoint,
limit=limit,
)
formatted_summary = format_checkpoint_summary(
checkpoint_summary_list=checkpoint_summary_list,
output_format=output_format
)
print(formatted_summary)
def parse_args(argv: List[str] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Checkpoints related summary"
)
parser.add_argument(
"--output-format",
choices=ALL_OUTPUT_FORMATS,
default=OutputFormats.TEXT,
help="The desired output format."
)
parser.add_argument(
"--checkpoint",
required=True,
help="The base path of the checkpoints."
)
parser.add_argument(
"--max-workers",
type=int,
default=20,
help="Maximum number of workers for IO requests"
)
parser.add_argument(
"--limit",
type=int,
default=5,
help="Maximum number results to show"
)
add_default_arguments(parser)
return parser.parse_args(argv)
def run(args: argparse.Namespace):
checkpoint_summary(
checkpoint_path=args.checkpoint,
max_workers=args.max_workers,
limit=args.limit,
output_format=args.output_format
)
def main(argv: List[str] = None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == "__main__":
    initialize_and_call_main(main)

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/checkpoints.py
import logging
import argparse
from argparse import _ActionsContainer as ArgParseActionsContainer
from typing import List
from sciencebeam_trainer_delft.utils.misc import parse_number_ranges
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
add_train_notification_arguments
)
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
get_default_batch_size,
get_default_stateful
)
from sciencebeam_trainer_delft.sequence_labelling.config import (
DEFAULT_CHAR_INPUT_DROPOUT,
DEFAULT_CHAR_LSTM_DROPOUT
)
from sciencebeam_trainer_delft.sequence_labelling.models import get_model_names
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import (
DEFAULT_STOP_EPSILON_VALUE,
DEFAULT_STOP_WINDOW_SIZE
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
TAG_OUTPUT_FORMATS
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
EvaluationOutputFormats,
EVALUATION_OUTPUT_FORMATS
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
add_transfer_learning_arguments
)
LOGGER = logging.getLogger(__name__)
GROBID_MODEL_NAMES = [
'affiliation-address', 'citation', 'date', 'figure', 'fulltext', 'header',
'name', 'name-citation', 'name-header', 'patent', 'reference-segmenter',
'segmentation', 'software', 'table'
]
DEFAULT_RANDOM_SEED = 42
DEFAULT_TAG_OUTPUT_FORMAT = TagOutputFormats.XML
def add_common_arguments(
parser: argparse.ArgumentParser,
max_sequence_length_default: int = None):
input_group = parser.add_argument_group('input')
input_group.add_argument(
"--input",
nargs='+',
action='append',
help="provided training file"
)
input_group.add_argument(
"--shuffle-input",
action="store_true",
help="Shuffle the input before splitting"
)
input_group.add_argument(
"--limit",
type=int,
help=(
"limit the number of training samples."
" With more than one input file, the limit will be applied to"
" each of the input files individually"
)
)
parser.add_argument(
"--random-seed",
type=int,
default=DEFAULT_RANDOM_SEED,
help="Set the random seed for reproducibility"
)
parser.add_argument(
"--batch-size", type=int, default=get_default_batch_size(),
help="batch size"
)
parser.add_argument(
"--max-sequence-length", type=int,
default=max_sequence_length_default,
help="maximum sequence length"
)
parser.add_argument(
"--no-use-lmdb", action="store_true",
help="Do not use LMDB embedding cache (load embeddings into memory instead)"
)
parser.add_argument("--multiprocessing", action="store_true", help="Use multiprocessing")
parser.add_argument("--quiet", action="store_true", help="Only log errors")
parser.add_argument(
"--save-input-to-and-exit",
help=(
"If set, saves the input to the specified path and exits."
" This can be useful to retrain the model outside GROBID."
)
)
parser.add_argument(
"--log-file",
help=(
"If set, saves the output to the specified log file."
" This may also be a file in a bucket, in which case it will be uploaded at the end."
" Add the .gz extension if you wish to compress the file."
)
)
parser.add_argument("--job-dir", help="job dir (only used when running via ai platform)")
def add_model_path_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument("--model-path", **kwargs)
def add_fold_count_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument("--fold-count", type=int, default=1, **kwargs)
def add_eval_output_format_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-output-format",
choices=EVALUATION_OUTPUT_FORMATS,
default=EvaluationOutputFormats.TEXT
)
def add_eval_first_entity_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-first-entity",
action="store_true",
help=''.join([
            'If set, additionally evaluates the first entity (e.g. first_<author>).'
])
)
def add_eval_output_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-output-path",
help='If specified, saves the evaluation to the specified path in the JSON format'
)
def add_eval_output_arguments(parser: argparse.ArgumentParser):
add_eval_output_format_argument(parser)
add_eval_first_entity_argument(parser)
add_eval_output_path_argument(parser)
def add_eval_input_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-input",
nargs='+',
action='append',
help=' '.join([
"Evaluation data at the end of training. If not specified,",
"it will use a slice of the training data"
])
)
parser.add_argument(
"--eval-limit",
type=int,
help=' '.join([
"Limit the number of documents to use for evaluation.",
"This is mostly for testing to make evaluation faster."
])
)
def add_dl_eval_model_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-max-sequence-length",
type=int,
help=' '.join([
"Maximum sequence length to use for evaluation.",
"If not specified, no limit will be applied."
])
)
parser.add_argument(
"--eval-input-window-stride",
type=int,
help="Should be equal or less than eval max sequence length"
)
parser.add_argument(
"--eval-batch-size",
type=int,
help=' '.join([
"The batch size to be used for evaluation.",
"If not specified, the training batch size is used.",
"This may be useful to evaluate on longer sequences",
"that could require a smaller batch size."
])
)
def add_tag_output_format_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--tag-output-format",
default=DEFAULT_TAG_OUTPUT_FORMAT,
choices=TAG_OUTPUT_FORMATS,
help="output format for tag results",
**kwargs
)
def add_tag_output_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--tag-output-path",
help='If specified, saves the tag result to the specified path'
)
def add_tag_transformed_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--tag-transformed",
action='store_true',
help=(
'If enabled, the output will contain the transformed dataset (if any).'
' More specifically, that will for example contain the "unrolled" data.'
)
)
def add_output_argument(parser: ArgParseActionsContainer, **kwargs):
parser.add_argument("--output", help="directory where to save a trained model", **kwargs)
def add_max_epoch_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--max-epoch", type=int, default=10,
help="max epoch to train to",
**kwargs
)
def add_stateful_argument(parser: argparse.ArgumentParser, **kwargs):
default_value = get_default_stateful()
parser.add_argument(
"--stateful",
dest="stateful",
default=default_value,
action="store_true",
help="Make RNNs stateful (required for truncated BPTT)",
**kwargs
)
parser.add_argument(
"--no-stateful",
dest="stateful",
default=default_value,
action="store_false",
help="Disable statefulness (default)",
**kwargs
)
def add_input_window_stride_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--input-window-stride",
type=int,
help="Should be equal or less than max sequence length",
**kwargs
)
def add_train_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--architecture", default='BidLSTM_CRF',
choices=get_model_names(),
help="type of model architecture to be used"
)
parser.add_argument("--use-ELMo", action="store_true", help="Use ELMo contextual embeddings")
parser.add_argument(
"--max-char-length",
type=int,
default=30,
help="The maximum number of chars used by the model"
)
parser.add_argument(
"--additional-token-feature-indices",
type=parse_number_ranges,
help="".join([
"Additional feature values that should be used as tokens.",
" e.g. 0 or 0-3.",
" If blank, no additional token features will be used."
])
)
parser.add_argument(
"--text-feature-indices",
type=parse_number_ranges,
help="".join([
"Feature values that should be treated as text input.",
" e.g. 0 or 0-3.",
" If blank, no additext features will be used.",
" Cannot be used together with --additional-token-feature-indices.",
" Text features will get tokenized."
" If word embeddings are used, then the number of tokens will depend on",
" --concatenated-embeddings-token-count.",
" Tokens from text features replace regular tokens from the training data."
])
)
parser.add_argument(
"--unroll-text-feature-index",
type=int,
help="".join([
"Tokenizes the text at the specified index.",
" Each token will become a separate token.",
" The features will be duplicated for each token.",
" Labels will also be duplicated for each token.",
" Where a label refers to the beginning of a tag,",
" this will only be used for the first token.",
" All other labels will be the intermediate version of the tag."
" The max sequence length will get applied to the unrolled tokens."
" Additionally a new token will be added, with the values:"
" LINESTART, LINEIN, LINEEND"
])
)
parser.add_argument(
"--concatenated-embeddings-token-count",
type=int,
help="".join([
"The number of tokens to concatenate as word embeddings.",
" If not specified, it concatenate the main token with any",
" --additional-token-feature-indices (if any).",
" This option is mainly useful in combination with --text-feature-indices.",
" It has no effect, if no word embeddings are used."
])
)
features_group = parser.add_argument_group('features')
features_group.add_argument("--use-features", action="store_true", help="Use features")
features_group.add_argument(
"--features-indices", "--feature-indices",
type=parse_number_ranges,
help="The feature indices to use. e.g. 7-10. If blank, all of the features will be used."
)
features_group.add_argument(
"--continuous-features-indices",
type=parse_number_ranges,
help=(
"The feature indices to use that are continous. e.g. 7-10."
" If blank, features will be assumed to be categorical."
)
)
features_group.add_argument(
"--features-embedding-size", "--feature-embedding-size",
type=int,
help="size of feature embedding, use 0 to disable embedding"
)
features_group.add_argument(
"--use-features-indices-input", action="store_true",
help="Use features indices values (should be inferred from the model)"
)
features_group.add_argument(
"--features-lstm-units", type=int,
help="Number of LSTM units used by the features"
)
add_stateful_argument(parser)
add_input_window_stride_argument(parser)
output_group = parser.add_argument_group('output')
add_output_argument(output_group)
output_group.add_argument("--checkpoint", help="directory where to save a checkpoint model")
output_group.add_argument(
"--checkpoint-epoch-interval",
type=int,
default=1,
help="save checkpoints every n epochs"
)
parser.add_argument(
"--embedding", default="glove-6B-50d",
help="name of word embedding"
)
parser.add_argument(
"--preload-embedding",
help=" ".join([
"Name or URL to embedding to preload.",
"This can be useful in combination with resuming model training."
])
)
features_group.add_argument(
"--no-embedding",
dest="use_word_embeddings",
default=True,
action="store_false",
help="Disable the use of word embedding"
)
parser.add_argument(
"--char-embedding-size", type=int, default=25,
help="size of char embedding"
)
parser.add_argument(
"--char-lstm-units", type=int, default=25,
help="number of list units for chars"
)
parser.add_argument(
"--char-input-mask-zero", action='store_true',
help="enables masking of zero for the char input"
)
parser.add_argument(
"--char-input-dropout", type=float, default=DEFAULT_CHAR_INPUT_DROPOUT,
help="dropout for char input"
)
parser.add_argument(
"--char-lstm-dropout", type=float, default=DEFAULT_CHAR_LSTM_DROPOUT,
help="dropout for char lstm"
)
parser.add_argument(
"--word-lstm-units", type=int, default=100,
help="number of lstm units for words"
)
parser.add_argument(
"--dropout", type=float, default=0.5,
help="main dropout"
)
parser.add_argument(
"--recurrent-dropout", type=float, default=0.5,
help="recurrent dropout"
)
add_max_epoch_argument(parser)
parser.add_argument(
"--early-stopping-patience", type=int, default=10,
help="how many epochs to continue training after the f1 score hasn't improved"
)
parser.add_argument(
"--resume-train-model-path",
help="path to the model training should be resumed from (e.g. path to checkpoint)"
)
parser.add_argument(
"--initial-epoch",
type=int,
default=0,
help="Sets the initial epoch for model training."
)
parser.add_argument(
"--auto-resume", action='store_true',
help="enables auto-resuming training using checkpoints"
)
add_transfer_learning_arguments(parser)
add_train_notification_arguments(parser)
def add_wapiti_train_arguments(parser: argparse.ArgumentParser):
add_output_argument(parser)
add_max_epoch_argument(parser)
parser.add_argument("--wapiti-template", required=True)
parser.add_argument(
"--wapiti-gzip",
action="store_true",
help="whether to gzip wapiti models before saving"
)
parser.add_argument(
"--wapiti-stop-epsilon-value",
default=DEFAULT_STOP_EPSILON_VALUE
)
parser.add_argument(
"--wapiti-stop-window-size",
type=int,
default=DEFAULT_STOP_WINDOW_SIZE
)
add_train_notification_arguments(parser)
def get_wapiti_train_args(args: argparse.Namespace) -> dict:
return dict(
stop_epsilon_value=args.wapiti_stop_epsilon_value,
stop_window_size=args.wapiti_stop_window_size
)
def add_wapiti_install_arguments(parser: argparse.ArgumentParser):
example_url = "https://github.com/kermitt2/Wapiti/archive/master.tar.gz"
parser.add_argument(
"--wapiti-install-source",
help="source file to install wapiti from, e.g. %s" % example_url
)
def add_all_non_positional_arguments(parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
def add_model_positional_argument(parser: argparse.ArgumentParser):
parser.add_argument("model", nargs='?', choices=GROBID_MODEL_NAMES)
def _flatten_input_paths(input_paths_list: List[List[str]]) -> List[str]:
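    """
    Flatten the nested lists produced by repeatable --input style arguments
    (action='append' with nargs='+'), e.g. [['a', 'b'], ['c']] becomes
    ['a', 'b', 'c'].
    """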
if not input_paths_list:
return []
return [input_path for input_paths in input_paths_list for input_path in input_paths]
def process_args(args: argparse.Namespace) -> None:
args.input = _flatten_input_paths(args.input)
try:
args.eval_input = _flatten_input_paths(args.eval_input)
except AttributeError:
pass
def create_argument_parser() -> argparse.ArgumentParser:
return argparse.ArgumentParser(
description="Trainer for GROBID models",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/grobid_trainer/cli_args.py
import logging
import time
import tempfile
import os
from collections import Counter
from datetime import datetime, timezone
from itertools import islice
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.numpy import shuffle_arrays
from sciencebeam_trainer_delft.utils.io import (
write_text,
auto_uploading_output_file
)
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
TrainNotificationManager,
notify_train_start,
notify_train_success,
notify_train_error
)
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
Sequence
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_and_labels_crf_file
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_adapters import (
WapitiModelAdapter,
WapitiModelTrainAdapter
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
get_tag_result,
iter_format_tag_result
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
EvaluationOutputFormats,
ClassificationResult
)
from sciencebeam_trainer_delft.sequence_labelling.input_info import (
iter_flat_batch_tokens,
iter_flat_features,
get_quantiles,
get_quantiles_feature_value_length_by_index,
get_feature_counts,
get_suggested_feature_indices,
format_dict,
format_indices
)
from sciencebeam_trainer_delft.sequence_labelling.utils.checkpoints import (
get_resume_train_model_params
)
LOGGER = logging.getLogger(__name__)
DEFAULT_RANDOM_SEED = 42
DEFAULT_TAG_OUTPUT_FORMAT = TagOutputFormats.XML
def set_random_seeds(random_seed: int):
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
def get_default_training_data(model: str) -> str:
return 'data/sequenceLabelling/grobid/' + model + '/' + model + '-060518.train'
def log_data_info(x: np.array, y: np.array, features: np.array):
    LOGGER.info('x sample: %s (y: %s)', x[0][:10], y[0][:1])
LOGGER.info(
'feature dimensions of first sample, word: %s',
[{index: value for index, value in enumerate(features[0][0])}] # noqa pylint: disable=unnecessary-comprehension
)
def _load_data_and_labels_crf_files(
input_paths: List[str], limit: int = None) -> Tuple[np.array, np.array, np.array]:
if len(input_paths) == 1:
return load_data_and_labels_crf_file(input_paths[0], limit=limit)
x_list = []
y_list = []
features_list = []
for input_path in input_paths:
LOGGER.debug('calling load_data_and_labels_crf_file: %s', input_path)
x, y, f = load_data_and_labels_crf_file(
input_path,
limit=limit
)
x_list.append(x)
y_list.append(y)
features_list.append(f)
return np.concatenate(x_list), np.concatenate(y_list), np.concatenate(features_list)
def get_clean_features_mask(features_all: np.array) -> List[bool]:
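    """
    Return a per-document boolean mask selecting documents whose feature
    vectors all have the first encountered feature length; documents with
    deviating feature vector lengths are masked out.
    """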
feature_lengths = Counter((
len(features_vector)
for features_doc in features_all
for features_vector in features_doc
))
if len(feature_lengths) <= 1:
return [True] * len(features_all)
    expected_feature_length = next(iter(feature_lengths.keys()))
LOGGER.info('cleaning features, expected_feature_length=%s', expected_feature_length)
return [
all(len(features_vector) == expected_feature_length for features_vector in features_doc)
for features_doc in features_all
]
def get_clean_x_y_features(x: np.array, y: np.array, features: np.array):
clean_features_mask = get_clean_features_mask(features)
if sum(clean_features_mask) != len(clean_features_mask):
LOGGER.info(
'ignoring %d documents with inconsistent features',
len(clean_features_mask) - sum(clean_features_mask)
)
return (
x[clean_features_mask],
y[clean_features_mask],
features[clean_features_mask]
)
return x, y, features
def load_data_and_labels(
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
clean_features: bool = True,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
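    """
    Download (where necessary) and load one or more CRF training files,
    optionally shuffling and limiting the data, and, if clean_features is
    enabled, drop documents with inconsistent feature lengths.
    Returns a (x, y, features) tuple.
    """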
assert download_manager
assert input_paths
LOGGER.info('loading data from: %s', input_paths)
downloaded_input_paths = [
download_manager.download_if_url(input_path)
for input_path in input_paths
]
x_all, y_all, f_all = _load_data_and_labels_crf_files(
downloaded_input_paths,
limit=limit
)
if shuffle_input:
shuffle_arrays([x_all, y_all, f_all], random_seed=random_seed)
log_data_info(x_all, y_all, f_all)
if clean_features:
(x_all, y_all, f_all) = get_clean_x_y_features(
x_all, y_all, f_all
)
return x_all, y_all, f_all
def notify_model_train_start(
model: Union[Sequence, WapitiModelTrainAdapter],
train_notification_manager: Optional[TrainNotificationManager],
output_path: Optional[str]
):
notify_train_start(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
checkpoints_path=model.log_dir,
resume_train_model_path=model.model_path,
initial_epoch=model.training_config.initial_epoch
)
def do_train(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
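    """
    Load the training data, split off 10% as a validation set, train the
    given model (DL or Wapiti), save it to output_path (or the model's
    default location) and send start / success notifications.
    """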
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# saving the model
if output_path:
LOGGER.info('saving model to: %s', output_path)
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path
)
def do_train_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
def process_resume_train_model_params(
model: Sequence,
auto_resume: bool,
resume_train_model_path: Optional[str]
):
resume_train_model_params = get_resume_train_model_params(
log_dir=model.log_dir,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path,
initial_epoch=model.training_config.initial_epoch
)
if resume_train_model_params:
model.load_from(resume_train_model_params.model_path)
model.training_config.initial_epoch = resume_train_model_params.initial_epoch
model.training_config.initial_meta = resume_train_model_params.initial_meta
# train a GROBID model with all available data
def train(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
max_epoch=100,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
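# Illustrative usage of train() - a sketch only; argument values are
# examples and a DownloadManager / EmbeddingManager instance has to be
# provided by the caller:
#
#     train(
#         'header',
#         embeddings_name='glove-6B-50d',
#         input_paths=[get_default_training_data('header')],
#         output_path='data/models/sequenceLabelling/',
#         download_manager=download_manager,
#         embedding_manager=embedding_manager
#     )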
def wapiti_train(
model_name: str,
template_path: str,
output_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def output_classification_result(
classification_result: ClassificationResult,
eval_output_args: Optional[dict],
eval_input_paths: List[str] = None,
model_path: str = None,
model_summary_props: dict = None):
eval_output_args = eval_output_args or dict()
assert eval_output_args is not None
    output_format = eval_output_args.get('eval_output_format')
output_path = eval_output_args.get('eval_output_path')
eval_first_entity = eval_output_args.get('eval_first_entity')
if not output_format:
output_format = EvaluationOutputFormats.TEXT
if eval_first_entity:
classification_result = classification_result.with_first_entities()
meta: Dict[str, Any] = {}
meta['eval_timestamp'] = datetime.now(timezone.utc).isoformat()
if eval_input_paths:
meta['eval_input_paths'] = eval_input_paths
if model_path:
meta['model_path'] = model_path
if model_summary_props:
meta.update(model_summary_props)
if output_path:
LOGGER.info('writing evaluation to: %s', output_path)
write_text(output_path, classification_result.get_json_formatted_report(meta=meta))
if output_format == EvaluationOutputFormats.TEXT:
print("\nEvaluation:\n%s" % classification_result.get_text_formatted_report(
digits=4
))
elif output_format == EvaluationOutputFormats.JSON:
print(classification_result.get_json_formatted_report(meta=meta))
else:
print(classification_result.get_formatted_report(
output_format=output_format
))
def do_train_eval(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
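    """
    Train the model as in do_train and evaluate it afterwards, either on the
    separately provided eval_input_paths or on a 10% hold-out split of the
    training data; the evaluation result is reported via
    output_classification_result and included in the success notification.
    """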
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if eval_input_paths:
x_eval, y_eval, features_eval = load_data_and_labels(
input_paths=eval_input_paths, limit=eval_limit,
download_manager=download_manager
)
x_train_all, y_train_all, features_train_all = (
x_all, y_all, features_all
)
else:
x_train_all, x_eval, y_train_all, y_eval, features_train_all, features_eval = (
train_test_split(x_all, y_all, features_all, test_size=0.1, shuffle=False)
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_train_all, y_train_all, features_train_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
LOGGER.info('%d evaluation sequences', len(x_eval))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
if fold_count == 1:
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
else:
assert isinstance(model, Sequence), \
'nfold evaluation currently only supported for DL models'
model.train_nfold(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid,
fold_number=fold_count
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=eval_input_paths,
model_path=model.get_model_output_path(output_path),
model_summary_props=model.model_summary_props
)
# saving the model
if output_path:
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path,
classification_result=classification_result
)
def do_train_eval_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train_eval(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
# split data, train a GROBID model and evaluate it
def train_eval(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
max_sequence_length: int = 100,
fold_count=1, max_epoch=100, batch_size=20,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def wapiti_train_eval(
model_name: str,
template_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
assert fold_count == 1, 'only fold_count == 1 supported'
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def do_eval_model(
model: Union[Sequence, WapitiModelAdapter],
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_output_args: dict = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if split_input:
_, x_eval, _, y_eval, _, features_eval = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
else:
x_eval = x_all
y_eval = y_all
features_eval = features_all
LOGGER.info('%d evaluation sequences', len(x_eval))
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=input_paths,
model_path=model.model_path,
model_summary_props=model.model_summary_props
)
def get_model_name(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None):
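    """
    Derive the effective model name: prefix it with 'grobid-' unless an
    explicit output or model path is given, and append '-with_ELMo' when
    ELMo embeddings are used.
    """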
    if not output_path and not model_path:
        model_name = 'grobid-' + model_name
if use_ELMo:
model_name += '-with_ELMo'
return model_name
def load_delft_model(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None,
max_sequence_length: Optional[int] = 100,
fold_count: int = 1,
batch_size: int = 20,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = Sequence(
get_model_name(
model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path
),
embeddings_name=None,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
assert model_path
model.load_from(model_path)
return model
def eval_model(
model_name: str,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
fold_count: int = 1,
batch_size: int = 20,
eval_output_args: dict = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
def wapiti_eval_model(
model_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
fold_count: int = 1,
eval_output_args: dict = None,
wapiti_binary_path: str = None):
assert fold_count == 1, 'only fold_count == 1 supported'
model = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
def do_tag_input(
model: Union[Sequence, WapitiModelAdapter],
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
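    """
    Load the input sequences, tag them with the given model and write the
    formatted tag results (together with the expected labels from the input
    data) to tag_output_path or to stdout.
    """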
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
LOGGER.info('%d input sequences', len(x_all))
tag_result = model.iter_tag(
x_all,
output_format=None,
features=features_all
)
if LOGGER.isEnabledFor(logging.DEBUG):
if not isinstance(tag_result, dict):
tag_result = list(tag_result)
LOGGER.debug('actual raw tag_result: %s', tag_result)
if isinstance(model, Sequence) and model.tag_transformed:
dataset_transformer = model.dataset_transformer_factory()
expected_x_all, expected_y_all, expected_features_all = dataset_transformer.fit_transform(
x_all, y_all, features=features_all
)
else:
expected_x_all = x_all
expected_y_all = y_all
expected_features_all = features_all
expected_tag_result = get_tag_result(
texts=expected_x_all,
labels=expected_y_all
)
LOGGER.debug('actual raw expected_tag_result: %s', expected_tag_result)
formatted_tag_result_iterable = iter_format_tag_result(
tag_result,
output_format=tag_output_format,
expected_tag_result=expected_tag_result,
texts=expected_x_all,
features=expected_features_all,
model_name=model._get_model_name() # pylint: disable=protected-access
)
if tag_output_path:
LOGGER.info('writing tag results to: %r', tag_output_path)
with auto_uploading_output_file(tag_output_path) as fp:
for text in formatted_tag_result_iterable:
fp.write(text)
LOGGER.info('tag results written to: %r', tag_output_path)
else:
LOGGER.info('writing tag_result to stdout')
try:
for text in formatted_tag_result_iterable:
print(text, end='')
except BrokenPipeError:
LOGGER.info('received broken pipe error')
def tag_input(
model_name: str,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = None,
input_window_stride: int = None,
stateful: bool = None,
fold_count: int = 1,
batch_size: int = 20,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs
):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
input_window_stride=input_window_stride,
stateful=stateful,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_tag_input(
model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def wapiti_tag_input(
model_path: str,
download_manager: DownloadManager,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
random_seed: int = DEFAULT_RANDOM_SEED,
shuffle_input: bool = False,
wapiti_binary_path: str = None
):
model: WapitiModelAdapter = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_tag_input(
model=model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def print_input_info(
input_paths: List[str],
limit: int = None,
download_manager: DownloadManager = None):
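    """
    Print summary statistics about the input data: sequence and token
    lengths, feature counts and value lengths (flagging inconsistent feature
    vector lengths), suggested feature indices and label counts.
    """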
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit,
download_manager=download_manager,
clean_features=False
)
seq_lengths = np.array([len(seq) for seq in x_all])
y_counts = Counter(
y_row
for y_doc in y_all
for y_row in y_doc
)
flat_features = list(iter_flat_features(features_all))
feature_lengths = Counter(map(len, flat_features))
print('number of input sequences: %d' % len(x_all))
print('sequence lengths: %s' % format_dict(get_quantiles(seq_lengths)))
print('token lengths: %s' % format_dict(get_quantiles(
map(len, iter_flat_batch_tokens(x_all))
)))
print('number of features: %d' % len(features_all[0][0]))
if len(feature_lengths) > 1:
print('inconsistent feature length counts: %s' % format_dict(feature_lengths))
for feature_length in feature_lengths:
print('examples with feature length=%d:\n%s' % (
feature_length,
'\n'.join(islice((
' '.join(features_vector)
for features_vector in flat_features
if len(features_vector) == feature_length
), 3))
))
(x_all, y_all, features_all) = get_clean_x_y_features(
x_all, y_all, features_all
)
quantiles_feature_value_lengths = get_quantiles_feature_value_length_by_index(features_all)
feature_counts = get_feature_counts(features_all)
print('feature value lengths: %s' % format_dict(quantiles_feature_value_lengths))
print('feature counts: %s' % format_dict(feature_counts))
print('suggested feature indices: %s' % format_indices(
get_suggested_feature_indices(feature_counts)
))
    print('label counts: %s' % format_dict(y_counts))

# End of file: /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/grobid_trainer/utils.py
import logging
import time
from functools import partial
from typing import List, Tuple
import pandas as pd
import delft.textClassification.models
import delft.textClassification.wrapper
from sciencebeam_trainer_delft.text_classification.wrapper import Classifier
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.models.Attention import Attention
from sciencebeam_trainer_delft.text_classification.models import (
get_callbacks,
train_model
)
from sciencebeam_trainer_delft.text_classification.saving import (
ModelSaver
)
from sciencebeam_trainer_delft.text_classification.evaluation import (
ClassificationResult
)
from sciencebeam_trainer_delft.text_classification.reader import (
load_data_frame,
load_texts_and_classes_pandas,
load_classes_pandas
)
from sciencebeam_trainer_delft.text_classification.config import (
AppConfig,
ModelConfig,
TrainingConfig
)
LOGGER = logging.getLogger(__name__)
def get_downloaded_input_paths(
input_paths: List[str],
download_manager: DownloadManager) -> List[str]:
return [
download_manager.download_if_url(input_path)
for input_path in input_paths
]
def load_input_data_frame(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> pd.DataFrame:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
df = load_data_frame(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(df))
return df
def load_input_data(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> Tuple[List[str], List[List[str]], List[str]]:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
xtr, y, y_names = load_texts_and_classes_pandas(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(xtr))
return xtr, y, y_names
def load_label_data(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> Tuple[List[List[str]], List[str]]:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
y, y_names = load_classes_pandas(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(y))
return y, y_names
def _patch_delft():
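    """Monkey-patch DELFT to use this project's Attention layer and train_model implementation."""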
delft.textClassification.models.Attention = Attention
delft.textClassification.wrapper.train_model = train_model
def train(
app_config: AppConfig,
model_config: ModelConfig,
training_config: TrainingConfig,
train_input_texts: List[str],
train_input_labels: List[List[str]],
model_path: str):
_patch_delft()
model = Classifier(
embeddings_name=model_config.embeddings_name,
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.embeddings_name = model_config.embeddings_name
model.model_config = model_config
model.model_config.word_embedding_size = model.embeddings.embed_size
model.training_config = training_config
model_saver = ModelSaver(model_config)
callbacks = get_callbacks(
model_saver=model_saver,
log_dir=training_config.log_dir
)
delft.textClassification.wrapper.train_model = partial(
train_model,
callbacks=callbacks
)
model.train(train_input_texts, train_input_labels)
LOGGER.info('saving model to: %s', model_path)
model.save_to(model_path)
def predict(
app_config: AppConfig,
eval_input_texts: List[str],
model_path: str):
model = Classifier(
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.load_from(model_path)
LOGGER.info('number of texts to classify: %s', len(eval_input_texts))
start_time = time.time()
result = model.predict(eval_input_texts, output_format="csv")
LOGGER.info("runtime: %s seconds", round(time.time() - start_time, 3))
return {
'labels': model.model_config.list_classes,
'prediction': result
}
def evaluate(
app_config: AppConfig,
eval_input_texts: List[str],
eval_input_labels: List[List[str]],
model_path: str):
model = Classifier(
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.load_from(model_path)
LOGGER.info('number of texts to classify: %s', len(eval_input_texts))
start_time = time.time()
result = model.predict(eval_input_texts, output_format="csv")
LOGGER.info("runtime: %s seconds", round(time.time() - start_time, 3))
return ClassificationResult(
y_true=eval_input_labels,
y_pred=result,
label_names=model.model_config.list_classes
) | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/cli_utils.py | 0.703549 | 0.267387 | cli_utils.py | pypi |
import logging
from collections import OrderedDict
from typing import List
import numpy as np
from sklearn.metrics import (
log_loss,
roc_auc_score,
f1_score,
precision_score,
recall_score
)
LOGGER = logging.getLogger(__name__)
class ClassificationResult:
def __init__(
self,
y_true: List[List[str]],
y_pred: List[List[str]],
label_names: List[str]
):
y_true_array: np.ndarray = np.asarray(y_true)
y_pred_array: np.ndarray = np.asarray(y_pred)
LOGGER.info('y_true: %s', y_true)
LOGGER.info('y_pred: %s', y_pred)
self.scores = OrderedDict()
for j, label_name in enumerate(label_names):
labels = [0, 1]
y_true_class = y_true_array[:, j]
y_pred_class = y_pred_array[:, j]
y_true_binary_class = y_true_array[:, j] >= 0.5
y_pred_binary_class = y_pred_array[:, j] >= 0.5
loss = log_loss(y_true_class, y_pred_class, labels=labels)
precision = precision_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
recall = recall_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
f1 = f1_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
try:
roc_auc = roc_auc_score(y_true_class, y_pred_class)
except ValueError as e:
LOGGER.warning('could not calculate roc (index=%d): %s', j, e)
roc_auc = np.nan
self.scores[label_name] = {
'precision': precision,
'recall': recall,
'f1': f1,
'loss': loss,
'roc_auc': roc_auc,
'support': np.sum(y_true_binary_class)
}
self.macro_averages = {
'precision': np.mean([score['precision'] for score in self.scores.values()]),
'recall': np.mean([score['recall'] for score in self.scores.values()]),
'f1': np.mean([score['f1'] for score in self.scores.values()]),
'loss': np.mean([score['loss'] for score in self.scores.values()]),
'roc_auc': np.mean([score['roc_auc'] for score in self.scores.values()]),
'support': np.sum([score['support'] for score in self.scores.values()]),
}
@property
def text_formatted_report(self):
return self.get_text_formatted_report().rstrip()
def get_text_formatted_report(
self,
digits: int = 4,
exclude_no_support: bool = False):
name_width = max(map(len, self.scores.keys()))
last_line_heading = 'all (macro avg. / mean)'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support", "roc_auc"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}' + u' {:>9.{digits}f}\n'
for type_name in sorted(self.scores.keys()):
item_scores = self.scores[type_name]
if exclude_no_support and not item_scores['support']:
continue
report += row_fmt.format(
*[
type_name,
item_scores['precision'],
item_scores['recall'],
item_scores['f1'],
item_scores['support'],
item_scores['roc_auc']
],
width=width,
digits=digits
)
report += u'\n'
report += row_fmt.format(
*[
last_line_heading,
self.macro_averages['precision'],
self.macro_averages['recall'],
self.macro_averages['f1'],
self.macro_averages['support'],
self.macro_averages['roc_auc']
],
width=width,
digits=digits
)
return report | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/evaluation.py | 0.86898 | 0.383237 | evaluation.py | pypi |
from typing import Tuple, List
import pandas as pd
import numpy as np
from sciencebeam_trainer_delft.utils.io import auto_uploading_output_file
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/textClassification/reader.py
def get_filepath_csv_separator(filepath: str):
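    """Return the delimiter implied by the file extension: tab for .tsv / .tsv.gz, comma otherwise."""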
if filepath.endswith('.tsv') or filepath.endswith('.tsv.gz'):
return '\t'
return ','
def load_data_frame(
filepath: str,
limit: int = None,
**kwargs) -> pd.DataFrame:
sep = get_filepath_csv_separator(filepath)
return pd.read_csv(filepath, nrows=limit, sep=sep, **kwargs)
def save_data_frame(
df: pd.DataFrame,
filepath: str,
index: bool = False,
**kwargs) -> pd.DataFrame:
sep = get_filepath_csv_separator(filepath)
with auto_uploading_output_file(filepath, mode='w') as fp:
return df.to_csv(fp, sep=sep, index=index, **kwargs)
def get_texts_and_classes_from_data_frame(
df: pd.DataFrame) -> Tuple[List[str], List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id text class_0 ... class_n
id_0 text_0 class_00 ... class_n0
id_1 text_1 class_01 ... class_n1
...
id_m text_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
df = df.copy()
df.iloc[:, 1].fillna('MISSINGVALUE', inplace=True)
texts_list = []
for j in range(0, df.shape[0]):
texts_list.append(df.iloc[j, 1])
classes = df.iloc[:, 2:]
classes_list = classes.values.tolist()
classes_label_names = list(classes.columns.values)
return np.asarray(texts_list), np.asarray(classes_list), classes_label_names
def load_texts_and_classes_pandas(
filepath: str,
limit: int = None,
**kwargs) -> Tuple[List[str], List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id text class_0 ... class_n
id_0 text_0 class_00 ... class_n0
id_1 text_1 class_01 ... class_n1
...
id_m text_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
return get_texts_and_classes_from_data_frame(
load_data_frame(filepath, limit=limit, **kwargs)
)
def load_classes_pandas(
filepath: str,
limit: int = None,
**kwargs) -> Tuple[List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id class_0 ... class_n
id_0 class_00 ... class_n0
id_1 class_01 ... class_n1
...
id_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
df = load_data_frame(filepath, limit=limit, **kwargs)
classes = df.iloc[:, 1:]
classes_list = classes.values.tolist()
classes_label_names = list(classes.columns.values)
return np.asarray(classes_list), classes_label_names | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/reader.py | 0.785925 | 0.337913 | reader.py | pypi |
import logging
import math
import os
from typing import List
import numpy as np
from sklearn.metrics import log_loss, roc_auc_score
from keras.models import Model
from keras.callbacks import Callback
from sciencebeam_trainer_delft.text_classification.saving import (
ModelSaver
)
from sciencebeam_trainer_delft.text_classification.callbacks import (
ModelWithMetadataCheckpoint
)
LOGGER = logging.getLogger(__name__)
def get_callbacks(
model_saver: ModelSaver,
log_dir: str = None,
meta: dict = None) -> List[Callback]:
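    """Build the Keras callbacks; when log_dir is set, the model (plus metadata) is checkpointed after every epoch."""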
callbacks = []
if log_dir:
epoch_dirname = 'epoch-{epoch:05d}'
assert model_saver
save_callback = ModelWithMetadataCheckpoint(
os.path.join(log_dir, epoch_dirname),
model_saver=model_saver,
monitor='f1',
meta=meta
)
callbacks.append(save_callback)
return callbacks
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/textClassification/models.py
def train_model( # pylint: disable=too-many-statements
model: Model,
list_classes: List[str],
batch_size: int, # pylint: disable=unused-argument
max_epoch: int,
use_roc_auc: bool,
class_weights,
training_generator,
validation_generator,
val_y,
use_ELMo=False,
use_BERT=False,
multiprocessing: bool = True,
nb_workers: int = 6,
callbacks: List[Callback] = None):
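    """Training loop adapted from DELFT: after each epoch the model is evaluated on the
    validation generator, training stops early after 5 epochs without improvement of the
    monitored metric (ROC AUC if use_roc_auc, otherwise log loss), and the best weights
    are restored before returning (model, best score).
    """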
best_loss = -1.0
best_roc_auc = -1.0
best_weights = None
best_epoch = 0
current_epoch = 1
if use_ELMo or use_BERT:
# worker at 0 means the training will be executed in the main thread
nb_workers = 0
multiprocessing = False
while current_epoch <= max_epoch:
model.fit_generator(
generator=training_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers,
class_weight=class_weights,
epochs=current_epoch,
initial_epoch=(current_epoch - 1),
callbacks=callbacks)
y_pred = model.predict_generator(
generator=validation_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers)
total_loss = 0.0
total_roc_auc = 0.0
# we distinguish 1-class and multiclass problems
if len(list_classes) == 1:
total_loss = log_loss(val_y, y_pred)
total_roc_auc = roc_auc_score(val_y, y_pred)
else:
for j in range(0, len(list_classes)):
labels = [0, 1]
loss = log_loss(val_y[:, j], y_pred[:, j], labels=labels)
total_loss += loss
try:
roc_auc = roc_auc_score(val_y[:, j], y_pred[:, j])
except ValueError as e:
LOGGER.debug('could not calculate roc (index=%d): %s', j, e)
roc_auc = np.nan
total_roc_auc += roc_auc
total_loss /= len(list_classes)
total_roc_auc /= len(list_classes)
if np.isnan(total_roc_auc):
use_roc_auc = False
if use_roc_auc:
LOGGER.info(
"Epoch %s loss %s best_loss %s (for info)",
current_epoch, total_loss, best_loss
)
LOGGER.info(
"Epoch %s roc_auc %s best_roc_auc %s (for early stop)",
current_epoch, total_roc_auc, best_roc_auc
)
else:
LOGGER.info(
"Epoch %s loss %s best_loss %s (for early stop)",
current_epoch, total_loss, best_loss
)
LOGGER.info(
"Epoch %s roc_auc %s best_roc_auc %s (for info)",
current_epoch, total_roc_auc, best_roc_auc
)
current_epoch += 1
if total_loss < best_loss or best_loss == -1 or math.isnan(best_loss) is True:
best_loss = total_loss
if use_roc_auc is False:
best_weights = model.get_weights()
best_epoch = current_epoch
elif use_roc_auc is False:
if current_epoch - best_epoch == 5:
break
if total_roc_auc > best_roc_auc or best_roc_auc == -1:
best_roc_auc = total_roc_auc
if use_roc_auc:
best_weights = model.get_weights()
best_epoch = current_epoch
elif use_roc_auc:
if current_epoch - best_epoch == 5:
break
model.set_weights(best_weights)
if use_roc_auc:
return model, best_roc_auc
else:
return model, best_loss | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/models.py | 0.699357 | 0.183942 | models.py | pypi |
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/utilities/Attention.py
# - updated to be compatible with newer Keras version
class Attention(Layer):
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
self.W = None
self.b = None
super().__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
self.features_dim = input_shape[-1]
if self.bias:
self.b = self.add_weight(shape=(input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, inputs, mask=None):
return None
def call(self, inputs, mask=None, **kwargs): # pylint: disable=arguments-differ
x = inputs
features_dim = self.features_dim
step_dim = self.step_dim
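        # score each timestep by projecting its features onto the learned weight vector W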
eij = K.reshape(
K.dot(
K.reshape(x, (-1, features_dim)),
K.reshape(self.W, (features_dim, 1))
),
(-1, step_dim)
)
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], self.features_dim | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/utils/models/Attention.py | 0.922023 | 0.362095 | Attention.py | pypi |
from __future__ import absolute_import
import logging
from io import StringIO
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
import apache_beam as beam
from apache_beam.io.textio import WriteToText
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filebasedsource import FileBasedSource
from sciencebeam_utils.beam_utils.utils import (
TransformAndLog
)
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename
)
def get_logger():
return logging.getLogger(__name__)
def DictToList(fields):
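    """Return a function that converts a dict into a list of values ordered by the given fields (missing fields become None)."""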
def wrapper(x):
get_logger().debug('DictToList: %s -> %s', fields, x)
return [x.get(field) for field in fields]
return wrapper
def _to_text(value):
try:
return text_type(value, encoding='utf-8')
except TypeError:
return text_type(value)
def format_csv_rows(rows, delimiter=','):
get_logger().debug('format_csv_rows, rows: %s', rows)
out = StringIO()
writer = csv.writer(out, delimiter=text_type(delimiter))
writer.writerows([
[_to_text(x) for x in row]
for row in rows
])
result = out.getvalue().rstrip('\r\n')
get_logger().debug('format_csv_rows, result: %s', result)
return result
class WriteDictCsv(beam.PTransform):
def __init__(self, path, columns, file_name_suffix=None):
super(WriteDictCsv, self).__init__()
self.path = path
self.columns = columns
self.file_name_suffix = file_name_suffix
self.delimiter = csv_delimiter_by_filename(path + file_name_suffix)
def expand(self, input_or_inputs):
return (
input_or_inputs |
"ToList" >> beam.Map(DictToList(self.columns)) |
"Format" >> TransformAndLog(
beam.Map(lambda x: format_csv_rows([x], delimiter=self.delimiter)),
log_prefix='formatted csv: ',
log_level='debug'
) |
"Utf8Encode" >> beam.Map(lambda x: x.encode('utf-8')) |
"Write" >> WriteToText(
self.path,
file_name_suffix=self.file_name_suffix,
header=format_csv_rows([self.columns], delimiter=self.delimiter).encode('utf-8')
)
)
def _strip_quotes(s):
return s[1:-1] if len(s) >= 2 and s[0] == '"' and s[-1] == '"' else s
# copied and modified from https://github.com/pabloem/beam_utils
# (move back if still active)
class ReadLineIterator:
def __init__(self, obj):
self._obj = obj
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
line = self._obj.readline().decode('utf-8')
if line is None or line == '':
raise StopIteration
return line
class CsvFileSource(FileBasedSource):
""" A source for a GCS or local comma-separated-file
Parses a text file assuming newline-delimited lines,
and comma-delimited fields. Assumes UTF-8 encoding.
"""
def __init__( # pylint: disable=too-many-arguments
self, file_pattern,
compression_type=CompressionTypes.AUTO,
delimiter=',', header=True, dictionary_output=True,
validate=True, limit=None):
""" Initialize a CsvFileSource.
Args:
delimiter: The delimiter character in the CSV file.
header: Whether the input file has a header or not.
Default: True
dictionary_output: The kind of records that the CsvFileSource outputs.
If True, then it will output dict()'s, if False it will output list()'s.
Default: True
Raises:
ValueError: If the input arguments are not consistent.
"""
super(CsvFileSource, self).__init__(
file_pattern,
compression_type=compression_type,
validate=validate,
splittable=False # Can't just split anywhere
)
self.delimiter = delimiter
self.header = header
self.dictionary_output = dictionary_output
self.limit = limit
self._file = None
if not self.header and dictionary_output:
raise ValueError(
'header is required for the CSV reader to provide dictionary output'
)
def read_records(self, file_name, offset_range_tracker):
# If a multi-file pattern was specified as a source then make sure the
# start/end offsets use the default values for reading the entire file.
headers = None
self._file = self.open_file(file_name)
reader = csv.reader(ReadLineIterator(self._file), delimiter=text_type(self.delimiter))
line_no = 0
for i, row in enumerate(reader):
if self.header and i == 0:
headers = row
continue
if self.limit and line_no >= self.limit:
break
line_no += 1
if self.dictionary_output:
yield dict(zip(headers, row))
else:
yield row
class ReadDictCsv(beam.PTransform):
"""
Simplified CSV parser, which does not support:
* multi-line values
* delimiter within value
"""
def __init__(self, filename, header=True, limit=None):
super(ReadDictCsv, self).__init__()
if not header:
raise RuntimeError('header required')
self.filename = filename
self.columns = None
self.delimiter = csv_delimiter_by_filename(filename)
self.limit = limit
self.row_num = 0
def expand(self, input_or_inputs):
return (
input_or_inputs |
beam.io.Read(CsvFileSource(
self.filename,
delimiter=self.delimiter,
limit=self.limit
))
) | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/beam_utils/csv.py | 0.630799 | 0.184217 | csv.py | pypi |
import logging
from random import getrandbits
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
def get_logger():
return logging.getLogger(__name__)
def Spy(f):
def spy_wrapper(x):
f(x)
return x
return spy_wrapper
def MapSpy(f):
return beam.Map(Spy(f))
def _default_exception_log_fn(exception, value):
get_logger().warning(
'caught exception (ignoring item): %s, input: %.100s...',
exception, value, exc_info=exception
)
def MapOrLog(fn, log_fn=None, error_count=None):
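    """Like beam.Map, but exceptions are logged (and optionally counted) and the failing element is dropped instead of failing the pipeline."""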
if log_fn is None:
log_fn = _default_exception_log_fn
error_counter = (
Metrics.counter('MapOrLog', error_count)
if error_count
else None
)
def wrapper(x):
try:
yield fn(x)
except Exception as e: # pylint: disable=broad-except
if error_counter:
error_counter.inc()
log_fn(e, x)
return beam.FlatMap(wrapper)
LEVEL_MAP = {
'info': logging.INFO,
'debug': logging.DEBUG
}
def Count(name, counter_value_fn):
counter = Metrics.counter('Count', name)
def wrapper(x):
counter.inc(counter_value_fn(x) if counter_value_fn else 1)
return x
return name >> beam.Map(wrapper)
class GroupTransforms(beam.PTransform):
"""
Convenience method to allow a PTransform for grouping purpose
to be defined using a lambda function.
(Completely unrelated to GroupBy transforms)
"""
def __init__(self, expand_fn):
super(GroupTransforms, self).__init__()
self.expand_fn = expand_fn
def expand(self, input_or_inputs):
return self.expand_fn(input_or_inputs)
def TransformAndCount(transform, counter_name, counter_value_fn=None):
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Count" >> Count(counter_name, counter_value_fn)
))
def _identity(x):
return x
def _get_default_output_log_fn(log_level, log_prefix, log_value_fn):
if log_value_fn is None:
log_value_fn = _identity
log_level = LEVEL_MAP.get(log_level, log_level)
def _log_fn(x):
get_logger().log(
log_level, '%s%.50s...', log_prefix, log_value_fn(x)
)
return _log_fn
def TransformAndLog(transform, log_fn=None, log_prefix='', log_value_fn=None, log_level='info'):
if log_fn is None:
log_fn = _get_default_output_log_fn(log_level, log_prefix, log_value_fn)
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Log" >> MapSpy(log_fn)
))
def random_key():
return getrandbits(32)
def _default_random_key_fn(_):
return random_key()
def PreventFusion(key_fn=None, name="PreventFusion"):
"""
Prevents fusion to allow better distribution across workers.
See:
https://cloud.google.com/dataflow/service/dataflow-service-desc#preventing-fusion
TODO Replace by: https://github.com/apache/beam/pull/4040
"""
if key_fn is None:
key_fn = _default_random_key_fn
return name >> GroupTransforms(lambda pcoll: (
pcoll |
"AddKey" >> beam.Map(lambda x: (key_fn(x), x)) |
"GroupByKey" >> beam.GroupByKey() |
"Ungroup" >> beam.FlatMap(lambda element: element[1])
)) | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/beam_utils/utils.py | 0.639398 | 0.198763 | utils.py | pypi |
import logging
import os
from functools import reduce # pylint: disable=redefined-builtin
from typing import Iterable, List, Tuple
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.collection import (
groupby_to_dict,
sort_and_groupby_to_dict
)
from .file_path import strip_ext
LOGGER = logging.getLogger(__name__)
def find_matching_filenames(pattern):
return (x.path for x in FileSystems.match([pattern])[0].metadata_list)
def group_files_by_parent_directory(filenames):
return groupby_to_dict(sorted(filenames), os.path.dirname)
def group_files_by_name_excl_ext(filenames):
return sort_and_groupby_to_dict(filenames, strip_ext)
def zip_by_keys(*dict_list):
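    """Iterate over the union of keys of the given dicts in sorted order, yielding for each key the list of values from every dict (None where missing)."""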
keys = reduce(lambda agg, v: agg | set(v.keys()), dict_list, set())
return (
[d.get(k) for d in dict_list]
for k in sorted(keys)
)
def group_file_pairs_by_parent_directory_or_name(
files_by_type: List[List[str]]
) -> Iterable[Tuple[str, ...]]:
grouped_files_by_pattern = [
group_files_by_parent_directory(files) for files in files_by_type
]
for files_in_group_by_pattern in zip_by_keys(*grouped_files_by_pattern):
if all(len(files or []) == 1 for files in files_in_group_by_pattern):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_in_group_by_pattern
])
else:
grouped_by_name = [
group_files_by_name_excl_ext(files or [])
for files in files_in_group_by_pattern
]
for files_by_name in zip_by_keys(*grouped_by_name):
if all(len(files or []) == 1 for files in files_by_name):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_by_name
])
else:
LOGGER.info(
'no exclusively matching files found: %s',
list(files_by_name)
)
def find_file_pairs_grouped_by_parent_directory_or_name(patterns):
matching_files_by_pattern = [
list(find_matching_filenames(pattern)) for pattern in patterns
]
LOGGER.info(
'found number of files %s',
', '.join(
'%s: %d' % (pattern, len(files))
for pattern, files in zip(patterns, matching_files_by_pattern)
)
)
patterns_without_files = [
pattern
for pattern, files in zip(patterns, matching_files_by_pattern)
if len(files) == 0
]
if patterns_without_files:
raise RuntimeError('no files found for: %s' % patterns_without_files)
return group_file_pairs_by_parent_directory_or_name(
matching_files_by_pattern
) | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/utils/file_pairs.py | 0.514888 | 0.170854 | file_pairs.py | pypi |
import argparse
import logging
import errno
from math import trunc
from random import shuffle
from datetime import datetime
from itertools import chain
from typing import List
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
from sciencebeam_utils.beam_utils.io import open_file
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename,
write_csv_rows
)
from sciencebeam_utils.utils.file_path import (
strip_ext,
get_ext
)
from sciencebeam_utils.tools.tool_utils import (
setup_logging,
add_default_args,
process_default_args
)
LOGGER = logging.getLogger(__name__)
Row = List[str]
def extract_proportions_from_args(args):
digits = 3
proportions = [
(name, round(p, digits))
for name, p in [
('train', args.train),
('test', args.test),
('validation', args.validation)
]
if p and p > 0
]
if sum(p for _, p in proportions) > 1.0:
raise ValueError('proportions add up to more than 1.0')
if not args.test:
proportions.append(('test', 1.0 - sum(p for _, p in proportions)))
elif not args.validation:
proportions.append(('validation', round(1.0 - sum(p for _, p in proportions), digits)))
proportions = [(name, p) for name, p in proportions if p > 0]
return proportions
def get_chunk_size_list(size, percentages, fill=False):
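    """Convert proportions into absolute chunk sizes for a dataset of the given size; with fill=True the last chunk takes all remaining rows."""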
chunk_size_list = [int(trunc(p * size)) for p in percentages]
if fill:
chunk_size_list[-1] = size - sum(chunk_size_list[:-1])
return chunk_size_list
def split_row_chunks(rows, chunk_size_list):
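    """Split rows into consecutive chunks of the given sizes."""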
chunk_offset_list = [0]
for chunk_size in chunk_size_list[0:-1]:
chunk_offset_list.append(chunk_offset_list[-1] + chunk_size)
LOGGER.debug('chunk_offset_list: %s', chunk_offset_list)
LOGGER.debug('chunk_size_list: %s', chunk_size_list)
return [
rows[chunk_offset:chunk_offset + chunk_size]
for chunk_offset, chunk_size in zip(chunk_offset_list, chunk_size_list)
]
def _to_hashable(value):
return tuple(value)
def _to_row_set(rows):
return {_to_hashable(row) for row in rows}
def _split_rows_without_existing_split(rows, percentages, fill=False):
chunk_size_list = get_chunk_size_list(len(rows), percentages, fill=fill)
return split_row_chunks(rows, chunk_size_list)
def _substract_list(list1, list2):
return [a - b for a, b in zip(list1, list2)]
def split_rows(
rows: List[Row],
percentages: List[float],
fill: bool = False,
existing_split: List[List[Row]] = None) -> List[List[Row]]:
if not existing_split:
return _split_rows_without_existing_split(rows, percentages, fill=fill)
LOGGER.debug('existing_split: %s', existing_split)
all_current_rows = _to_row_set(rows)
all_existing_rows = _to_row_set(chain(*existing_split))
not_existing_rows = all_existing_rows - all_current_rows
if not_existing_rows:
LOGGER.warning(
'some rows (%d of %d) from the existing split do not exist'
' in the source list and will be removed, e.g.: %s',
len(not_existing_rows), len(all_existing_rows), list(not_existing_rows)[:3]
)
existing_split = [
[row for row in existing_rows if _to_hashable(row) in all_current_rows]
for existing_rows in existing_split
]
remaining_rows = [row for row in rows if _to_hashable(row) not in all_existing_rows]
chunk_size_list = get_chunk_size_list(len(rows), percentages, fill=fill)
existing_chunk_size_list = [len(existing_rows) for existing_rows in existing_split]
remaining_chunk_size_list = _substract_list(chunk_size_list, existing_chunk_size_list)
return [
existing_rows + new_split
for existing_rows, new_split in zip(
existing_split, split_row_chunks(remaining_rows, remaining_chunk_size_list)
)
]
def output_filenames_for_names(names, prefix, ext):
return [
prefix + ('' if prefix.endswith('/') else '-') + name + ext
for name in names
]
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input', type=str, required=True,
help='input csv/tsv file'
)
parser.add_argument(
'--train', type=float, required=True,
help='Train dataset proportion'
)
parser.add_argument(
'--test', type=float, required=False,
help='Test dataset proportion '
'(if not specified it is assumed to be the remaining percentage)'
)
parser.add_argument(
'--validation', type=float, required=False,
help='Validation dataset proportion (requires test-proportion)'
)
parser.add_argument(
'--random', action='store_true', default=False,
help='randomise samples before doing the split'
)
parser.add_argument(
'--fill', action='store_true', default=False,
help='use up all of the remaining data rows for the last set'
)
parser.add_argument(
'--no-extend-existing', action='store_true', default=False,
        help='do not extend and preserve the existing split (new entries will be added by default)'
)
parser.add_argument(
'--no-header', action='store_true', default=False,
help='input file does not contain a header'
)
parser.add_argument(
'--out', type=str, required=False,
help='output csv/tsv file prefix or directory (if ending with slash)'
' will use input file name by default'
)
add_default_args(parser)
return parser.parse_args(argv)
def process_args(args):
if not args.out:
args.out = strip_ext(args.input)
def read_csv_with_header(input_filename, delimiter, no_header):
with open_file(input_filename, 'r') as f:
reader = csv.reader(f, delimiter=text_type(delimiter))
header_row = None if no_header else next(reader)
data_rows = list(reader)
return header_row, data_rows
def read_csv_data(input_filename, delimiter, no_header):
_, data_rows = read_csv_with_header(input_filename, delimiter, no_header)
return data_rows
def load_file_sets(filenames, delimiter, no_header):
return [
read_csv_data(filename, delimiter, no_header)
for filename in filenames
]
def load_file_sets_or_none(filenames, delimiter, no_header):
try:
return load_file_sets(filenames, delimiter, no_header)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise e
def save_file_set(output_filename, delimiter, header_row, set_data_rows):
mime_type = 'text/tsv' if delimiter == '\t' else 'text/csv'
with open_file(output_filename, 'w', mime_type=mime_type) as f:
writer = csv.writer(f, delimiter=text_type(delimiter))
if header_row:
write_csv_rows(writer, [header_row])
write_csv_rows(writer, set_data_rows)
def save_file_sets(output_filenames, delimiter, header_row, data_rows_by_set):
for output_filename, set_data_rows in zip(output_filenames, data_rows_by_set):
LOGGER.info('set size: %d (%s)', len(set_data_rows), output_filename)
save_file_set(output_filename, delimiter, header_row, set_data_rows)
def get_backup_file_suffix():
return '.backup-%s' % datetime.utcnow().strftime(r'%Y%m%d-%H%M%S')
def run(args):
LOGGER.debug('args: %s', args)
process_args(args)
ext = get_ext(args.input)
proportions = extract_proportions_from_args(args)
output_filenames = output_filenames_for_names(
[name for name, _ in proportions],
args.out,
ext
)
LOGGER.info('proportions: %s', proportions)
LOGGER.info('output_filenames: %s', output_filenames)
delimiter = csv_delimiter_by_filename(args.input)
header_row, data_rows = read_csv_with_header(args.input, delimiter, args.no_header)
LOGGER.info('number of rows: %d', len(data_rows))
if args.random:
shuffle(data_rows)
existing_file_sets = load_file_sets_or_none(output_filenames, delimiter, args.no_header)
data_rows_by_set = split_rows(
data_rows,
[p for _, p in proportions],
fill=args.fill,
existing_split=existing_file_sets if not args.no_extend_existing else None
)
if existing_file_sets:
backup_suffix = get_backup_file_suffix()
save_file_sets(
[s + backup_suffix for s in output_filenames],
delimiter,
header_row,
existing_file_sets
)
save_file_sets(
output_filenames,
delimiter,
header_row,
data_rows_by_set
)
def main(argv=None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == '__main__':
setup_logging()
main() | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/tools/split_csv_dataset.py | 0.499512 | 0.259914 | split_csv_dataset.py | pypi |
# In[1]:
from datetime import datetime, timedelta
# In[2]:
def create_date_from_str(date_str, date_format='%Y%m%d'):
'''
Create a Datetime object from a string with specific date_format.
date_str: a date string (required).
date_format: the date format of date_str. Default is %Y%m%d.
'''
return datetime.strptime(date_str, date_format)
# In[3]:
def get_today(date_format='%Y%m%d', date_object=False):
'''
Create a Date Object or String of Today.
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = datetime.today()
tody = datetime(t.year, t.month, t.day)
if (date_object == True):
return tody
else:
return datetime.strftime(tody, date_format)
# In[4]:
def get_yesterday(date_format='%Y%m%d', date_object=False):
'''
Create a Date Object or String of Yesterday.
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = datetime.today() - timedelta(days=1)
yesterday = datetime(t.year, t.month, t.day)
if (date_object == True):
return yesterday
else:
return datetime.strftime(yesterday, date_format)
# In[5]:
def get_current_month(date_format='%Y%m', date_object=False):
'''
Return current month.
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
t = datetime.today()
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
# In[6]:
def get_last_month(date_format='%Y%m', date_object=False):
'''
Return last month date.
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
current_month = get_current_month(date_object=True)
t = current_month - timedelta(days=current_month.day)
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
# In[7]:
def get_previous_nmonth(n, date_format='%Y%m', date_object=False):
'''
    Return the month that is n months before the current month.
    n: number of months to go back, n >= 0. (Required)
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
t = get_current_month(date_object=True)
for i in range(n):
t = t - timedelta(days=t.day)
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
# In[16]:
def get_same_day_last_week(date, date_format='%Y%m%d', date_object=False):
'''
Return the same day of the week of last week.
date: a date object. (Required)
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = date - timedelta(days=7)
if (date_object == True):
return t
else:
return datetime.strftime(t, date_format)
# In[9]:
def get_same_day_last_month(date, date_format='%Y%m%d', date_object=False):
'''
Return the same day of last month.
date: a date object. (Required)
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = date - timedelta(days=date.day)
t = t.replace(day=date.day)
if (date_object == True):
return t
else:
return datetime.strftime(t, date_format)
# In[ ]:
#!jupyter nbconvert --to script human_date.ipynb | /scienceindata_dates-0.0.3.tar.gz/scienceindata_dates-0.0.3/src/scienceindata_dates/scienceindata_dates.py | 0.789356 | 0.374076 | scienceindata_dates.py | pypi |
# ScienceIO API Demo
In this demo, we'll:
- Log in with our user account
- Make our first request
- Put the request in a pandas dataframe and analyze
```
import pandas as pd
import yaml
from IPython.display import display, JSON
from analytics import *
from scienceio import ScienceIO
```
## Initialize client
```
scio = ScienceIO()
```
If the account already exists (based on the login), you will be notified and can proceed to logging in.
## Make a request to the API
```
query_text = (
"""
The patient is a 21-day-old Caucasian male here for 2 days of congestion -
mom has been suctioning yellow discharge from the patient's nares, plus she has noticed
some mild problems with his breathing while feeding (but negative for any perioral cyanosis or retractions).
One day ago, mom also noticed a tactile temperature and gave the patient Tylenol. Baby also has
had some decreased p.o. intake. His normal breast-feeding is down from 20 minutes q.2h.
to 5 to 10 minutes secondary to his respiratory congestion. He sleeps well, but has been more tired
and has been fussy over the past 2 days. The parents noticed no improvement with albuterol treatments given
in the ER. His urine output has also decreased; normally he has 8 to 10 wet and 5 dirty diapers per 24 hours,
now he has down to 4 wet diapers per 24 hours. Mom denies any diarrhea. His bowel movements are yellow colored and soft in nature.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df)
report(df)
# This is the text we'll send to the API
query_text = (
"""
The COVID-19 pandemic has shown a markedly low proportion of
cases among children. Age disparities in observed COVID-19 cases could be
explained by children having lower susceptibility to infection, lower
propensity to show clinical symptoms or both. COVID-19 has shown to induce
Kawasaki syndrome in some children. Kawasaki (KD), or Mucocutaneous Lymph Node Syndrome
as it is often called, is an acute illness that affects mostly children 6 months to 5 years
old—median age in the KD cohort was 25 months—and features inflammation of small and medium
blood vessels. Although the cause of KD is unknown, it is believed to occur in genetically
predisposed children after exposure to an environmental trigger such as an infection.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df)
```
## Viewing the results
```
report(df)
# top mentions
get_top(df, "text")
# top concepts
get_top(df, "concept_name")
# top concept types
get_top(df, "concept_type")
```
| /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-analytics-2.ipynb | 0.479747 | 0.902395 | example-analytics-2.ipynb | pypi |
import argparse
import pandas as pd
from convert_data_model import convert_data_model
def count_text(df) -> int:
"""len(df) = # of text spans"""
return len(df)
def count_text_unique(df) -> int:
"""unique text spans (no correction for caps/lower/etc.)"""
return df.text.nunique()
def count_concepts_unique(df) -> int:
"""unique biomedical concepts"""
return df.concept_id.nunique()
def count_types_unique(df) -> int:
"""unique concept types"""
return df.concept_type.nunique()
def quantizer(score):
"""
Quantizes scores with desired range
Run with:
df[col] = df[col].apply(lambda x: quantizer(x))
to transform column into quantized values (or set to new column)
"""
if score >= 0.99:
return "Very High"
elif score >= 0.9:
return "High"
elif score >= 0.7:
return "Moderate"
elif score >= 0.5:
return "Low"
else:
return "Very Low"
def quantize_scores(df):
"""Quantize the scores in the dataframe"""
df["score_id"] = df["score_id"].apply(lambda x: quantizer(x))
df["score_type"] = df["score_type"].apply(lambda x: quantizer(x))
return df
def get_score_counts(df, col="score_id"):
"""Returns counts by score"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
)
def get_score_dict(df, col="score_id"):
"""Returns a dict of counts by score_id"""
# get counts
conf = get_score_counts(df, col)
# zip two columns to create dict
conf_dict = dict(zip(conf[col], conf["mentions"]))
# add zero values
for k in ["Very High", "High", "Moderate", "Low", "Very Low"]:
if not k in conf_dict:
conf_dict[k] = 0
return conf_dict
def get_top(df, col="concept_name", N: int = 10):
"""get top N values by count"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
.head(n=N)
)
def get_top_dict(df, col="concept_name", N: int = 10):
"""Get top values as dict with ordered lists"""
return get_top(df, col, N).to_dict("list")
def report(df):
"""get report of basic summary stats"""
print(
f"Found {count_text(df)} mentions of healthcare information ({count_text_unique(df)} unique)."
)
print(
f"Found {count_concepts_unique(df)} unique concepts, spanning {count_types_unique(df)} categories."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, default=None, help="File to analyze")
args = parser.parse_args()
# read file
df = pd.read_csv(args.filename, sep=None, engine="python")
# convert and quantize
df = convert_data_model(df)
df = quantize_scores(df) | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/analytics.py | 0.758242 | 0.430506 | analytics.py | pypi |
# ScienceIO API Analytics
In this demo, we'll:
- Log in with our user account
- Make our first request
- Put the request in a pandas dataframe and analyze
```
import pandas as pd
import yaml
from IPython.display import display, JSON
from analytics import *
from scienceio import ScienceIO
```
## Initialize client
```
scio = ScienceIO()
```
If the account already exists (based on the login), you will be notified and can proceed to logging in.
## Make a request to the API
```
# This is the text we'll send to the API
query_text = (
"""
The COVID-19 pandemic has shown a markedly low proportion of
cases among children. Age disparities in observed COVID-19 cases could be
explained by children having lower susceptibility to infection, lower
propensity to show clinical symptoms or both. COVID-19 has shown to induce
Kawasaki syndrome in some children. Kawasaki (KD), or Mucocutaneous Lymph Node Syndrome
as it is often called, is an acute illness that affects mostly children 6 months to 5 years
old—median age in the KD cohort was 25 months—and features inflammation of small and medium
blood vessels. Although the cause of KD is unknown, it is believed to occur in genetically
predisposed children after exposure to an environmental trigger such as an infection.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
df = quantize_scores(df)
display(df.head())
```
## Viewing the results
```
report(df)
# top mentions
get_top_dict(df, "text")
# top concepts
get_top_dict(df, "concept_name")
# top concept types
get_top_dict(df, "concept_type")
# count for score_id
get_score_dict(df)
```
| /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-analytics-1.ipynb | 0.537041 | 0.886273 | example-analytics-1.ipynb | pypi |
import argparse
import pandas as pd
def beta_mapper():
"""For converting previous data models to beta model"""
return {
"text": "text",
"start": "pos_start",
"end": "pos_end",
"text_norm": "concept_id",
"entity": "concept_id",
"canonical_name": "concept_name",
"entity_p": "score_id",
"tag": "concept_type",
"entity_type": "concept_type",
"entity_type_p": "score_type",
"entity_subtype": "concept_subtype",
}
def remap_concept_types(df):
"""Convert legacy concept types to beta types"""
type_mapper = {
"Activity": "Context",
"Anatomy": "Anatomy & Physiology",
"Boolean": "Context",
"Cardinal": "Context",
"Cell": "Cell Biology",
"Cell Component": "Cell Biology",
"CellLine": "Cell Biology",
"Chemical": "Chemicals & Drugs",
"Concept": "Context",
"Device": "Medical Devices",
"Disease": "Medical Conditions",
"Gene": "Genetics",
"Geography": "Context",
"Group": "Context",
"Mutation": "Genetics",
"None": "None",
"Nucleic Acid, Nucleoside, or Nucleotide": "Genetics",
"Object": "Context",
"Occupation": "Context",
"Organization": "Context",
"Phenomenon": "Anatomy & Physiology",
"Physiology": "Anatomy & Physiology",
"Procedure": "Medical Procedures",
"Species": "Species & Viruses",
}
if "concept_type" in df.columns:
return df.replace({"concept_type": type_mapper})
else:
return df.replace({"entity_type": type_mapper})
def convert_data_model(df):
"""Remap column names and change types"""
df = df.rename(columns=beta_mapper())
df = remap_concept_types(df)
return df
def save_dataframe(df, filename: str = "input.tsv", sep: str = "\t"):
"""Save locally"""
if "." in filename:
filename = filename.split(".")[0] + "_REMAPPED.tsv"
df.to_csv(filename, sep=sep, index=False)
def main(filename):
"""Convert file to proper data model"""
print(f"Reading: {filename}")
df = pd.read_csv(filename, sep=None, engine="python")
df_remapped = convert_data_model(df)
save_dataframe(df_remapped, filename)
print(f"Saved to: {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"filename", type=str, default=None, help="File to have data model remapped"
)
args = parser.parse_args()
main(filename=args.filename) | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/convert_data_model.py | 0.580471 | 0.417568 | convert_data_model.py | pypi |
<div id="top"></div>
<h1 align="center">
<br>
Sciencer Toolkit
</h1>
<h4 align="center">A smarter way to find articles.</h4>
<p align="center">
<a href="https://pypi.org/project/sciencer/">
<img src="https://img.shields.io/pypi/status/sciencer.svg?style=flat-square"
alt="PyPi Package Version"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/issues">
<img src="https://img.shields.io/github/issues-raw/SciencerIO/sciencer-toolkit.svg?style=flat-square&logo=github&logoColor=white"
alt="GitHub issues"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/pulls">
<img src="https://img.shields.io/github/issues-pr-raw/SciencerIO/sciencer-toolkit.svg?style=flat-square&logo=github&logoColor=white"
alt="GitHub pull requests"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/LICENSE">
<img src="https://img.shields.io/github/license/SciencerIO/sciencer-toolkit.svg?style=flat-square"
alt="License: MIT License"></a>
</p>
<p align="center">
<a href="#about">About</a> -
<a href="#usage">Usage</a> -
<a href="#roadmap">Roadmap</a> -
<a href="#contributing">Contributing</a>
</p>
<p align="center">
<a href="#collectors">Collectors</a> -
<a href="#expanders">Expanders</a> -
<a href="#filters">Filters</a> -
<a href="#providers">Providers</a>
</p>
---
## About
Sciencer Toolkit enables researchers to **programmatically conduct a literature review** using an intuitive yet flexible interface.
At its core, Sciencer collects sets of papers.
The initial set of papers is created through the use of **Collectors** (e.g. paper doi, author name).
Then, Sciencer iteratively finds new papers using **Expanders** (e.g. authors, citations, references).
Finally, new found papers need to satisfy a series of **Filters** in order to be accepted into the current set.
Being iterative in nature, Sciencer allows you to repeat the above steps has many times as you'd like.
This project was motivated by the absence of tools to automate systematic reviews using clear and well-defined criteria.
Still, for literature reviews that do not need to follow specific criteria, there are a several tools that can help to discover new papers.
## Usage
```python
# Create the Sciencer Core Component
sciencer = Sciencer()
# Define provider
sciencer.add_provider(SemanticScholarProvider())
# Define collectors
## this collector will gather all known papers authored by "John Doe" into de set
sciencer.add_collector(sciencer.collectors.CollectByAuthorID("John Doe"))
## this collector will collect the paper with DOI "1234567890" into the set
sciencer.add_collector(sciencer.collectors.CollectByDOI("1234567890"))
## this collector will collect the papers with
sciencer.add_collector(sciencer.collectors.CollectByTerms(["Term 1", "Term 2", "Term 3"]))
# Define expanders
## this expander will gather all known papers written by authors in the current set.
sciencer.add_expander(sciencer.expanders.ExpandByAuthors())
## this expander will gather all the referenced papers
sciencer.add_expander(sciencer.expanders.ExpandByReferences())
## this expander will gather all the cited papers
sciencer.add_expander(sciencer.expanders.ExpandByCitations())
# Define filters
## this filter will reject papers that were published before 2010 and after 2030
sciencer.add_filter(sciencer.filters.FilterByYear(min_year=2010, max_year=2030))
## this filter will reject all the papers that do not have the word social on the abstract
sciencer.add_filter(sciencer.filters.FilterByAbstract("social"))
## this filter will reject all the papers that do not have the field of study Computer Science
sciencer.add_filter(sciencer.filters.FilterByFieldOfStudy("Computer Science"))
# Run one iteration
results = sciencer.iterate()
```
For more examples on how to use the Sciencer toolkit, please check the directory `examples/`.
<p align="right">(<a href="#top">back to top</a>)</p>
## Collectors
| Name | Description | Parameters |
| --------- | :------------------------------------------- | :---------------------------------------- |
| Author ID | Collects all the papers written by an author | Authors's SemanticScholar ID |
| Paper DOI | Collects a paper by its DOI | Paper's DOI |
| Terms | Collects papers by terms | Query Terms <br> Maximum Number of Papers |
<p align="right">(<a href="#top">back to top</a>)</p>
## Expanders
| Name | Description |
| ---------- | :-------------------------------- |
| Authors | Expands a paper by its authors |
| References | Expands a paper by its references |
| Citations  | Expands a paper by its citations  |
<p align="right">(<a href="#top">back to top</a>)</p>
## Filters
| Name | Description | Parameters |
| ----------------- | :------------------------------------ | ----------------------------------------------------------------------------------- |
| By Year | Filters a paper by its year | The lowest acceptable year (inclusive) <br> The highest acceptable year (inclusive) |
| By Abstract Words | Filters a paper by its abstract | The collection of words the abstract should include (at least one) |
| By Field Of Study | Filters a paper by its field of study | The field of study the paper should have |
<p align="right">(<a href="#top">back to top</a>)</p>
## Providers
| Name | Provider | Features |
| :--------------: | :--------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------- |
| Semantic Scholar | [Semantic Scholar Academic Graph API](https://www.semanticscholar.org/product/api) | **Search by Author** (Name, S2ID) <br> **Search By Paper ID** (S2ID, DOI, ArXiv, MAG, ACL, PubMed, Corpus) |
| DBLP | [DBLP Search API](https://dblp.org/faq/How+to+use+the+dblp+search+API.html) | *Work in Progress* |
<p align="right">(<a href="#top">back to top</a>)</p>
## Roadmap
- [ ] Create Paper's and Author's Cache
- [x] Add Bulk Expanders (to avoid redundancy)
- [ ] Add support for multithreading
- [ ] Add Collectors
- [ ] Add Collect by Venue/Proceedings
- [ ] Add Expanders
- [x] Add Expand by Citations
- [x] Add Expand by References
- [ ] Add Expand by Venue/Proceedings
- [ ] Add Filters
- [ ] Add Filter by Number of Citations
- [x] Add Filter by Topic
- [ ] Add Filter by Keywords
- [ ] Add Compound Filters
- [x] Add utility to write results to a *.csv
See the [open issues](https://github.com/SciencerIO/sciencer-toolkit/issues) for a full list of proposed features (and known issues).
<p align="right">(<a href="#top">back to top</a>)</p>
## Contributing
Want to **add a new provider, filter or expander**?
Looking to improve **the core functionality of the Sciencer toolkit**?
We look forward to including your contributions in the toolkit!
If you have a suggestion that would improve the toolkit, just send us a Pull Request!
If you are looking for an additional collector/filter/expander/provider or just want to report a bug, you can also simply open an issue with the tag "enhancement" or "bug", respectively.
<p align="right">(<a href="#top">back to top</a>)</p>
| /sciencer-0.1.3.tar.gz/sciencer-0.1.3/README.md | 0.528777 | 0.817538 | README.md | pypi |
<h1 align="center">
ScienceWorld
</h1>
<p align="center">
<!-- Version badge using shields.io -->
<a href="https://github.com/allenai/ScienceWorld/releases">
<img src="https://img.shields.io/github/v/release/allenai/ScienceWorld">
</a>
<!-- Link to tutorials badge using shields.io -->
<a href="https://huggingface.co/spaces/MarcCote/ScienceWorld">
<img src="https://img.shields.io/badge/🤗-Demo-yellow">
</a>
<!-- Follow on twitter badge using shields.io -->
<a href="https://sciworld.apps.allenai.org">
<img src="https://img.shields.io/badge/Website-green">
</a>
</p>
ScienceWorld is a text-based virtual environment centered around accomplishing tasks from the standardized elementary science curriculum. This code accompanies the paper [ScienceWorld: Is your Textual Agent Smarter than a 5th grader?](https://arxiv.org/abs/2203.07540).
<h3 align="center"><img src="https://github.com/allenai/ScienceWorld/blob/main/media/scienceworld_environment.png" width="75%"/></h3>
### Demo and examples
You can try ScienceWorld yourself via our [HuggingFace Space](https://huggingface.co/spaces/MarcCote/ScienceWorld) or read some of the [playthrough transcripts](https://sciworld.apps.allenai.org/explore).
### Citation
```
@misc{scienceworld2022,
title={ScienceWorld: Is your Agent Smarter than a 5th Grader?},
author={Ruoyao Wang and Peter Jansen and Marc-Alexandre C{\^o}t{\'e} and Prithviraj Ammanabrolu},
year={2022},
eprint={2203.07540},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2203.07540}
}
```
# Quickstart
**Before running:** You will need `Java 1.8+` installed on your system (it ships with most Linux distributions).
Install with pip:
```bash
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld
```
Run an example random agent, on task 13 (classification: place a non-living thing in a box), for 5 episodes:
> python examples/random_agent.py --task-num=13 --num-episodes=5 --simplifications-preset easy
Run a user console where you can interact with the environment, on task 3 (change of state: melting):
> python examples/human.py --task-num=3 --num-episodes=5
# Web Server Demo
A web server demo is also available, that allows running a ScienceWorld user console that can be interacted with in a web browser.
<h3 align="center"><img src="https://github.com/allenai/ScienceWorld/blob/main/media/web_demo_screenshot.png" width="75%"/></h3>
To run the web server demo:
```bash
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld[webserver]
```
Run the web server:
> python examples/scienceworld-web-server-example.py
Point your web browser to:
`localhost:8080`
# ScienceWorld Design
ScienceWorld is written in Scala (2.12.9), and compiles using `sbt` into a JAR file that is run with Java. For convenience, a Python API is provided (Python >= 3.7), which interfaces using the `py4j` package.
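In practice, the Python API follows the pattern used by the scripts in `examples/` — create an environment, load a task, then alternate `step()` calls with reading observations. The snippet below is a minimal sketch; argument names and return values are taken from the bundled example agents, so check `examples/random_agent.py` for the exact signatures:

```python
from scienceworld import ScienceWorldEnv

env = ScienceWorldEnv("", envStepLimit=100)
env.load("melt", variationIdx=0, simplificationStr="easy")

obs, info = env.reset()
obs, reward, isCompleted, info = env.step("look around")
print(obs, reward, isCompleted)
```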
# Tasks
The tasks are listed in the table below along with their number of variations. Either the task ID or its name can be used to a task with `env.load()`.
| Task ID | Task Name | # Variations |
|-------|----------------------------------------------------|------|
| 1-1 | boil | 30 |
| 1-2 | melt | 30 |
| 1-3 | freeze | 30 |
| 1-4 | change-the-state-of-matter-of | 30 |
| 2-1 | use-thermometer | 540 |
| 2-2 | measure-melting-point-known-substance | 436 |
| 2-3 | measure-melting-point-unknown-substance | 300 |
| 3-1 | power-component | 20 |
| 3-2 | power-component-renewable-vs-nonrenewable-energy | 20 |
| 3-3 | test-conductivity | 900 |
| 3-4 | test-conductivity-of-unknown-substances | 600 |
| 4-1 | find-living-thing | 300 |
| 4-2 | find-non-living-thing | 300 |
| 4-3 | find-plant | 300 |
| 4-4 | find-animal | 300 |
| 5-1 | grow-plant | 126 |
| 5-2 | grow-fruit | 126 |
| 6-1 | chemistry-mix | 32 |
| 6-2 | chemistry-mix-paint-secondary-color | 36 |
| 6-3 | chemistry-mix-paint-tertiary-color | 36 |
| 7-1 | lifespan-longest-lived | 125 |
| 7-2 | lifespan-shortest-lived | 125 |
| 7-3 | lifespan-longest-lived-then-shortest-lived | 125 |
| 8-1 | identify-life-stages-1 | 14 |
| 8-2 | identify-life-stages-2 | 10 |
| 9-1 | inclined-plane-determine-angle | 168 |
| 9-2 | inclined-plane-friction-named-surfaces | 1386 |
| 9-3 | inclined-plane-friction-unnamed-surfaces | 162 |
| 10-1 | mendelian-genetics-known-plant | 120 |
| 10-2 | mendelian-genetics-unknown-plant | 480 |
# Baseline Agents
**DRRN:** https://github.com/cognitiveailab/drrn-scienceworld
**KG-A2C:** https://github.com/cognitiveailab/kga2c-scienceworld
**CALM:** https://github.com/cognitiveailab/calm-scienceworld
**Behavior Cloning and Decision Transformer:** https://github.com/cognitiveailab/t5-scienceworld
| /scienceworld-1.1.3.tar.gz/scienceworld-1.1.3/README.md | 0.512449 | 0.93744 | README.md | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
https://dylan1467.github.io/contact.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /scienlib-1.1.tar.gz/scienlib-1.1/.github/CODE_OF_CONDUCT.md | 0.58439 | 0.685038 | CODE_OF_CONDUCT.md | pypi |
from __future__ import annotations
import re
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from sem.structure_parsing import recursive_folder_parsing
class ResultManager:
"""A manager for experimental results.
It takes care of collecting results organized in different folders.
"""
def __init__(
self,
root_dir: Optional[Path],
parsers: List[re.Pattern],
dataframe: Optional[pd.DataFrame] = None,
):
"""Initialize the ResultManager.
:param root_dir: root directory.
:param parsers: list of regular expression patterns.
:param dataframe: optional dataframe of already parsed arguments.
"""
self.root_dir = root_dir
self.parsers = parsers
self.df = dataframe
@classmethod
def from_arguments(
cls,
root_dir: Path,
arguments: List[Dict[str, str] | str] | Dict[str, str] | str,
auto_sort: bool = False,
) -> ResultManager:
"""Create an instance of ResultManager from the given arguments.
The single can be specified in two ways:
- as a single string. In this case there will be no parsing and only folders
with the specified name will be parsed
- as a {key: value} dictionary. In this case, every key is the name of a
parameter, and the value is a string specifying the regular expression for
parsing.
If a list of arguments is provided, this specifies a hierarchical folder
structure, every level has parsing specified by the relative list argument.
If auto_sort is set to True, within every dictionary the arguments are sorted in
a canonical way.
:param root_dir: root directory
:param arguments: arguments, see `help(ResultManager)`
:param auto_sort: whether to sort the arguments in the canonical order
:return: a ArgumentManager instance
"""
if not isinstance(arguments, list):
arguments = [arguments]
parsers = [
re.compile(_pattern_from_arguments(arg, auto_sort)) for arg in arguments
]
return ResultManager(root_dir, parsers)
@classmethod
def create_default_path(
cls,
root_dir: str | Path,
values: List[Dict[str, Any] | str] | Dict[str, Any] | str,
auto_sort: bool = False,
) -> Path:
"""Create the default path given
:param root_dir: root directory
:param values: the values specifying the directory structure.
If it is a string, it specifies a simple directory name.
If it is a dictionary, it specifies the {parameter name: value} that the
directory name describes.
If it is a list, it contains the string or dictionaries of values at every
sub-level of root:dir.
:param auto_sort: whether to sort the values specified as dictionaries according
to the canonical order.
:return: the path to a sub-folder with specified names.
"""
root_dir = Path(root_dir)
if not isinstance(values, list):
values = [values]
path = root_dir
for arg in values:
dir_name = (
arg if isinstance(arg, str) else _dirname_from_values(arg, auto_sort)
)
path = path / dir_name
return path
@property
def patterns(self) -> List[str]:
return [parser.pattern for parser in self.parsers]
def parse_paths(self, **kwargs) -> None:
"""Recursively parse the root directory according to the specified parsers."""
records = [
{**{"__PATH__": res_path}, **match_group_args}
for res_path, match_group_args in recursive_folder_parsing(
self.root_dir, self.parsers
)
]
self.df = pd.DataFrame.from_records(records, **kwargs)
def filter_results(
self,
equal: Optional[Dict[str, Any]] = None,
contained: Optional[Dict[str, Any]] = None,
) -> pd.DataFrame:
"""Filter the results in the result dataframe by row.
The rows whose column values are equal and/or contained in those specified, are
returned in the form of a new data frame. Notice that this method is different
from pandas DataFrame's filter, which filters along both axes and only by
column or row name.
:param equal: dictionary of {column name: value} pairs.
Rows with column value equal to that specified are returned.
:param contained: dictionary of {column name: iterable of values} pairs:
Rows with column values contained in those specified are returned.
:return: a new data frame according to the specified filters.
"""
filtered = self.df
if equal is not None:
for key, val in equal.items():
mask = filtered[key] == val
filtered = filtered[mask]
if contained is not None:
for key, val in contained.items():
mask = filtered[key].isin(val)
filtered = filtered[mask]
return filtered
# Utility functions specifying how the ResultManager builds the patterns.
# We use the following name convention for the dictionaries of arguments and values:
# - arguments are utilized to build regular expression patterns. They consist of
# {parameter name: string}, where the string are compiled to regular expression
# patterns
# - values are concrete {parameter name: value} pairs.
def _pattern_from_arguments(arguments: Dict[str, str] | str, auto_sort=False) -> str:
if isinstance(arguments, str):
return arguments
keys = _sorted_parameters(arguments) if auto_sort else arguments
pattern = "_".join(_argument_pattern(key, arguments[key]) for key in keys)
return pattern
def _dirname_from_values(values: Dict[str, str] | str, auto_sort=False) -> str:
if isinstance(values, str):
return values
keys = _sorted_parameters(values) if auto_sort else values
dirname = "_".join(_value_pattern(key, values[key]) for key in keys)
return dirname
def _sorted_parameters(*params):
return sorted(*params)
def _argument_pattern(argument: str, expr: str) -> str:
pattern = f"{argument}=(?P<{argument}>{expr})"
return pattern
def _value_pattern(argument: str, expr: Any) -> str:
pattern = f"{argument}={expr}"
return pattern | /scientific-experiment-manager-0.1.0.tar.gz/scientific-experiment-manager-0.1.0/sem/manager.py | 0.942122 | 0.608216 | manager.py | pypi |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer, util
from transformers import DataCollatorWithPadding
from collections import defaultdict
import json
import wandb
import ipdb
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
sns.set()
from utils.data_processor import clean_tweet
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the data to predict on",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['tweets', 'news'], default='tweets')
parser.add_argument("--output_file", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
num_labels = 1 if problem_type == 'regression' else 2
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
with open(args.data_loc) as f:
data = [json.loads(l) for l in f]
model = SentenceTransformer(model_name)
predictions = []
if problem_type == 'tweets':
for row in tqdm(data):
tweet_embeddings = model.encode([clean_tweet(t) for t in row['tweets']])
paper_embeddings = model.encode(list(row['paper_sentences']))
# Convert to range [1,5]
scores = (util.cos_sim(tweet_embeddings, paper_embeddings).clip(min=0, max=1) * 4) + 1
for s,tweet in zip(scores, row['full_tweets']):
tweet['paper_sentence_scores'] = s.tolist()
predictions.extend(s.tolist())
elif problem_type == 'news':
for row in tqdm(data):
news_text = [sent['text'] for url in row['news'] for sent in row['news'][url]]
news_embeddings = model.encode(news_text)
paper_text = [p['text'] for p in row['paper']]
paper_embeddings = model.encode(paper_text)
# Convert to range [1,5]
scores = (util.cos_sim(news_embeddings, paper_embeddings).clip(min=0, max=1) * 4) + 1
j = 0
for url in row['news']:
for sent in row['news'][url]:
sent['paper_sentence_scores'] = scores[j].tolist()
predictions.extend(scores[j].tolist())
j += 1
# Get the original data and attach the scores
with open(args.output_file, 'wt') as f:
for d in data:
f.write(json.dumps(d) + '\n')
fig = plt.figure(figsize=(6,5))
sns.kdeplot(predictions)
plt.tight_layout()
plt.savefig('data/dev-dist.png') | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/predict_similarity_scoring_unlabelled_sbert.py | 0.682785 | 0.261312 | predict_similarity_scoring_unlabelled_sbert.py | pypi |
import torch
import random
import numpy as np
import argparse
import pandas as pd
from functools import partial
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification, AutoModel
from transformers import AutoConfig
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
from collections import defaultdict
import json
import wandb
import ipdb
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
from utils.data_processor import read_unlabelled_tweet_dataset
from utils.trainer import CustomTrainer
from utils.metrics import compute_f1, acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the data to predict on",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['regression', 'single_label_classification'], default='regression')
parser.add_argument("--output_file", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
num_labels = 1 if problem_type == 'regression' else 2
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
dataset = read_unlabelled_tweet_dataset(args.data_loc, tk)
collator = DataCollatorWithPadding(tk)
# Get the F1 metric
compute_metric = compute_regression_metrics if problem_type == 'regression' else partial(acc_f1, 'binary')
# Create the training arguments
training_args = TrainingArguments(
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
max_grad_norm=None,
warmup_steps=args.warmup_steps,
num_train_epochs=args.n_epochs,
seed=seed,
output_dir='./output'
)
# Get the dataset
config = AutoConfig.from_pretrained(model_name, num_labels=num_labels, problem_type=problem_type)
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
pred_output = trainer.predict(dataset)
predictions = pred_output.predictions
# Group the scores by tweet ID and sentence
tweet_id_to_scores = defaultdict(list)
for id_,pred in zip(dataset['tweet_id'], predictions):
tweet_id_to_scores[id_].append(float(pred))
# Open the original data
with open(args.data_loc) as f:
data = [json.loads(l) for l in f]
for row in data:
for tweet in row['full_tweets']:
tweet['sentence_scores'] = tweet_id_to_scores[tweet['tweet_id']]
# Get the original data and attach the scores
with open(args.output_file, 'wt') as f:
for d in data:
f.write(json.dumps(d) + '\n')
sns.kdeplot(predictions.squeeze())
plt.savefig('data/dev-dist.png') | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/predict_similarity_scoring_unlabelled.py | 0.785144 | 0.242295 | predict_similarity_scoring_unlabelled.py | pypi |
import torch
import random
import numpy as np
import argparse
import json
import os
from functools import partial
from datasets import load_metric
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification, AutoModel
from transformers import AutoConfig
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
import wandb
import pandas as pd
import ipdb
from utils.data_processor import read_datasets
from utils.trainer import CustomTrainer
from utils.metrics import compute_f1, acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['regression', 'single_label_classification'], default='regression')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
parser.add_argument("--train_split", help="Decide which data to train on", type=str, default='all', choices=['all', 'tweets', 'news'])
parser.add_argument("--test_split", help="Decide which test split to use", type=str, default='all', choices=['all', 'tweets', 'news'])
parser.add_argument("--use_context", help="Flag to switch to using the context", action='store_true')
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
test_filter = args.test_filter
use_context = args.use_context
num_labels = 1 if problem_type == 'regression' else 2
train_split = args.train_split
test_split = args.test_split
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
config = {
'run_name': args.run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'output_dir': args.output_dir,
'batch_size': args.batch_size,
'learning_rate': args.learning_rate,
'weight_decay': args.weight_decay,
'warmup_steps': args.warmup_steps,
'epochs': args.n_epochs,
'seed': args.seed,
'problem_type': args.problem_type,
'test_filter': args.test_filter,
'use_context': use_context,
'train_split': train_split,
'test_split': test_split
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# Create the tokenizer and model
if 'scibert' in model_name or 'citebert' in model_name:
tk = AutoTokenizer.from_pretrained(model_name, model_max_length=512)
else:
tk = AutoTokenizer.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, problem_type, test_filter, train_split, test_split, use_context)
collator = DataCollatorWithPadding(tk)
# Get the F1 metric
compute_metric = compute_regression_metrics if problem_type == 'regression' else partial(acc_f1, 'binary')
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
do_train=True,
do_eval=True,
evaluation_strategy='epoch',
per_device_train_batch_size=args.batch_size,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
max_grad_norm=None,
warmup_steps=args.warmup_steps,
num_train_epochs=args.n_epochs,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
# Get the dataset
config = AutoConfig.from_pretrained(model_name, num_labels=num_labels, problem_type=problem_type)
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
if problem_type == 'regression':
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
compute_metrics=compute_metric,
data_collator=collator
)
else:
# Get label weights
labels = dataset["train"]['label']
weight = torch.tensor(len(labels) / (2 * np.bincount(labels)))
weight = weight.type(torch.FloatTensor)
# Create the trainer and train
trainer = CustomTrainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
compute_metrics=compute_metric,
data_collator=collator,
weight=weight
)
train_output = trainer.train()
model.save_pretrained(args.output_dir)
tk.save_pretrained(args.output_dir)
pred_output = trainer.predict(dataset['test'])
pred_metrics = pred_output.metrics
wandb.log(pred_metrics)
if test_split == 'all':
# Get tweet performance
tweet_data = dataset['test'].filter(lambda example: example['source'] == 'tweets')
tweet_metrics = trainer.predict(tweet_data, metric_key_prefix='tweet')
pred_metrics.update(tweet_metrics.metrics)
# Rank the errors for analysis
preds = tweet_metrics.predictions.squeeze()
labels = tweet_data['final_score_hard']
AE = np.abs(preds - np.array(labels))
ranking = np.argsort(AE)[::-1]
analysis_dframe = [[tweet_data['News Finding'][r], tweet_data['Paper Finding'][r], preds[r], labels[r]] for r in ranking]
analysis_dframe = pd.DataFrame(analysis_dframe, columns=['Tweet', 'Paper', 'Pred', 'Label'])
analysis_dframe.to_csv(f"{args.metrics_dir}/tweet_errors.csv", index=False)
# Get news performance
news_data = dataset['test'].filter(lambda example: example['source'] == 'news')
news_metrics = trainer.predict(news_data, metric_key_prefix='news')
pred_metrics.update(news_metrics.metrics)
# Iterate through the categories
for cat in categories:
curr_dataset = dataset['test'].filter(lambda example: cat in example['instance_id'])
# Predict
pred_output = trainer.predict(curr_dataset, metric_key_prefix=cat)
pred_metrics.update(pred_output.metrics)
tweet_curr = curr_dataset.filter(lambda example: example['source'] == 'tweets')
pred_output = trainer.predict(tweet_curr, metric_key_prefix=cat + '_tweet')
pred_metrics.update(pred_output.metrics)
news_curr = curr_dataset.filter(lambda example: example['source'] == 'news')
pred_output = trainer.predict(news_curr, metric_key_prefix=cat + '_news')
pred_metrics.update(pred_output.metrics)
wandb.log(pred_metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(pred_metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/train_supervised.py | 0.662687 | 0.22378 | train_supervised.py | pypi |
import argparse
import random
from functools import partial
import wandb
import json
import os
import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import DataCollatorWithPadding
from transformers import Trainer
from transformers import TrainingArguments
from utils.data_processor import read_datasets
from utils.metrics import acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='coderpotter/adversarial-paraphrasing-detector')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
# config = AutoConfig.from_pretrained(model_name, num_labels=3)
# Initialization warning is apparently normal: https://github.com/huggingface/transformers/issues/5421
model = AutoModelForSequenceClassification.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, test_filter=test_filter)
labels = np.array(dataset['test']['label'])
dataset = dataset.remove_columns("label")
collator = DataCollatorWithPadding(tk)
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
do_train=True,
do_eval=True,
evaluation_strategy='epoch',
max_grad_norm=None,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
# Get the dataset
# Create the trainer and train
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
data_collator=collator
)
pred_output = trainer.predict(dataset['test'])
logits_orig = pred_output.predictions
# Take a softmax over logits
probs = F.softmax(torch.tensor(logits_orig), -1)
similarity = probs[:, 1]
# Convert to range [1,5]
preds = (similarity * 4) + 1
metrics = compute_regression_metrics((preds, labels), prefix='unsupervised_')
tweet_selector = [source == 'tweets' for source in dataset['test']['source']]
tweet_idx = np.where(tweet_selector)
tweet_metrics = compute_regression_metrics((preds[tweet_idx], labels[tweet_idx]),
prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = [source == 'news' for source in dataset['test']['source']]
news_idx = np.where(news_selector)
news_metrics = compute_regression_metrics((preds[news_idx], labels[news_idx]),
prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = [cat in instance_id for instance_id in dataset['test']['instance_id']]
# Predict
idx = np.where(selector)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_')
metrics.update(metrics_cat)
tweets = list(np.array(selector) & np.array(tweet_selector))
idx = np.where(tweets)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news = list(np.array(selector) & np.array(news_selector))
idx = np.where(news)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_paraphrase_detection.py | 0.625209 | 0.257479 | eval_unsupervised_paraphrase_detection.py | pypi |
import torch
import random
import numpy as np
import argparse
import wandb
import torch.nn.functional as F
import torch
import json
import os
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
from datasets import load_dataset
from functools import partial
import ipdb
from utils.data_processor import read_datasets
from utils.metrics import acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
# config = AutoConfig.from_pretrained(model_name, num_labels=3)
# Initialization warning is apparently normal: https://github.com/huggingface/transformers/issues/5421
model = AutoModelForSequenceClassification.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, test_filter=test_filter)
labels = np.array(dataset['test']['label'])
dataset = dataset.remove_columns("label")
collator = DataCollatorWithPadding(tk)
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
evaluation_strategy='epoch',
max_grad_norm=None,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
# Get the dataset
# Create the trainer and train
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
pred_output = trainer.predict(dataset['test'])
logits_orig = pred_output.predictions
# Take a softmax over logits
probs = F.softmax(torch.tensor(logits_orig), -1)
similarity = probs[:,0]
# Convert to range [1,5]
preds = (similarity * 4) + 1
metrics = compute_regression_metrics((preds, labels), prefix='unsupervised_')
tweet_selector = [source == 'tweets' for source in dataset['test']['source']]
tweet_idx = np.where(tweet_selector)
tweet_metrics = compute_regression_metrics((preds[tweet_idx], labels[tweet_idx]),
prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = [source == 'news' for source in dataset['test']['source']]
news_idx = np.where(news_selector)
news_metrics = compute_regression_metrics((preds[news_idx], labels[news_idx]),
prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = [cat in instance_id for instance_id in dataset['test']['instance_id']]
# Predict
idx = np.where(selector)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_')
metrics.update(metrics_cat)
tweets = list(np.array(selector) & np.array(tweet_selector))
idx = np.where(tweets)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news = list(np.array(selector) & np.array(news_selector))
idx = np.where(news)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_nli.py | 0.646014 | 0.269736 | eval_unsupervised_nli.py | pypi |
import torch
from torch import nn
import random
import numpy as np
import argparse
import json
import os
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer, losses, evaluation, models
from torch.utils.data import DataLoader
import wandb
import pandas as pd
import ipdb
from utils.data_processor import read_datasets_sentence_transformers, read_dataset_raw, filter_data_sentence_transformers
from utils.data_processor import LABEL_COLUMN
from utils.metrics import compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=1e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
parser.add_argument("--train_split", help="Decide which data to train on", type=str, default='all',
choices=['all', 'tweets', 'news'])
parser.add_argument("--test_split", help="Decide which test split to use", type=str, default='all',
choices=['all', 'tweets', 'news'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
train_split = args.train_split
test_split = args.test_split
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
config = {
'run_name': args.run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'output_dir': args.output_dir,
'batch_size': args.batch_size,
'learning_rate': args.learning_rate,
'weight_decay': args.weight_decay,
'warmup_steps': args.warmup_steps,
'epochs': args.n_epochs,
'seed': args.seed,
'train_split': train_split,
'test_split': test_split
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# Create the model
model = SentenceTransformer(model_name)
dataset = read_datasets_sentence_transformers(args.data_loc, args.test_filter, train_split, test_split)
train_dataloader = DataLoader(dataset['train'], shuffle=True, batch_size=args.batch_size)
dev_data = read_dataset_raw(f"{args.data_loc}/dev.csv")
dev_sentences1 = list(dev_data['Paper Finding'])
dev_sentences2 = list(dev_data['News Finding'])
dev_scores = [(s-1)/4 for s in dev_data[LABEL_COLUMN]]
evaluator = evaluation.EmbeddingSimilarityEvaluator(dev_sentences1, dev_sentences2, dev_scores)
train_loss = losses.CosineSimilarityLoss(model)
# Same loss used to train mpnet model
#train_loss = losses.MultipleNegativesRankingLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=args.n_epochs,
evaluator=evaluator,
evaluation_steps=len(train_dataloader),
output_path=args.output_dir,
save_best_model=True,
optimizer_params={'lr': args.learning_rate}
)
# Test
test_data = read_dataset_raw(f"{args.data_loc}/test.csv")
test_data = filter_data_sentence_transformers(test_data, args.test_filter, test_split)
if args.test_filter == 'easy':
test_data = test_data[test_data['instance_id'].str.contains('easy')]
elif args.test_filter == 'hard':
test_data = test_data[~test_data['instance_id'].str.contains('easy')]
paper_embeddings = model.encode(list(test_data['Paper Finding']))
news_embeddings = model.encode(list(test_data['News Finding']))
scores = F.cosine_similarity(torch.Tensor(paper_embeddings), torch.Tensor(news_embeddings), dim=1).clip(
min=0).squeeze().cpu().numpy()
# Convert to range [1,5], assume anything below 0 is 0
preds = (scores * 4) + 1
labels = test_data[LABEL_COLUMN]
metrics = compute_regression_metrics((preds, labels), prefix='test_')
if test_split == 'all':
tweet_selector = test_data['source'] == 'tweets'
tweet_metrics = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix='tweet_')
metrics.update(tweet_metrics)
tweet_data = test_data[tweet_selector]
tweets = list(tweet_data['News Finding'])
paper = list(tweet_data['Paper Finding'])
preds_tweet = preds[tweet_selector]
labels_tweet = list(labels[tweet_selector])
AE = np.abs(preds_tweet - np.array(labels_tweet))
assert len(tweets) == len(paper)
assert len(tweets) == len(preds_tweet)
assert len(tweets) == len(labels_tweet)
ranking = np.argsort(AE)[::-1]
analysis_dframe = [[tweets[r], paper[r], preds_tweet[r], labels_tweet[r]] for r in
ranking]
analysis_dframe = pd.DataFrame(analysis_dframe, columns=['Tweet', 'Paper', 'Pred', 'Label'])
analysis_dframe.to_csv(f"{args.metrics_dir}/tweet_errors.csv", index=False)
news_selector = test_data['source'] == 'news'
news_metrics = compute_regression_metrics((preds[news_selector], labels[news_selector]), prefix='news_')
metrics.update(news_metrics)
wandb.log(metrics)
for cat in categories:
selector = test_data['instance_id'].str.contains(cat)
# Predict
metrics_cat = compute_regression_metrics((preds[selector], labels[selector]), prefix=cat + '_')
metrics.update(metrics_cat)
tweet_selector = selector & (test_data['source'] == 'tweets')
metrics_cat = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news_selector = selector & (test_data['source'] == 'news')
metrics_cat = compute_regression_metrics((preds[news_selector], labels[news_selector]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/train_supervised_sentence_transformers.py | 0.744749 | 0.218899 | train_supervised_sentence_transformers.py | pypi |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer
import wandb
import json
import os
import ipdb
import torch.nn.functional as F
from utils.data_processor import read_dataset_raw
from utils.metrics import compute_regression_metrics
from utils.data_processor import LABEL_COLUMN
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the test data",
type=str, required=True)
parser.add_argument("--eval_data_loc",
help="The location of the validation data",
type=str, required=True)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='all-MiniLM-L6-v2')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': args.test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# Load the data
val_data = read_dataset_raw(args.eval_data_loc)
test_data = read_dataset_raw(args.data_loc)
if args.test_filter == 'easy':
test_data = test_data[test_data['instance_id'].str.contains('easy')]
elif args.test_filter == 'hard':
test_data = test_data[~test_data['instance_id'].str.contains('easy')]
# Load the model
model = SentenceTransformer(model_name)
paper_embeddings = model.encode(list(test_data['Paper Finding']))
news_embeddings = model.encode(list(test_data['News Finding']))
scores = F.cosine_similarity(torch.Tensor(paper_embeddings), torch.Tensor(news_embeddings), dim=1).clip(min=0).squeeze().cpu().numpy()
# Convert to range [1,5], assume anything below 0 is 0
preds = (scores * 4) + 1
labels = test_data[LABEL_COLUMN]
metrics = compute_regression_metrics((preds,labels), prefix='unsupervised_')
tweet_selector = test_data['source'] == 'tweets'
tweet_metrics = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = test_data['source'] == 'news'
news_metrics = compute_regression_metrics((preds[news_selector], labels[news_selector]), prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = test_data['instance_id'].str.contains(cat)
# Predict
metrics_cat = compute_regression_metrics((preds[selector], labels[selector]), prefix=cat + '_')
metrics.update(metrics_cat)
tweet_selector = selector & (test_data['source'] == 'tweets')
metrics_cat = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news_selector = selector & (test_data['source'] == 'news')
metrics_cat = compute_regression_metrics((preds[news_selector], labels[news_selector]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_sts.py | 0.671363 | 0.266406 | eval_unsupervised_sts.py | pypi |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer, util
from transformers import AutoModelForSequenceClassification
from transformers import AutoConfig
from transformers import AutoTokenizer
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
import pandas as pd
from datasets import Dataset
import wandb
import json
import os
from tqdm import tqdm
from rank_bm25 import BM25Okapi
from datasets import load_dataset
import ipdb
import torch.nn.functional as F
from utils.data_processor import read_covert_dataset, read_covidfact_dataset
from utils.metrics import acc_f1, compute_regression_metrics
from utils.rank_metrics import mean_average_precision, mean_reciprocal_rank
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
def bm25(claims, evidence):
corpus = [e[1].split(" ") for e in evidence]
bm25 = BM25Okapi(corpus)
ranked_lists = []
for claim,ev_id in tqdm(claims):
# Create dataset to pass through model
preds = bm25.get_scores(claim.split(" "))
# Get order
rank = np.argsort(preds)[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
def sts_model(claims, evidence, args):
#Load the model
model = SentenceTransformer(args.model_name)
evidence_embeddings = model.encode([e[1] for e in evidence])
claim_embeddings = model.encode([c[0] for c in claims])
scores = util.cos_sim(claim_embeddings, evidence_embeddings)
ranked_lists = []
for row_score,(claim, ev_id) in zip(scores, claims):
# Get order
rank = np.argsort(row_score.numpy())[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
def trained_model(claims, evidence, args):
tk = AutoTokenizer.from_pretrained(args.base_model, model_max_length=512)
config = AutoConfig.from_pretrained(args.model_name, num_labels=1)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name, config=config)
training_args = TrainingArguments(
output_dir=args.output_dir
)
collator = DataCollatorWithPadding(tk)
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
def preprocess(examples):
batch = tk(examples['claim'], text_pair=examples['evidence'], truncation=True)
return batch
# Iterate through each claim and get a ranked list
ranked_lists = []
for claim,ev_id in tqdm(claims):
# Create dataset to pass through model
pairs = [[e[0], claim, e[1]] for e in evidence]
dframe = pd.DataFrame(pairs, columns=['id', 'claim', 'evidence'])
dataset = Dataset.from_pandas(dframe)
dataset = dataset.map(preprocess, batched=True)
pred_output = trainer.predict(dataset)
preds = pred_output.predictions.squeeze()
# Get order
rank = np.argsort(preds)[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the COVERT data",
type=str, required=True)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='copenlu/citebert')
parser.add_argument("--base_model",
help="The base model",
type=str, default='copenlu/citebert')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--dataset",
help="The name of the dataset to use",
type=str, default='covert', choices=['covert', 'covidfact'])
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--eval_type", help="Decide which test samples to test on", type=str, default='ours', choices=['ours', 'sts', 'bm25'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
eval_type = args.eval_type
dataset_name = args.dataset
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'eval_type': args.eval_type,
'dataset': dataset_name
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# Load the data
if dataset_name == 'covert':
claim_label,evidence = read_covert_dataset(args.data_loc)
elif dataset_name == 'covidfact':
claim_label, evidence = read_covidfact_dataset(args.data_loc)
if eval_type == 'ours':
metrics = trained_model(claim_label, evidence, args)
elif eval_type == 'bm25':
metrics = bm25(claim_label, evidence)
elif eval_type == 'sts':
metrics = sts_model(claim_label, evidence, args)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
f.write(json.dumps(metrics)) | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_evidence_retrieval.py | 0.700588 | 0.269416 | eval_evidence_retrieval.py | pypi |
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from typing import List, AnyStr, Tuple, Dict
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
import ipdb
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
return np.sum(preds == labels).astype(np.float32) / float(labels.shape[0])
def acc_f1(averaging, eval_pred) -> Dict:
logits, labels = eval_pred
if len(logits.shape) > 1:
preds = np.argmax(logits, axis=-1)
else:
preds = logits
acc = accuracy(preds, labels)
P, R, F1, _ = precision_recall_fscore_support(labels, preds, average=averaging)
return {'accuracy': acc, 'precision': P, 'recall': R, 'f1': F1}
def compute_regression_metrics(eval_pred, clip_value=(1.0,5.0), prefix=''):
predictions, labels = eval_pred
predictions = np.clip(predictions, clip_value[0], clip_value[1])
mse = mean_squared_error(labels, predictions)
if len(predictions.shape) > 1:
predictions = predictions[:,0]
rho = pearsonr(predictions, labels.squeeze())
psi = spearmanr(predictions, labels.squeeze())
return {f"{prefix}mse": mse, f'{prefix}rho': rho[0], f'{prefix}rho-p': rho[1], f'{prefix}psi': psi[0], f'{prefix}psi-p': psi[1]}
def compute_f1(f1_metric, average, eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return f1_metric.compute(predictions=predictions, references=labels, average=average)
def compute_rouge(tokenizer, metric, eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [lab.strip() for lab in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
#result = {"rouge": result["score"]}
result = {
'rouge1_low_p': result['rouge1'].low.precision,
'rouge1_low_r': result['rouge1'].low.recall,
'rouge1_low_fmeasure': result['rouge1'].low.fmeasure,
'rouge1_mid_p': result['rouge1'].mid.precision,
'rouge1_mid_r': result['rouge1'].mid.recall,
'rouge1_mid_fmeasure': result['rouge1'].mid.fmeasure,
'rouge1_high_p': result['rouge1'].high.precision,
'rouge1_high_r': result['rouge1'].high.recall,
'rouge1_high_fmeasure': result['rouge1'].high.fmeasure,
'rouge2_low_p': result['rouge2'].low.precision,
'rouge2_low_r': result['rouge2'].low.recall,
'rouge2_low_fmeasure': result['rouge2'].low.fmeasure,
'rouge2_mid_p': result['rouge2'].mid.precision,
'rouge2_mid_r': result['rouge2'].mid.recall,
'rouge2_mid_fmeasure': result['rouge2'].mid.fmeasure,
'rouge2_high_p': result['rouge2'].high.precision,
'rouge2_high_r': result['rouge2'].high.recall,
'rouge2_high_fmeasure': result['rouge2'].high.fmeasure,
'rougeL_low_p': result['rougeL'].low.precision,
'rougeL_low_r': result['rougeL'].low.recall,
'rougeL_low_fmeasure': result['rougeL'].low.fmeasure,
'rougeL_mid_p': result['rougeL'].mid.precision,
'rougeL_mid_r': result['rougeL'].mid.recall,
'rougeL_mid_fmeasure': result['rougeL'].mid.fmeasure,
'rougeL_high_p': result['rougeL'].high.precision,
'rougeL_high_r': result['rougeL'].high.recall,
'rougeL_high_fmeasure': result['rougeL'].high.fmeasure,
'rougeLsum_low_p': result['rougeLsum'].low.precision,
'rougeLsum_low_r': result['rougeLsum'].low.recall,
'rougeLsum_low_fmeasure': result['rougeLsum'].low.fmeasure,
'rougeLsum_mid_p': result['rougeLsum'].mid.precision,
'rougeLsum_mid_r': result['rougeLsum'].mid.recall,
'rougeLsum_mid_fmeasure': result['rougeLsum'].mid.fmeasure,
'rougeLsum_high_p': result['rougeLsum'].high.precision,
'rougeLsum_high_r': result['rougeLsum'].high.recall,
'rougeLsum_high_fmeasure': result['rougeLsum'].high.fmeasure,
}
#prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
#result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 6) for k, v in result.items()}
return result | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/metrics.py | 0.825343 | 0.645357 | metrics.py | pypi |
import torch
from torch import nn
from torch.optim import SGD
from transformers import AutoModel
from tqdm import tqdm
import torch.nn.functional as F
import ipdb
class GradientReversal(torch.autograd.Function):
"""
Basic layer for doing gradient reversal
"""
lambd = 1.0
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_output):
return GradientReversal.lambd * grad_output.neg()
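# Hedged usage sketch (editorial addition): GradientReversal is the usual
# DANN-style layer -- identity in the forward pass, gradient scaled by -lambd
# in the backward pass. The encoder/head names below are hypothetical.
#   features = encoder(inputs)                    # any upstream module
#   reversed_feats = GradientReversal.apply(features)
#   domain_logits = domain_head(reversed_feats)
#   # On loss.backward(), gradients flowing into `encoder` through this branch
#   # are multiplied by -GradientReversal.lambd, encouraging domain-invariant
#   # features while the domain head itself is trained normally.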
class DomainAdversarialModel(nn.Module):
"""
A really basic wrapper around BERT
"""
def __init__(self, model: AutoModel, n_classes: int = 2, **kwargs):
super(DomainAdversarialModel, self).__init__()
self.model = AutoModel.from_pretrained(model)
self.domain_classifier = nn.Linear(self.model.config.hidden_size, n_classes)
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.LongTensor,
labels: torch.LongTensor = None,
**kwargs
):
# 1) Get the CLS representation from BERT
outputs = self.model(
input_ids,
attention_mask=attention_mask
)
# (b x n_classes)
cls_hidden_state = outputs.pooler_output
adv_input = GradientReversal.apply(cls_hidden_state)
adv_logits = self.domain_classifier(adv_input)
outputs['logits'] = adv_logits
loss_fn = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fn(adv_logits, labels)
outputs['loss'] = loss
return outputs
def save_pretrained(self, output_dir: str):
self.model.save_pretrained(output_dir)
# Optimize the softmax temperature to minimize the negative log likelihood
class Temp(nn.Module):
def __init__(self):
super().__init__()
self.T = nn.Parameter(torch.ones(1))
def forward(self, logits):
return logits / self.T
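# Editorial note: Temp together with calibrate_temperature below implements
# standard temperature scaling (commonly attributed to Guo et al., 2017): a
# single scalar T is fitted so that softmax(logits / T) minimises the NLL on
# held-out data, leaving the argmax (accuracy) unchanged while improving
# calibration. Hedged sketch with a hypothetical `logits` tensor:
#   temp = Temp()
#   calibrated_log_probs = F.log_softmax(temp(logits), dim=-1)  # logits / T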
def calculate_log_likelihood(model, loader, T, device):
# loader = torch.utils.data.DataLoader(dset, batch_size=32,
# num_workers=2)
with torch.no_grad():
labels_all = []
preds_all = []
for i, batch in enumerate(tqdm(loader), 0):
# get the inputs; data is a list of [inputs, labels]
for b in batch:
batch[b] = batch[b].to(device)
labels = batch.pop('labels')
# forward + backward + optimize
outputs = model(**batch)
logits = outputs['logits']
logits /= T
preds = F.log_softmax(logits, dim=-1)
labels_all.append(labels.detach())
preds_all.append(preds.detach())
nll = F.nll_loss(torch.concat(preds_all), torch.concat(labels_all), reduction='mean')
return nll.item()
def calibrate_temperature(model, loader, device):
# loader = torch.utils.data.DataLoader(dset, batch_size=32,
# num_workers=2)
T = Temp().to(device)
optim = SGD(T.parameters(), lr=1e-3)
patience = 10
c = 0
eps = 1e-5
t_curr = 1.0
done = False
print(f"NLL before calibration: {calculate_log_likelihood(model, loader, t_curr, device)}")
for epoch in range(3): # loop over the dataset multiple times
for i, batch in enumerate(tqdm(loader), 0):
# get the inputs; data is a list of [inputs, labels]
for b in batch:
batch[b] = batch[b].to(device)
labels = batch.pop('labels')
# zero the parameter gradients
optim.zero_grad()
# forward + backward + optimize
outputs = model(**batch)
logits = outputs['logits']
logits = T(logits)
preds = F.log_softmax(logits, dim=-1)
nll = F.nll_loss(preds, labels, reduction='mean')
nll.backward()
optim.step()
if abs(t_curr - T.T.item()) > eps:
c = 0
else:
c += 1
if c == patience:
done = True
break
t_curr = T.T.item()
if done:
break
print(f"NLL after calibration: {calculate_log_likelihood(model, loader, t_curr, device)}")
return t_curr | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/model.py | 0.93528 | 0.508971 | model.py | pypi |
import numpy as np
def mean_reciprocal_rank(rs):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
>>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
>>> delta_r = 1. / sum(r)
>>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
0.7833333333333333
>>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> dcg_at_k(r, 1)
3.0
>>> dcg_at_k(r, 1, method=1)
3.0
>>> dcg_at_k(r, 2)
5.0
>>> dcg_at_k(r, 2, method=1)
4.2618595071429155
>>> dcg_at_k(r, 10)
9.6051177391888114
>>> dcg_at_k(r, 11)
9.6051177391888114
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
if __name__ == "__main__":
import doctest
doctest.testmod() | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/rank_metrics.py | 0.923842 | 0.822688 | rank_metrics.py | pypi |
from sentence_transformers import SentenceTransformer, util
from typing import Optional, AnyStr, List
import numpy as np
import torch
import torch.nn.functional as F
class SimilarityEstimator(object):
"""
Estimator of information matching score (IMS) between two scientific sentences
"""
def __init__(
self,
model_name_or_path: Optional[AnyStr] = 'copenlu/spiced',
device: Optional[AnyStr] = None,
use_auth_token: Optional[bool] = False,
cache_folder: Optional[AnyStr] = None
):
"""
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name. Defaults to the best model from Wright et al. 2022.
:param device: Device (like ‘cuda’ / ‘cpu’) that should be used for computation. If None, checks if a GPU can be used.
:param use_auth_token: HuggingFace authentication token to download private models.
:param cache_folder: Path to store models
"""
self.model_name_or_path = model_name_or_path
self.device = device
self.use_auth_token = use_auth_token
self.cache_folder = cache_folder
self.model = SentenceTransformer(
model_name_or_path=model_name_or_path,
device=device,
use_auth_token=use_auth_token,
cache_folder=cache_folder
)
def estimate_ims(
self,
a: List[AnyStr],
b: List[AnyStr]
) -> np.ndarray:
"""
        Estimate the information matching score between all sentences in 'a' and all sentences in 'b'. The score will be a scalar between 1 and 5, where 1 means no information similarity and 5 means the information is exactly the same between the two sentences.
:param a: A list of sentences
:param b: Second list of sentences
:return: A matrix S of size $N$x$M$ where $N$ is the length of list $a$, $M$ is the length of list $b$, and entry $S_{ij}$ is the information matching score between sentence $a_{i}$ and $b_{j}$
"""
sentence1_embedding = self.model.encode(a)
sentence2_embedding = self.model.encode(b)
S = (util.cos_sim(sentence1_embedding, sentence2_embedding).clip(min=0, max=1) * 4) + 1
return S.detach().numpy()
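    # Hedged usage sketch (editorial addition; the sentences are invented):
    #   estimator = SimilarityEstimator()
    #   S = estimator.estimate_ims(
    #       a=['The drug reduced mortality by 30%.'],
    #       b=['Mortality dropped by about a third under treatment.',
    #          'The study enrolled 500 patients.'])
    #   # S has shape (1, 2); S[0, 0] is expected to be clearly larger
    #   # than S[0, 1].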
def estimate_ims_array(
self,
a: List[AnyStr],
b: List[AnyStr]
) -> List:
"""
        Estimate the information matching score between each sentence in $a$ and its corresponding $b$ (i.e. $a_{i}$ and $b_{i}$). The score will be a scalar between 1 and 5, where 1 means no information similarity and 5 means the information is exactly the same between the two sentences.
:param a: A list of sentences
:param b: Second list of sentences of the same size as $a$
:return: A list $s$ of size $N$ where $N$ is the length of both list $a$ and list $b$ and entry $s_{i}$ is the information matching score between $a_{i}$ and $b_{i}$
"""
assert len(a) == len(b), f"len(a) != len(b), lists of sentences must be equal length. len(a) == {len(a)}, len(b) == {len(b)}"
sentence1_embedding = self.model.encode(a)
sentence2_embedding = self.model.encode(b)
scores = F.cosine_similarity(torch.Tensor(sentence1_embedding), torch.Tensor(sentence2_embedding), dim=1).clip(
min=0).squeeze().cpu().numpy()
# Convert to range [1,5], assume anything below 0 is 0
s = (scores * 4) + 1
return s.tolist() | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/scientific_information_change/estimate_similarity.py | 0.949494 | 0.651036 | estimate_similarity.py | pypi |
from __future__ import annotations
from pathlib import Path
from functools import wraps
from typing import TypeVar, List, Tuple, Union, Callable, Optional
from warnings import warn, filterwarnings, catch_warnings
from textwrap import dedent
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from numpy import amin, amax
from .plot_settings import apply_styles, rwth_cycle
from .types_ import Vector, Matrix
mpl.use("Agg")
In = TypeVar("In", List[float], Tuple[float],
Vector)
In2D = TypeVar("In2D", list[list[float]], list[Vector], tuple[Vector],
Matrix)
def fix_inputs(input_1: In, input_2: In)\
-> tuple[Vector, Vector]:
"""
Remove nans and infinities from the input vectors.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
Returns
------
New vectors x and y with nans removed.
"""
if len(input_1) != len(input_2):
raise ValueError(
"The sizes of the input vectors are not the same.")
nan_count = np.count_nonzero(np.isnan(input_2))
inf_count = np.count_nonzero(np.isinf(input_2))
if nan_count != 0 or inf_count != 0:
new_input_1 = np.empty(len(input_1)-nan_count-inf_count)
new_input_2 = np.empty(len(new_input_1))
position = 0
for x_input, y_input in zip(input_1, input_2):
            if not (np.isnan(y_input) or np.isinf(y_input)):
new_input_1[position] = x_input
new_input_2[position] = y_input
position += 1
return new_input_1, new_input_2
return np.array(input_1), np.array(input_2)
def check_inputs(input_1: In, input_2: In, label_1: str, label_2: str)\
-> bool:
"""
Check the input vectors to see, if they are large enough.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
label_1, label_2:
Labels of the X/Y axis
Returns
------
True, if the plot can be created.
"""
if len(input_1) <= 1 or len(input_2) <= 1:
warn(
"There are not enough points in the following plots:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
if min(input_1) == max(input_1):
warn(
"The area of the x-axis is not large enough in the following plot:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
if min(input_2) == max(input_2):
warn(
"The area of the y-axis is not large enough in the following plot:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
infinity = np.isinf(input_1).any() or np.isinf(input_2).any()
if infinity:
warn(dedent(f"""There are infinities in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
nan = np.isnan(input_1).any() or np.isnan(input_2).any()
if nan:
warn(dedent(f"""There are nans in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
return True
@apply_styles
def plot_fit(X: In, Y: In,
fit_function: Callable[..., float],
xlabel: str, ylabel: str, filename: Union[str, Path], *,
args: Optional[Tuple[float]] = None,
logscale: bool = False) -> None:
"""Creates a plot of data and a fit and saves it to 'filename'."""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
n_fit = 1000
_fit_function: Callable[[float], float]
if args is not None:
@wraps(fit_function)
def _fit_function(x: float) -> float:
"""This is the function, which has been fitted"""
return _fit_function(x, *args)
else:
_fit_function = fit_function
plt.plot(X, Y, label="data")
X_fit = [min(X) + (max(X) - min(X)) * i / (n_fit - 1)
for i in range(n_fit)]
Y_fit = [_fit_function(x) for x in X_fit]
plt.plot(X_fit, Y_fit, label="fit")
if logscale:
plt.xscale("log")
plt.yscale("log")
plt.xlim(min(X), max(X))
if logscale:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
else:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
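# Hedged usage sketch (editorial addition; data, labels and paths are made up
# and the 'plots/' directory is assumed to exist):
#   x = np.linspace(1.0, 10.0, 50)
#   y = 2.0 * x + 1.0
#   plot_fit(x, y, lambda v, a, b: a * v + b,
#            "x / mm", "y / N", "plots/fit.pdf", args=(2.0, 1.0))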
@apply_styles(three_d=True)
def plot_surface(X: In2D, Y: In2D, Z: In2D,
xlabel: str, ylabel: str, zlabel: str,
filename: Union[str, Path], *,
log_scale: bool = False,
set_z_lim: bool = True,
colorscheme: str = "rwth_gradient",
figsize: tuple[float, float] = (4.33, 3.5),
labelpad: Optional[float] = None,
nbins: Optional[int] = None) -> None:
"""create a 2D surface plot of meshgrid-like valued Xs, Ys and Zs"""
if not check_inputs(
np.array(X).flatten(),
np.array(Z).flatten(), xlabel, zlabel):
return
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
fig.subplots_adjust(left=-0.02, right=0.75, bottom=0.15, top=0.98)
ax.plot_surface(X, Y, Z, cmap=colorscheme)
ax.set_box_aspect(aspect=None, zoom=.8)
if labelpad is None:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel, rotation=90)
else:
ax.set_xlabel(xlabel, labelpad=labelpad)
ax.set_ylabel(ylabel, labelpad=labelpad)
ax.set_zlabel(zlabel, rotation=90, labelpad=labelpad)
assert ax.zaxis is not None
ax.set_xlim(amin(X), amax(X)) # type: ignore
ax.set_ylim(amin(Y), amax(Y)) # type: ignore
if set_z_lim:
if not log_scale:
ax.set_zlim(
amin(Z) - (amax(Z) - amin(Z)) * 0.02, # type: ignore
amax(Z) + (amax(Z) - amin(Z)) * 0.02 # type: ignore
)
else:
ax.set_zlim(
amin(Z) * 0.97, amax(Z) * 1.02) # type: ignore
if log_scale:
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_zscale("log")
for spine in ax.spines.values():
spine.set_visible(False)
ax.xaxis.pane.set_alpha(0.3)
ax.yaxis.pane.set_alpha(0.3)
ax.zaxis.pane.set_alpha(0.3)
if nbins is not None:
ax.xaxis.set_major_locator(
MaxNLocator(nbins)
)
ax.yaxis.set_major_locator(
MaxNLocator(nbins)
)
fig.set_size_inches(*figsize)
with catch_warnings():
filterwarnings("ignore", message=".*Tight layout")
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def plot(X: In, Y: In, xlabel: str, ylabel: str,
filename: Union[Path, str], *, logscale: bool = False,
ylim: Optional[tuple[float, float]] = None,
yticks: bool = True, cycler: int = 0) -> None:
"""Create a simple 1D plot"""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
if len(X) <= 1 or len(Y) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycler > 0:
for _ in range(cycler):
plt.plot([], [])
plt.plot(X, Y, linestyle="-")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if logscale:
plt.xscale("log")
plt.yscale("log")
if ylim is None:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
elif ylim is None:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
if ylim is not None:
plt.ylim(*ylim)
plt.xlim(min(X), max(X))
if not yticks:
plt.yticks([])
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def two_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False, cycle: int = 0,
color: tuple[int, int] = (0, 1),
outer: bool = False) -> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot and a single y-axis.
Keyword arguments:
cycle -- skip this many colours in the colour-wheel before plotting
    color -- use these indices in the colour-wheel when creating a plot
outer -- use the outer limits on the x-axis rather than the inner limit
"""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
or check_inputs(x2, y2, xlabel, label2)):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycle > 0:
color = (color[0] + cycle, color[1] + cycle)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
plt.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
plt.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2))
max_ = max(max(y1), max(y2))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if outer:
plt.xlim(min(min(x1), min(x2)),
max(max(x1), max(x2)))
else:
plt.xlim(max(min(x1), min(x2)),
min(max(x1), max(x2)))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def three_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False,
xmin: Optional[float] = None,
xmax: Optional[float] = None) -> None:
"""Create a simple 1D plot with three different graphs inside of a single
plot and a single y-axis."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
            or check_inputs(x2, y2, xlabel, label2)
or check_inputs(x3, y3, xlabel, label3)):
return
if any(len(x) <= 1 for x in (x1, x2, y1, y2, x3, y3)):
raise ValueError(
f"The data for plot {filename} contains empty rows!")
plt.plot(x1, y1, label=label1)
plt.plot(x2, y2, label=label2, linestyle="dashed")
plt.plot(x3, y3, label=label3, linestyle="dotted")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2), min(y3))
max_ = max(max(y1), max(y2), max(y3))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if xmin is not None and xmax is not None:
plt.xlim(xmin, xmax)
else:
plt.xlim(min(x1), max(x1))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def two_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
ylabel2: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int] = (0, 1))\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
fig = plt.figure()
ax1 = fig.add_subplot(111)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close()
def make_invisible(ax: plt.Axes) -> None:
"""Make all patch spines invisible."""
ax.set_frame_on(True)
ax.patch.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
@apply_styles
def three_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
ylabel2: str, ylabel3: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int, int] = (0, 1, 2),
legend: bool = True)\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
# pylint: disable=R0915
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if not check_inputs(
x3, y3, xlabel, label3):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
assert len(color) == 3
fig, ax1 = plt.subplots()
fig.subplots_adjust(right=0.75)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
ax1.yaxis.label.set_color(colors[color[0]])
ax1.tick_params(axis="y", colors=colors[color[0]])
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
ax2.yaxis.label.set_color(colors[color[1]])
ax2.tick_params(axis="y", colors=colors[color[1]])
# third plot
ax3 = ax1.twinx()
make_invisible(ax3)
ax3.spines["right"].set_position(("axes", 1.25))
ax3.spines["right"].set_visible(True)
lines += ax3.plot(x3, y3, label=label3,
color=colors[color[2]],
linestyle=linestyle[2])
ax3.set_ylabel(ylabel3)
ax3.set_ylim(
min(y3) - (max(y3) - min(y3)) * 0.02,
max(y3) + (max(y3) - min(y3)) * 0.02
)
ax3.yaxis.label.set_color(colors[color[2]])
ax3.tick_params(axis="y", colors=colors[color[2]])
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
if legend:
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close() | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/default_plots.py | 0.945951 | 0.587795 | default_plots.py | pypi |
from __future__ import annotations
import csv
import locale
from contextlib import contextmanager
from copy import copy, deepcopy
from functools import wraps
from typing import (
Generator, Optional, Union, Callable, Any, overload)
from pathlib import Path
from warnings import warn, catch_warnings, simplefilter
from textwrap import dedent
import mpl_toolkits
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import Axes
from matplotlib import colors
from cycler import cycler
import numpy as np
from .utilities import translate
from .types_ import Vector
mpl.use("Agg")
plt.rcParams["axes.unicode_minus"] = False
SPINE_COLOR = "black"
FIGSIZE = (3.15, 2.35)
FIGSIZE_SLIM = (3.15, 2.1)
FIGSIZE_SMALL = (2.2, 2.1)
_savefig = copy(plt.savefig) # backup the old save-function
def linestyles() -> Generator[str, None, None]:
"""get the line-stiles as an iterator"""
yield "-"
yield "dotted"
yield "--"
yield "-."
rwth_colorlist: list[tuple[int, int, int]] = [(0, 84, 159), (246, 168, 0),
(161, 16, 53), (0, 97, 101)]
rwth_cmap = colors.ListedColormap(rwth_colorlist, name="rwth_list")
mpl.colormaps.register(rwth_cmap)
rwth_hex_colors = ["#00549F", "#F6A800", "#A11035", "#006165",
"#57AB27", "#E30066"]
rwth_cycle = (
cycler(color=rwth_hex_colors)
+ cycler(linestyle=["-", "--", "-.", "dotted",
(0, (3, 1, 1, 1, 1, 1)),
(0, (3, 5, 1, 5))]))
rwth_gradient: dict[str, tuple[tuple[float, float, float],
tuple[float, float, float]]] = {
"red": ((0.0, 0.0, 0.0), (1.0, 142 / 255, 142 / 255)),
"green": ((0.0, 84 / 255.0, 84 / 255), (1.0, 186 / 255, 186 / 255)),
"blue": ((0.0, 159 / 255, 159 / 255), (1.0, 229 / 255, 229 / 255)),
}
def make_colormap(seq: list[tuple[tuple[Optional[float], ...],
float,
tuple[Optional[float], ...]]],
name: str = "rwth_gradient")\
-> colors.LinearSegmentedColormap:
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
cdict: dict[str, list[tuple[float,
Optional[float],
Optional[float]
]
]] =\
{"red": [], "green": [], "blue": []}
for item in seq:
red_1, green_1, blue_1 = item[0]
red_2, green_2, blue_2 = item[2]
cdict["red"].append((item[1], red_1, red_2))
cdict["green"].append((item[1], green_1, green_2))
cdict["blue"].append((item[1], blue_1, blue_2))
return colors.LinearSegmentedColormap(name, cdict)
def partial_rgb(*x: float) -> tuple[float, ...]:
"""return the rgb value as a fraction of 1"""
return tuple(v / 255.0 for v in x)
hks_44 = partial_rgb(0.0, 84.0, 159.0)
hks_44_75 = partial_rgb(64.0, 127.0, 183.0)
rwth_orange = partial_rgb(246.0, 168.0, 0.0)
rwth_orange_75 = partial_rgb(250.0, 190.0, 80.0)
rwth_gelb = partial_rgb(255.0, 237.0, 0.0)
rwth_magenta = partial_rgb(227.0, 0.0, 102.0)
rwth_bordeux = partial_rgb(161.0, 16.0, 53.0)
rwth_gradient_map = make_colormap(
[
((None, None, None), 0., hks_44),
(hks_44_75, 0.33, hks_44_75),
(rwth_orange_75, 0.66, rwth_orange),
(rwth_bordeux, 1., (None, None, None))
]
)
mpl.colormaps.register(rwth_gradient_map)
def _germanify(ax: Axes, reverse: bool = False) -> None:
"""
    Translate a figure from English to German.
    The direction can be reversed if 'reverse' is set to True.
Use the decorator instead
"""
for axi in ax.figure.axes:
try:
axi.ticklabel_format(
useLocale=True)
except AttributeError:
pass
items = [
axi.xaxis.label,
axi.yaxis.label,
*axi.get_xticklabels(),
*axi.get_yticklabels(),
]
try:
if axi.zaxis is not None:
items.append(axi.zaxis.label)
items += [*axi.get_zticklabels()]
except AttributeError:
pass
if axi.get_legend():
items += [*axi.get_legend().texts]
for item in items:
item.set_text(translate(item.get_text(),
reverse=reverse))
try:
plt.tight_layout()
except IndexError:
pass
@contextmanager
def germanify(ax: Axes,
reverse: bool = False) -> Generator[None, None, None]:
"""
    Translate the plot to German and, if 'reverse' is set to True,
    translate it back again afterwards. If 'reverse' is False, the
    German labels are kept.
"""
old_locale = locale.getlocale(locale.LC_NUMERIC)
try:
try:
locale.setlocale(locale.LC_ALL, "de_DE")
locale.setlocale(locale.LC_NUMERIC, "de_DE")
except locale.Error:
# locale not available
pass
plt.rcParams["axes.formatter.use_locale"] = True
_germanify(ax)
yield
except Exception as e:
print("Translation of the plot has failed")
print(e)
raise
finally:
try:
locale.setlocale(locale.LC_ALL, old_locale)
            locale.setlocale(locale.LC_NUMERIC, old_locale)
except locale.Error:
pass
plt.rcParams["axes.formatter.use_locale"] = False
if reverse:
_germanify(ax, reverse=True)
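# Hedged usage sketch (editorial addition; the path is hypothetical): translate
# an existing figure's labels to German while saving it, then restore the
# English labels afterwards.
#   with germanify(plt.gca(), reverse=True):
#       plt.savefig("plots/figure_german.pdf")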
def data_plot(filename: Union[str, Path]) -> None:
"""
Write the data, which is to be plotted, into a txt-file in csv-format.
"""
# pylint: disable=W0613
if isinstance(filename, str):
file_ = Path(filename)
else:
file_ = filename
file_ = file_.parent / (file_.stem + ".csv")
ax = plt.gca()
try:
with open(file_, "w", encoding="utf-8", newline="") as data_file:
writer = csv.writer(data_file)
for line in ax.get_lines():
writer.writerow(
[line.get_label(), ax.get_ylabel(), ax.get_xlabel()])
writer.writerow(line.get_xdata())
writer.writerow(line.get_ydata())
except PermissionError as e:
print(f"Data-file could not be written for {filename}.")
print(e)
def read_data_plot(filename: Union[str, Path])\
-> dict[str, tuple[Vector, Vector]]:
"""Read and parse the csv-data-files, which have been generated by the
'data_plot'-function."""
data: dict[str, tuple[Vector, Vector]] = {}
with open(filename, "r", newline="", encoding="utf-8") as file_:
reader = csv.reader(file_)
title: str
x_data: Vector
for i, row in enumerate(reader):
if i % 3 == 0:
title = row[0]
elif i % 3 == 1:
x_data = np.array(row, dtype=float)
else:
y_data: Vector
y_data = np.array(row, dtype=float)
data[title] = (x_data, y_data)
return data
@contextmanager
def presentation_figure(figsize: tuple[float, float] = (4, 3)) ->\
Generator[Axes, None, None]:
"""context manager to open an close the file.
default seaborn-like plot"""
fig, ax = plt.subplots(figsize=figsize)
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{helvet}", # set the normal font here
r"\usepackage{sansmath}", # load up the sansmath so that math
# -> helvet
r"\sansmath", # <- tricky! -- gotta actually tell tex to use!
]
mpl.rc("font", family="sans-serif")
mpl.rc("text", usetex=False)
font = {"size": 30}
mpl.rc("font", **font)
plt.set_cmap("rwth_list")
try:
yield ax
except Exception as e:
print("creation of plot failed")
print(e)
raise
finally:
plt.close(fig)
plt.close("all")
mpl.rcParams.update(mpl.rcParamsDefault)
plt.style.use("default")
old_save = plt.savefig
def alternative_save(
filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None,
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
def try_save(filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None, *,
small: bool = False,
slim: bool = False) -> None:
"""Try to save the current figure to the given path, if it is not possible,
try to save it under a different name.
If small is set to true, also create
a smaller version of the given plot.
If slim is set to true, a slightly slimmer version
of the plot is created."""
def alternative_save(
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
try:
old_save(filename, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
old_save(filename.parent / (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
if small:
alternative_save(
figsize=FIGSIZE_SMALL,
subfolder="small")
if slim:
alternative_save(
figsize=FIGSIZE_SLIM,
subfolder="slim")
def new_save_simple(subfolder: Union[str, Path] = "", suffix: str = "", *,
german: bool = False, png: bool = True,
pdf: bool = True, small: bool = False,
slim: bool = False)\
-> Callable[..., None]:
"""
Return a new save function, which saves the file to a new given name in pdf
format, and also creates a png version.
If the argument "german" is set to true, also create German language
version of the plots.
"""
@wraps(old_save)
def savefig_(filename: Union[Path, str],
dpi: Optional[int] = None,
bbox_inches: Optional[
Union[tuple[float, float], str]] = None) -> None:
"""Save the plot to this location as pdf and png."""
if isinstance(filename, str):
filename = Path(filename)
if filename.parent == Path("."):
warn(
f"The filename {filename} in 'savefig' does "
f"not contain a subfolder (i.e. 'subfolder/{filename})! "
"Many files might be created onto the top level.")
if subfolder:
(filename.parent / subfolder).mkdir(exist_ok=True)
new_path_pdf = filename.parent / subfolder / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / subfolder / (
filename.stem + suffix + ".png")
else:
new_path_pdf = filename.parent / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / (
filename.stem + suffix + ".png")
# save the data
data_path = filename.parent / (
filename.stem + ".dat")
if not data_path.exists():
data_plot(data_path)
try:
plt.tight_layout()
except IndexError:
pass
# save the figure
if pdf:
try_save(new_path_pdf, bbox_inches=bbox_inches,
small=small, slim=slim)
if png:
try_save(new_path_png, bbox_inches=bbox_inches,
dpi=dpi, small=small, slim=slim)
if german:
with germanify(plt.gca()):
if pdf:
try_save(
new_path_pdf.parent
/ (new_path_pdf.stem + "_german.pdf"),
bbox_inches=bbox_inches, small=small,
slim=slim)
if png:
try_save(
new_path_png.parent
/ (new_path_png.stem + "_german.png"),
bbox_inches=bbox_inches, dpi=dpi, small=small,
slim=slim)
return savefig_
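# Hedged usage sketch (editorial addition): new_save_simple builds a drop-in
# replacement for plt.savefig that writes pdf/png (and optionally German and
# resized) variants into a subfolder; apply_styles below installs it
# temporarily. The path is hypothetical.
#   plt.savefig = new_save_simple("journal", png=False, small=True)
#   plt.savefig("plots/result.pdf")
#   # -> plots/journal/result.pdf, a data dump in plots/ and a resized copy
#   #    in plots/journal/small/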
def presentation_settings() -> None:
"""Change the settings of rcParams for presentations."""
# increase size
fig = plt.gcf()
fig.set_size_inches(8, 6)
mpl.rcParams["font.size"] = 24
mpl.rcParams["axes.titlesize"] = 24
mpl.rcParams["axes.labelsize"] = 24
# mpl.rcParams["axes.location"] = "left"
mpl.rcParams["lines.linewidth"] = 3
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["xtick.labelsize"] = 18
mpl.rcParams["ytick.labelsize"] = 18
mpl.rcParams["figure.figsize"] = (10, 6)
mpl.rcParams["figure.titlesize"] = 24
mpl.rcParams["font.family"] = "sans-serif"
def set_rwth_colors(three_d: bool = False) -> None:
"""Apply the RWTH CD colors to matplotlib."""
mpl.rcParams["text.usetex"] = False
mpl.rcParams["axes.prop_cycle"] = rwth_cycle
if three_d:
plt.set_cmap("rwth_gradient")
else:
plt.set_cmap("rwth_list")
def set_serif() -> None:
"""Set the plot to use a style with serifs."""
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.serif"] = [
"cmr10", "stix", "Times New Roman"]
mpl.rcParams["mathtext.fontset"] = "cm"
def set_sans_serif() -> None:
"""Set matplotlib to use a sans-serif font."""
mpl.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.sans-serif"] = [
"Arial", "Helvetica", "DejaVu Sans"]
class ThreeDPlotException(Exception):
"""This exception is called when a 3D plot is drawn. This is used to exit
the plotting function with the science-style."""
class FallBackException(Exception):
"""This is excaption is thrown when the fallback-style is selected.
Only for debug purposes."""
def check_3d(three_d: bool) -> None:
"""This function checks if the current plot is a 3d plot. In that case, an
exception is thrown, which can be used to stop the creation of the default
plot."""
if three_d:
raise ThreeDPlotException
if isinstance(plt.gca(), mpl_toolkits.mplot3d.axes3d.Axes3D):
raise ThreeDPlotException
PlotFunction = Callable[..., None]
@overload
def apply_styles(plot_function: PlotFunction, *,
three_d: bool = False,
_fallback: bool = False) -> PlotFunction:
...
@overload
def apply_styles(plot_function: None, *, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
@overload
def apply_styles(*, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
def apply_styles(plot_function: Optional[PlotFunction] = None, *,
three_d: bool = False, _fallback: bool = False)\
-> Union[Callable[[PlotFunction], PlotFunction], PlotFunction]:
"""
Apply the newly defined styles to a function, which creates a plot.
The new plots are saved into different subdirectories and multiple
variants of every plot will be created.
Arguments
--------
    three_d: Use this option for 3D-plots
    _fallback: switch directly to the fallback-style (for debugging)
"""
# pylint: disable=too-many-statements
def _decorator(_plot_function: PlotFunction) -> PlotFunction:
"""This is the actual decorator. Thus, the outer function
'apply_styles' is actually a decorator-factory."""
@wraps(_plot_function)
def new_plot_function(*args: Any, **kwargs: Any) -> None:
"""
New plotting function, with applied styles.
"""
# default plot
plt.set_cmap("rwth_list")
plt.savefig = new_save_simple(png=False)
_plot_function(*args, **kwargs)
errors = (OSError, FileNotFoundError, ThreeDPlotException,
FallBackException)
def journal() -> None:
"""Create a plot for journals."""
set_rwth_colors(three_d)
set_serif()
plt.savefig = new_save_simple("journal", png=False,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def sans_serif() -> None:
"""
Create a plot for journals with sans-serif-fonts.
"""
set_rwth_colors(three_d)
set_sans_serif()
plt.savefig = new_save_simple("sans_serif", german=True,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def grayscale() -> None:
"""
                Create a grayscale plot for dissertations.
"""
mpl.rcParams["text.usetex"] = False
set_serif()
if three_d:
plt.set_cmap("Greys")
new_kwargs = copy(kwargs)
new_kwargs["colorscheme"] = "Greys"
else:
new_kwargs = kwargs
plt.savefig = new_save_simple("grayscale", png=False,
small=not three_d,
slim=not three_d)
_plot_function(*args, **new_kwargs)
plt.close("all")
def presentation() -> None:
"""
Create a plot for presentations.
"""
if three_d:
new_kwargs = copy(kwargs)
new_kwargs["figsize"] = (9, 7)
new_kwargs["labelpad"] = 20
new_kwargs["nbins"] = 5
else:
new_kwargs = kwargs
set_rwth_colors(three_d)
presentation_settings()
set_sans_serif()
plt.savefig = new_save_simple("presentation",
german=True, pdf=False)
_plot_function(*args, **new_kwargs)
plt.close("all")
try:
plt.close("all")
check_3d(three_d)
if _fallback:
raise FallBackException
plt.close("all")
# journal
with plt.style.context(["science", "ieee"]):
journal()
# sans-serif
with plt.style.context(["science", "ieee", "nature"]):
sans_serif()
# grayscale
with plt.style.context(["science", "ieee", "grayscale"]):
grayscale()
# presentation
with plt.style.context(["science", "ieee"]):
presentation()
except errors:
if not three_d:
warn(dedent(""""Could not found style 'science'.
The package was probably installed incorrectly.
Using a fallback-style."""), ImportWarning)
plt.close("all")
# journal
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
journal()
# sans-serif
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
sans_serif()
# grayscale
with plt.style.context("grayscale"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
grayscale()
# presentation
with plt.style.context("fast"):
presentation()
plt.savefig = old_save
return new_plot_function
if plot_function is not None:
return _decorator(plot_function)
return _decorator | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/plot_settings.py | 0.744656 | 0.243817 | plot_settings.py | pypi |
from __future__ import print_function
import re
from functools import wraps
from subprocess import Popen, PIPE
from sys import __stdout__
from os import mkdir
from os.path import dirname, exists
from typing import Iterable, Optional, List, Callable, TypeVar, Union, Any
from pathlib import Path
from collections import OrderedDict
from urllib3.exceptions import MaxRetryError
from python_translator import Translator
def running_average(X: List[float], n: int) -> List[float]:
"""creates a running average of X with n entries in both dircetions"""
X_new = []
for i in range(n):
X_new += [sum(X[0: i + 1]) / (i + 1)]
for i in range(n, len(X) - n):
X_new += [sum(X[i - n: i + n + 1]) / (2 * n + 1)]
for i in range(len(X) - n, len(X)):
X_new += [sum(X[2 * i - len(X) + 1:]) / ((len(X) - (i + 1)) * 2 + 1)]
return X_new
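# Worked example (editorial addition) for the symmetric window above:
#   running_average([1.0, 2.0, 3.0, 4.0, 5.0], n=1)
#   # -> [1.0, 2.0, 3.0, 4.0, 5.0]   (linear data stays unchanged)
#   running_average([1.0, 1.0, 4.0, 1.0, 1.0], n=1)
#   # -> [1.0, 2.0, 2.0, 2.0, 1.0]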
def quality_of_fit(X: List[float], Y: List[float]) -> float:
"""calculates the quality of a fit 'bestimmtheitsmass'"""
mean_x = sum(X) / len(X)
return sum(x ** 2 - mean_x ** 2 for x in X)\
/ sum(x ** 2 - mean_x ** 2 for x in Y)
Return = TypeVar("Return")
def print_redirect(f: Callable[..., Return]) -> Callable[..., Return]:
"""
    Wrap print so that it prints to both stdout and __stdout__.
    The reason for doing that is that Abaqus replaces stdout and prints
    everything into Abaqus; this way everything can be seen both in the
    command line and in Abaqus.
"""
@wraps(f)
def inner_function(*args: Any, **argv: Any) -> Return:
f(*args, **argv)
for i, arg in enumerate(args):
            if isinstance(arg, type):
del arg[i]
for _, val in argv.items():
            if isinstance(val, type):
del val
argv["file"] = __stdout__
f(*args, **argv)
try:
return f(*args, **argv)
except TypeError:
print("The argument 'file' is twice in a print statement")
raise
return inner_function
def filter_function(x: str) -> bool:
"""filter function in order to remove unused
arguments from this script before parsing"""
if ":" in x or "rwth" in x:
return False
analysed_item = x.replace("--", ".")
analysed_item = analysed_item.replace("-", "!")
if len(analysed_item) > 2\
and "." not in analysed_item\
and "!" in analysed_item:
return False
return True
def filter_argv(X: list[str]) -> list[str]:
"""removes unessecery entries from
argv which have been generated by abaqus"""
Y: list[str] = list(X)
for i, x in enumerate(X):
if not filter_function(x):
del Y[i]
if i < len(Y):
del Y[i + 1]
return Y
def dumb_plot(X: list[float], Y: list[float],
title: str = "title", log: bool = False) -> None:
"""plot X and Y using dumb gnuplot"""
try:
with Popen(["gnuplot"], stdin=PIPE) as gnuplot:
assert gnuplot, """Gnuplot could not be started."""
assert gnuplot.stdin, """Input to Gnuplot could not be found."""
gnuplot.stdin.write(bytes("set term dumb 79 25\n", "utf-8"))
if log:
gnuplot.stdin.write(bytes("set logscale xy\n", "utf-8"))
gnuplot.stdin.write(
bytes(
f"""plot '-' using 1:2 title '{title}'
with linespoints \n""",
"utf-8"
)
)
for x, y in zip(X, Y):
gnuplot.stdin.write(bytes(f"{x} {y}\n", "utf-8"))
gnuplot.stdin.write(bytes("e\n", "utf-8"))
gnuplot.stdin.flush()
except FileNotFoundError:
print("There is no installed instance of gnuplot")
return
def read_file(filename: Union[str, Path],
header: int = 0,
type_: type = float) -> tuple[list[float], ...]:
"""read a file with given filename into
a python-tuple. Skip n='header' lines in the beginning
of the document"""
# reads a given file and returns lists
lists: list[list[float]] = []
with open(filename, "r", encoding="utf-8") as input_file:
for i, line in enumerate(input_file):
if i < header:
continue
lists += [[type_(v) for v in line.split()]]
return tuple(list(v) for v in zip(*lists))
def write_file(filename: Union[Path, str],
*args: Iterable[float],
title: Optional[str] = None) -> None:
"""write the rows given in 'arga' to a file"""
# write data to a file
if not any(args[0]):
raise Exception("Tried to write an empty row to a file")
if isinstance(filename, str):
filename = Path(filename)
with open(filename, "w", encoding="utf-8") as output_file:
if title is not None:
output_file.write(title + "\n")
for row in zip(*args):
output_file.write(" ".join(str(r) for r in row) + "\n")
def mkdir_p(foldername: str) -> None:
"""creates a new folder if the folder does not exist"""
try:
if exists(foldername):
return
mkdir(foldername)
except IOError as e:
# recursive creation of needed folders
if e.errno != 2:
raise
mkdir_p(dirname(foldername))
mkdir_p(foldername)
def trash_remover(func: Callable[..., tuple[Return, ...]])\
-> Callable[..., Return]:
"""only keeps the first output of a given function"""
@wraps(func)
def inner_function(*args: Any, **kwargs: Any) -> Return:
result = func(*args, **kwargs)
return result[0]
return inner_function
def use_translator(string: str, lang1: str, lang2: str) -> str:
"""
Translate the given input string from lang2 to lang1 using google
translate.
"""
# one letter strings
if len(string) <= 1:
return string
if r"\times" in string:
return string
try:
translator = Translator()
result: str
result = translator.translate(string, lang1, lang2)
except (OSError, MaxRetryError):
return string
return result
def translate(string: str, reverse: bool = False) -> str:
"""translates a string from english to german
@input string any string wich contains specific english words
@input reverse translate ger->en if set to true
@return the german translation of the same string"""
if reverse:
string = re.sub(r'(\d+),(\d+)', r'\1\.\2', string)
else:
string = re.sub(r'(\d+)\.(\d+)', r'\1,\2', string)
_dict: dict[str, str] = OrderedDict({
"leakage": "Leckage",
"contact pressure": "Kontaktdruck",
"fluid pressure": "Fluiddruck",
"pressure": "Druck",
"density": "Dichte",
"roundness": "Rundheit",
"eccentricity": r"Exzentrizit\"at",
"contact area": r"Kontaktoberfl\"ache",
"area": r"Fl\"ache",
"maximal": "Maximale",
"time": "Zeit",
"normal-force": "Normalkraft",
"normal force": "Normalkraft",
"total force": "Gesamtkraft",
"force": "Kraft",
"distance": "Abstand",
"position": "Position",
"contact broadness": "Kontaktbreite",
"broadness": "Breite",
"contact": "Kontakt-",
"seat": "Sitz",
"ball": "Kugel",
"high": "hoch",
"low": "tief",
"elastic": "elastisch",
"plastic": "plastisch",
"angle": "Winkel",
"degree": "Grad",
"deg": "Grad",
"hard": "hart",
"soft": "weich",
"fit": "Fit",
"data": "Messwerte",
"velocity": "Geschwindigkeit",
"measurement": "Messung",
"experiment": "Experiment",
"simulation": "Simulation",
"analytical": "analytisch",
"signal": "Signal",
"valve control": "Ventilansteuerung",
"off": "zu",
"on": "auf",
"status": "Zustand",
"valve": "Ventil",
"relative pressure": "Relativdruck",
"absolute pressure": "Absolutdruck",
"relative": "relativ",
"absolute": "absolut",
"plot": "Graph"
})
if not reverse and r"\times" not in string:
for key, value in _dict.items():
if key in string.lower():
string = re.sub(key, value, string, flags=re.IGNORECASE)
break
else:
string = use_translator(string, "german", "english")
else:
for key, value in _dict.items():
if value.lower() in string.lower():
string = string.lower().replace(value.lower(), key.lower())
break
else:
string = use_translator(string, "english", "german")
return string | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/utilities.py | 0.551815 | 0.288379 | utilities.py | pypi |
from __future__ import annotations
from os.path import join
from math import pi
from queue import Queue
from threading import Thread
from subprocess import check_output
from typing import (
List, Tuple, TypeVar, Union, Iterable, Any, Optional)
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import array
from .plot_settings import apply_styles, rwth_gradient_map
from .types_ import Vector
mpl.use("Agg")
SURFACEFOLDER = join("simulation", "surface_data")
In = TypeVar("In", List[float], Tuple[float],
Vector)
@apply_styles
def create_two_d_scatter_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
ax = fig.add_subplot(projection="3d")
ax.scatter(Y, X, Z, cmap=rwth_gradient_map)
if xlabel:
ax.set_ylabel(xlabel)
if ylabel:
ax.set_xlabel(ylabel)
ax.set_zlabel(zlabel)
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
@apply_styles
def create_two_d_surface_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
Z_flat: Vector = array(Z)
# X_two_d, Y_two_d=meshgrid(X_flat, Y_flat)
ax = fig.add_subplot(projection="3d")
# ax.plot_surface(X_two_d, Y_two_d, Z_flat, cmap=rwth_gradient_map)
ax.plot_trisurf(Y, X, Z, cmap=rwth_gradient_map)
if ylabel:
ax.set_ylabel(ylabel)
if xlabel:
ax.set_xlabel(xlabel)
ax.set_zlabel(zlabel)
ax.set_zlim(min(Z_flat) * 0.98, max(Z_flat) * 1.05)
ax.set_xlim(min(Y), max(Y))
ax.set_ylim(min(X), max(X))
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
def get_leakage(data: Iterable[Any], var: str = "density",
surface_file: Optional[str] = None) -> list[float]:
"""calculate the leakage for a given set of data
@param data enumerable set of valve-objects
which allow the determination of the leakage
@return list of the same dimension for the leakage"""
if surface_file is None:
surface_path = join(SURFACEFOLDER, "c_q.dat")
leakage_bin = join(".", "subroutines", "bin", "test_leakage")
Y: list[float] = []
X: list[float] = []
q: Queue[Any] = Queue()
# run every call of the fortran-code in parallel
for d in data: # put the data into the
# queue to access them later as needed
q.put(d)
def work_of_queue() -> None:
nonlocal X
nonlocal Y
while True:
d = q.get()
if d is None:
return # last data-point
pressure = max(d.short.p)
print(pressure)
print(d.angle, d.wobble)
C = float(check_output([leakage_bin, "-i", surface_path, "-P",
f"{pressure}"]))
# A=d.short.unroundness2
A = d.short.sigma
R = d.valve.seat.radius
delta_p = d.dictionary["fluid-pressure"]["value"]
Y += [delta_p * 2.0 * pi * R / A * C]
X += [getattr(d, var)]
threads = [Thread(target=work_of_queue) for i in range(16)]
for thread in threads: # start all threads
thread.start()
q.put(None)
for thread in threads: # wait for all threads to finish
thread.join()
return Y
def plot_2d_surface(
data: Iterable[Any],
folder: str = "simulation",
var1: str = "angle",
var2: str = "wobble",
xlabel1: Optional[str] = None,
xlabel2: Optional[str] = None,
surface_file: Optional[str] = None,
) -> None:
"""create the two d surface plots of two given variables"""
X = [getattr(d, var1) for d in data]
Y = [getattr(d, var2) for d in data]
pressure = [max(d.short.p) for d in data]
A = [d.short.unroundness for d in data]
leakage = get_leakage(data, surface_file=surface_file)
create_two_d_scatter_plot(
X, Y, pressure, folder, "two d pressure",
xlabel1, xlabel2, "maximal pressure [MPa]"
)
create_two_d_scatter_plot(
X, Y, A, folder, "two d area", xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_scatter_plot(
X, Y, leakage, folder,
"two d leakage", xlabel2, xlabel2, "leakage [ml/s]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, A, folder, "two d area surface",
xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, leakage, folder, "two d leakage surface",
xlabel2, xlabel2, "leakage [ml/s]"
) | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/two_d_plot.py | 0.901004 | 0.486392 | two_d_plot.py | pypi |
import re
from typing import Any, Callable, Iterable, List, Optional
# ========================================= What can be exported? =========================================
__all__ = ['strings_to_', 'strings_to_integers', 'strings_to_floats', 'string_to_float', 'match_one_string',
'match_one_pattern', 'all_strings']
def strings_to_(strings: Iterable[str], f: Callable) -> Iterable[Any]:
"""
Convert a list of strings to a list of certain form, specified by *f*.
:param strings: a list of string
:param f: a function that converts your string
:return: type undefined, but specified by `to_type`
.. doctest::
>>> strings_to_(['0.333', '0.667', '0.250'], float)
[0.333, 0.667, 0.25]
"""
if not all_strings(strings):
raise TypeError('All have to be strings!')
# ``type(strs)`` is the container of *strs*.
return type(strings)(map(f, strings))
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
"""
Convert a list of strings to a list of integers.
:param strings: a list of string
:return: a list of converted integers
.. doctest::
>>> strings_to_integers(['1', '1.0', '-0.2'])
[1, 1, ValueError('-0.2 cannot be converted to an integer')]
>>> strings_to_integers(['1', '1.0', '-0.'])
[1, 1, 0]
"""
return strings_to_(strings, lambda x: int(float(x)) if float(x).is_integer() else ValueError(
"{} cannot be converted to an integer".format(x)))
def strings_to_floats(strings: Iterable[str]) -> Iterable[float]:
"""
Convert a list of strings to a list of floats.
:param strings: a list of string
:return: a list of converted floats
.. doctest::
>>> strings_to_floats(['1', '1.0', '-0.2'])
[1.0, 1.0, -0.2]
"""
return strings_to_(strings, string_to_float)
def string_to_float(s: str) -> float:
"""
Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
by Python ``float`` function, so I wrote this function to help conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_float('1d-82')
1e-82
>>> string_to_float('-1.0D-82')
-1e-82
>>> string_to_float('+0.8D234')
8e+233
>>> string_to_float('.8d234')
8e+233
>>> string_to_float('+1.0D-5')
1e-05
>>> string_to_float('-0.00001')
-1e-05
>>> string_to_float('.8e234')
8e+233
>>> string_to_float('.1')
0.1
"""
return float(re.sub('d', 'e', s, flags=re.IGNORECASE))
def match_one_string(pattern: str, s: str, *args):
"""
Make sure you know only none or one string will be matched! If you are not sure, use `match_one_pattern` instead.
:param pattern:
:param s:
:param args:
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def"
>>> match_one_string(p, s, int)
123
>>> print(match_one_string(p, "abc"))
Pattern "\d+" not found, or more than one found in string abc!
None
>>> print(match_one_string(p, "abc 123 def 456"))
Pattern "\d+" not found, or more than one found in string abc 123 def 456!
None
"""
try:
# `match` is either an empty list or a list of string.
match, = re.findall(pattern, s)
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return wrapper(match)
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
except ValueError:
print("Pattern \"{0}\" not found, or more than one found in string {1}!".format(
pattern, s))
def match_one_pattern(pattern: str, s: str, *args: Callable, **flags):
"""
Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no
wrapper is given, return the pure matched string. If no match is found, return None.
:param pattern: a pattern, can be a string or a regular expression
:param s: a string
:param args: at most 1 argument can be given
:param flags: the same flags as ``re.findall``'s
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def 456"
>>> match_one_pattern(p, s)
['123', '456']
>>> match_one_pattern(p, s, int)
[123, 456]
>>> match_one_pattern(p, "abc 123 def")
['123']
>>> print(match_one_pattern('s', 'abc'))
Pattern "s" not found in string abc!
None
>>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE)
['S', 's']
"""
match: Optional[List[str]] = re.findall(pattern, s,
**flags) # `match` is either an empty list or a list of strings.
if match:
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return [wrapper(m) for m in match]
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
else: # If no match is found
print("Pattern \"{0}\" not found in string {1}!".format(pattern, s))
return None
def all_strings(iterable: Iterable[object]) -> bool:
"""
If any element of an iterable is not a string, return `True`.
:param iterable: Can be a set, a tuple, a list, etc.
:return: Whether any element of an iterable is not a string.
.. doctest::
>>> all_strings(['a', 'b', 'c', 3])
False
>>> all_strings(('a', 'b', 'c', 'd'))
True
"""
return all(isinstance(_, str) for _ in iterable) | /scientific_string-0.1.0-py3-none-any.whl/scientific_string/__init__.py | 0.86852 | 0.540621 | __init__.py | pypi |
import matplotlib.pyplot as plt
import numpy as np
from scientific_tools.graphics.function_graphs import plot_2Dfunction
import scientific_tools.physics.uncertainty as uncertainty
def plot_uncertainty_function(f, u_f, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="f(x)", uncertainty_label="f(x)±u(f(x))", function_color='red', uncertainty_color='blue', function_linestyle="-", uncertainty_linestyle="-", **kwargs) :
"""Draw a graph with f, f + u_f and f - u_f
Draw an uncertainty graph with the function f, the function plus its uncertainty (f + u_f) and the fonction minus its uncertainty (f - u_f).
f is a function that take at least one argument x that varies from min_x to max_x by taking values_number values.
u_f calculate the uncertainty of f. It take at least one argument : x. (To add other arguments, see plot_2Dfunction documentation. N.B. f and u_f must have the same arguments)
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the text display in the legend about function curve
uncertainty_label is the text display in the legend about curves that represent f ± u_f
function_color is color of function curve
uncertainty_color is the color of curves that represent f ± u_f
function_linestyle & uncertainty_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
f_plus_u = lambda *args, **kwargs : f(*args, **kwargs) + u_f(*args, **kwargs)
plot_2Dfunction(f_plus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, function_label=uncertainty_label, **kwargs)#draw f+u_f
f_minus_u = lambda *args, **kwargs : f(*args, **kwargs) - u_f(*args, **kwargs)
plot_2Dfunction(f_minus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, **kwargs)#draw f-u_f
plot_2Dfunction(f, min_x, max_x, values_number, args_before_x, args_after_x, title=title, xlabel=xlabel, ylabel=ylabel, function_label=function_label, color=function_color, linestyle =function_linestyle, **kwargs)#draw f (this is the last function drawing else title and axes labels haven't been displayed)
plt.legend()#show function & uncertainty labels
def plot_uncertainty_points(x, y, u_x, u_y, title="Experimental values with error bar", xlabel="", ylabel="") :
"""Draw experimental values with error bar
x is the list of x coordinates, y is the list of y coordinates
u_x is the list of x uncertainties, u_y is the list of y uncertainties
xlabel is the text to display with the x ax
ylabel is the text to display with the y ax
"""
plt.errorbar(x, y, xerr=u_x, yerr=u_y, fmt='bo', label='Mesures')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def null_function(*args, **kwargs) :
"""Return 0 for all value of 'value'.
It's can use as an uncertainty calculator when the function is a reference function. (see the documentation of plot_z_score_graph).
"""
return 0
def plot_z_score_graph(f1, u_f1, f2, u_f2, min_x, max_x, values_nb, args_f1_before_x=[], args_f1_after_x=[], kwargs_f1={}, args_f2_before_x=[], args_f2_after_x=[], kwargs_f2={}, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-",) :
"""Trace the z-score between two functions
f1 is the first function & f2 is the second one.
u_f1 is the function that calculate the f1 uncertainty & u_f2 calculate f2 uncertainty.
Those four functions takes at least one argument x that varies from min_x to max_x by taking values_nb values.
f1 and u_f1 take same args and kwargs. args_f1_before_x is the list of f1 positional arguments before the x position
args_f1_after_x is the list of f1 positional arguments after the x position
kwargs_f1 is a dictionary with f1 kwargs
(Idem for f2)
If a function is a function reference, u_f must be null_function (define in this module).
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
x_values = np.linspace(min_x, max_x, values_nb)
#calculate values for f1 & f2
f1_values = []
u_f1_values = []
f2_values = []
u_f2_values = []
for x in x_values :
f1_values.append(f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
if u_f1 is not null_function :
u_f1_values.append(u_f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
f2_values.append(f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
if u_f2 is not null_function :
u_f2_values.append(u_f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
z_score_values = []
#calculate z_score
if u_f1 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f2_values[i], f1_values[i], u_f2_values[i]))
elif u_f2 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f1_values[i], f2_values[i], u_f1_values[i]))
else :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score(f1_values[i], u_f1_values[i], f2_values[i], u_f2_values[i]))
#displaying
plt.plot(x_values, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x_values), np.max(x_values)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x_values), np.max(x_values)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_z_score_points_graph(x, y1, u_y1, y2, u_y2, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-") :
"""Trace the z-score between two lists of points
x is the list of point abscissa
y1 is the first list of values & f2 is the second one.
u_y1 is the list of uncertainties of y1 points & u_y2 is the list for y2 points uncertainties. If a list of points is a reference, u_y be a list of zero
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
z_score_values = []
#calculate z_score
for i in range(len(x)) :
z_score_values.append(uncertainty.z_score(y1[i], u_y1[i], y2[i], u_y2[i]))
#displaying
plt.plot(x, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x), np.max(x)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x), np.max(x)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel) | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/graphics/uncertainty_graphs.py | 0.777469 | 0.784484 | uncertainty_graphs.py | pypi |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_2Dfunction(function, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="", color="blue", linestyle ="-", **kwargs) :
"""Trace the 2D graphic of the function "function"
function is a function with at least one argument x
args_before_x is the list of positional arguments before the variable argument's position
args_after_x is the list of positional arguments after the variable argument's position
The value of the variable argument x varies from min_x to max_variable by taking values_number values
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the label of the function. (Doesn't show it if you doesn't call plt.legend() after this plot_2Dfunction.)
color is the line color
linestyle is the line style (cf Matplotlib docs for futher information)
You can add after keywords arguments for the function "function"
"""
variable_list = np.linspace(min_x, max_x, values_number)
results_list = []
for variable in variable_list :
results_list.append(function(*args_before_x, variable, *args_after_x, **kwargs))
#displaying
plt.plot(variable_list, results_list, color=color, linestyle=linestyle, label=function_label)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_3Dfunction(function, min_x, max_x, values_x, min_y, max_y, values_y, args_before_variables=[], args_between_variables=[], args_after_variables=[], x_before_y = True, title="", xlabel ="", ylabel="", zlabel="", colormap=cm.RdYlGn, **kwargs) :
"""Trace the 3D graphic of the function "function"
function is a function with at least two arguments
args_before_variable is the list of positional arguments before the first variable argument's position
args_between_variables is the list of positional arguments between positions of the first and the second variable
args_after_variables is the list of positional arguments after the second variable argument's position
x_before_x is true if x variable is the first variable (in the function arguments order)
The value of the "x" variable varies from min_x to max_x by taking values_x values
Idem for "y" variable
title is the graph title
xlabel, ylabel and zlabel are texts to put on the axes
colormap is the colormap used for displaying
You can add after keywords arguments for the function "function"
"""
line = np.linspace(min_x, max_x, values_x)
array_x = np.array([line for i in range(values_y) ], dtype=float)
#create an array with x values
column = np.linspace(min_y, max_y, values_y)
array_y = np.array([[column[j]]*values_x for j in range(values_y)], dtype=float)
#create an array with y values
results = []#a array like object with values of function
for i in range(values_y) :
results_line = []
for j in range(values_x) :
variable1 = array_x[i][j]
variable2 = array_y[i][j]
if x_before_y is False :
variable1, variable2 = variable2, variable1
results_line.append(function(*args_before_variables, variable1, *args_between_variables, variable2, *args_after_variables, **kwargs))
results.append(results_line)
array_z = np.array(results, dtype=float)
linewidth = (max_x - min_x+ max_y - min_y)/20#to trace around 10 lines
#displaying
ax = plt.axes(projection='3d')#3D diplaying
ax.plot_surface(array_x, array_y, array_z, cmap=colormap, linewidth=linewidth)#linewidth : distance between two lines
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel) | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/graphics/function_graphs.py | 0.562537 | 0.73053 | function_graphs.py | pypi |
"""Calculate standard uncertainty (standart uncertainty mainly)"""
from warnings import WarningMessage
import numpy as np
def standard_uncertainty(u_x, u_y, dz_dx, dz_dy) :
"""Calculate the standard uncertainty of z with the general formule."""
return np.sqrt((u_x*dz_dx)**2+(u_y*dz_dy)**2)
def standard_uncertainty_addition(u_x, u_y, a=1, b=1) :
"""Calculate the standard uncertainty of z = ax + by (a & b const).
a and b are constants define with no uncertainty
"""
return np.sqrt((a**2)*(u_x**2)+(b**2)*(u_y)**2)
def relative_uncertainty_multiplication(ur_x, ur_y, a=1, b=1, c=1) :
"""Calculate the relative uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
c have no influance on the result
ur(x) = u(x)/x. Idem for y.
"""
return np.sqrt((a*ur_x)**2+(b*ur_y)**2)
def relative_uncertainty_multiplications(k=1, *args) :
"""Calculate the relative uncertainty of z = k*Π(xi^ni) (ni & k are const)
ni & k are constants define with no uncertainty
Arguments are tuples (or lists) of with those elements (in this order) : relative uncertainty and power (optionnal, default is 1)
k have no influance on the result
"""
u_r2 = 0#relative uncertainty**2
for arg in args :
if not isinstance(arg, (list, tuple)) :
raise TypeError("args must be tuples or lists")
if len(arg) < 1 :
raise ValueError("args must have at least one element : relative uncertainty and power (optionnal, default is 1)")
if len(arg) > 2 :
raise WarningMessage("args must have at most two elements : relative uncertainty and power (optionnal, default is 1)")
u_r2 += (arg[1]*arg[0])**2
return np.sqrt(u_r2)
def standard_uncertainty_multiplication(x, u_x, y, u_y, a=1, b=1, c=1) :
"""Calculate the standard uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
"""
z = c*(x**a)*(y**b)
return relative_uncertainty_multiplication(u_x/x, u_y/y, a, b, c)*abs(z)
def standard_uncertainty_multiplications(k=1, *args) :
"""Calculate the standard uncertainty of z = k*Π(xi^ni) (ni & k are const)
ni & k are constants define with no uncertainty
Arguments are tuples (or lists) of with those elements (in this order) : value, standard uncertainty and power (optionnal, default is 1)
"""
z=k
u_r2 = 0#relative uncertainty**2
for arg in args :
if not isinstance(arg, (list, tuple)) :
raise TypeError("args must be tuples or lists")
if len(arg) < 2 :
raise ValueError("args must have at least two elements : value, standard uncertainty and power (optionnal, default is 1)")
if len(arg) >3 :
raise WarningMessage("args must have at most three elements : value, standard uncertainty and power (optionnal, default is 1)")
z *= arg[0]**arg[2]
u_r2 += (arg[2]*arg[1]/arg[0])**2
return abs(z)*np.sqrt(u_r2)
def z_score_ref(x, x_ref, u_x):
"""Calculate the z-score between a measured value and a reference value.
x is the measured value, x_ref the reference value and u_x the uncertainty
"""
#this function is similar to z_score(x, u_x, x_ref, 0)
#but avoid to calculate square and square root
return abs((x-x_ref)/u_x)
def z_score(x1, u_x1, x2, u_x2) :
"""Calculate the z-score between two measured values
x1 is the first value, x2 the second
u_x1 is the uncertainty for x1, u_x2 for x2
"""
return abs(x1-x2)/np.sqrt(u_x1**2 + u_x2**2) | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/physics/uncertainty.py | 0.888414 | 0.783947 | uncertainty.py | pypi |
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from scientisst.constants import *
class ArgParser:
class MyParser(ArgumentParser):
def error(self, message):
sys.stderr.write("error: %s\n\n" % message)
self.print_help()
sys.exit(2)
def __init__(self):
usage = "%(prog)s [args] address"
description = "description: The program connects to the ScientISST Sense device and starts an acquisition, providing the option to store the received data in a .csv file."
self.parser = self.MyParser(
usage=usage, description=description, formatter_class=RawTextHelpFormatter
)
self.parser.add_argument(
"address",
nargs="?",
type=str,
help="For BTH communication:\n\tLinux: BTH MAC address\n\tMac: serial port address\n\tWindows: BTH serial COM port\nFor TCP/UDP communication:\n\tAll plataforms: server port.",
)
self.parser.add_argument(
"-f",
"--frequency",
dest="fs",
help="sampling frequency, default: 1000",
type=int,
default=1000,
)
self.parser.add_argument(
"-c",
"--channels",
dest="channels",
type=str,
help="analog channels, default: 1,2,3,4,5,6",
default="1,2,3,4,5,6",
)
self.parser.add_argument(
"-d",
"--duration",
dest="duration",
help="duration in seconds, default: unlimited",
type=int,
default=0,
)
self.parser.add_argument(
"-o",
"--output",
dest="output",
help="write report to output file, default: None",
type=str,
default=None,
)
self.parser.add_argument(
"-r",
"--raw",
action="store_false",
dest="convert",
default=True,
help="do not convert from raw to mV",
)
self.parser.add_argument(
"-s",
"--lsl",
dest="stream",
action="store_true",
default=False,
help="stream data using Lab Streaming Layer protocol. Use `python -m pylsl.examples.ReceiveAndPlot` to view stream",
)
self.parser.add_argument(
"--script",
dest="script",
help="send the received frames to a script that inherits the CustomScript class",
type=str,
default=None,
)
self.parser.add_argument(
"-q",
"--quiet",
action="store_false",
dest="verbose",
default=True,
help="don't print ScientISST frames",
)
self.parser.add_argument(
"-v",
"--version",
dest="version",
action="store_true",
default=False,
help="show sense.py version",
)
self.parser.add_argument(
"--verbose",
dest="log",
action="store_true",
default=False,
help="log sent/received bytes",
)
self.parser.add_argument(
"-m",
"--mode",
dest="mode",
type=str,
default=COM_MODE_BT,
help="The communication mode. Currently supported modes: "
+ ", ".join(COM_MODE_LIST)
+ ". Default: "
+ COM_MODE_BT,
)
self.args = self.parser.parse_args()
def error(self, value):
self.parser.error(value) | /scientisst_sense-1.1.0-py3-none-any.whl/sense_src/arg_parser.py | 0.463444 | 0.206834 | arg_parser.py | pypi |
class InvalidAddressError(Exception):
"""
The specified address is invalid.
"""
def __init__(self):
super().__init__("The specified address is invalid.")
class BTAdapterNotFoundError(Exception):
"""
No Bluetooth adapter was found.
"""
def __init__(self):
super().__init__("No Bluetooth adapter was found.")
class DeviceNotFoundError(Exception):
"""
The device could not be found.
"""
def __init__(self):
super().__init__("The device could not be found.")
class ContactingDeviceError(Exception):
"""
The computer lost communication with the device.
"""
def __init__(self):
super().__init__("The computer lost communication with the device.")
class PortCouldNotBeOpenedError(Exception):
"""
The communication port does not exist or it is already being used.
"""
def __init__(self):
super().__init__(
"The communication port does not exist or it is already being used."
)
class PortInitializationError(Exception):
"""
The communication port could not be initialized.
"""
def __init__(self):
super().__init__("The communication port could not be initialized.")
class DeviceNotIdleError(Exception):
"""
The device is not idle.
"""
def __init__(self):
super().__init__("The device is not idle.")
class DeviceNotInAcquisitionError(Exception):
"""
The device is not in acquisition mode.
"""
def __init__(self):
super().__init__("The device is not in acquisition mode.")
class InvalidParameterError(Exception):
"""
Invalid parameter.
"""
def __init__(self):
super().__init__("Invalid parameter.")
class NotSupportedError(Exception):
"""
Operation not supported by the device.
"""
def __init__(self):
super().__init__("Operation not supported by the device.")
class UnknownError(Exception):
"""
Unknown error: `message`.
"""
def __init__(self, message=""):
super().__init__("Unknown error: {}".format(message)) | /scientisst_sense-1.1.0-py3-none-any.whl/scientisst/exceptions.py | 0.793346 | 0.191933 | exceptions.py | pypi |
# Discriminant Analysis with categorical variables (DISQUAL)
```
# Chargement des librairies
import numpy as np
import pandas as pd
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
DTrain = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="train",header=0)
display(DTrain.head())
DTrain.info()
from scientisttools.discriminant_analysis import DISQUAL
disqual = DISQUAL(n_components=None,
target=["group"],
features_labels=list(DTrain.columns[:-1]),
row_labels=DTrain.index,
parallelize=False)
disqual.fit(DTrain)
stats_test = disqual.statistics_test_
stats_test.keys()
stats_test["chi2"]
stats_test["log-likelihood-test"]
stats_test["cramer's V"]
stats_test["tschuprow's T"]
stats_test["pearson"]
```
## Résultats de l'ACM
```
mca = disqual.mca_model_
```
### Valeurs propres
```
eig = mca.eig_.T
display(pd.DataFrame(eig))
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(mca,ax=axe,n_components=32)
plt.show()
from scientisttools.extractfactor import get_mca_mod,get_mca_ind
mod =get_mca_mod(mca)
mod_infos = mod["infos"]
display(mod_infos)
mod_coord = mod["coord"]
display(mod_coord.iloc[:,:2])
# Fonction de projection
fproj = disqual.projection_function_
display(fproj.iloc[:,:2])
# Coordonnées des individus
row = get_mca_ind(mca)
row_coord = row["coord"]
display(row_coord.head(10).iloc[:,:2])
# Coeffcients du lDA
lda_coef = disqual.lda_coef_
display(lda_coef)
lda_intercept = disqual.lda_intercept_
display(lda_intercept)
# Evaluation globale
se = disqual.statistical_evaluation_
display(se)
coef = disqual.coef_
display(coef)
intercept = disqual.intercept_
display(intercept)
DTest = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="test",header=0)
DTest.head()
DTest.info()
XTest = DTest[DTest.columns[:-1]]
new_coord = disqual.transform(XTest)
new_coord.iloc[:,:2]
XTrain = DTrain[DTrain.columns[:-1]]
coord = disqual.transform(XTrain)
coord.iloc[:,:2]
pred_train = disqual.predict(XTrain)
pred_train
from sklearn.metrics import confusion_matrix
confusion_matrix(DTrain[DTrain.columns[-1]],pred_train)
pred_test = disqual.predict(XTest)
pred_test
confusion_matrix(DTest[DTest.columns[-1]],pred_test)
prob_train = disqual.predict_proba(XTrain)
prob_train
prob_test = disqual.predict_proba(XTest)
prob_test
```
| /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/disqual_example.ipynb | 0.408277 | 0.755592 | disqual_example.ipynb | pypi |
# Additionnal functions
```
from scientisttools.utils import *
import numpy as np
from scipy.spatial.distance import pdist,squareform
# Match arg
lst = ["gaussian", "epanechnikov", "rectangular", "triangular"]
print(match_arg("gauss", lst))
print(match_arg("pauss", lst))
# is_euclidean
np.random.seed(123)
w = np.array(np.random.uniform(size=10000)).reshape(100,100)
w = squareform(pdist(w,metric="euclidean"))
is_euclidean(w)
is_euclidean(w,plot=True,printf=True)
w = np.array([[1,4],[2,5],[3,6]])
bicenter_wt(w, [0.2,0.6,0.2], [0.3,0.7])
notes = np.array([13, 11, 10, 11, 12, 5, 8, 7, 2, 4, 16, 17, 13, 16, 15])
suivi = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3])
display(notes)
display(suivi)
pd.DataFrame(eta2(suivi,notes),index=['eta - test'])
```
| /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/utils.ipynb | 0.407687 | 0.816736 | utils.ipynb | pypi |
# Canonical Discriminant Analysis on Iris dataset
```
from seaborn import load_dataset
import numpy as np
import pandas as pd
iris = load_dataset("iris")
print(iris.head())
# Chargement de la
from scientisttools.discriminant_analysis import CANDISC
candisc = CANDISC(n_components=2,
target=["species"],
row_labels=iris.index,
features_labels=list(iris.columns[:-1]),
parallelize=False)
# Instanciattion
candisc.fit(iris)
```
### Summary Information
```
candisc.summary_information_.T
```
#### Class level Information
```
candisc.class_level_information_
```
### Squared Mahalanobis Distances and Distance statistics
```
candisc.squared_mdist_
# Univariate statistics
candisc.univariate_test_statistis_
candisc.anova_
# Multivariate
# Ne pas oublier la fonction print
print(candisc.manova_)
# Likelihood test
candisc.likelihood_test_
candisc.eig_.T
## Correlation between Canonical and Original Variables
# Total Canonical Structure
from scientisttools.extractfactor import get_candisc_var
pd.concat(get_candisc_var(candisc,choice="correlation"),axis=0)
# Raw Canonical Coefficients
from scientisttools.extractfactor import get_candisc_coef
coef = get_candisc_coef(candisc,choice="absolute")
coef
# Class Means on Canonical Variables
candisc.gmean_coord_
from scientisttools.pyplot import plotCANDISC
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(16,8))
plotCANDISC(candisc,color=["blue",'#5DC83F','red'],marker=['o',"*",'>'],ax=axe)
plt.show()
score_coef = get_candisc_coef(candisc,choice="score")
score_coef
from scientisttools.extractfactor import summaryCANDISC
summaryCANDISC(candisc,to_markdown=True)
```
## Backward Elimination
```
from scientisttools.discriminant_analysis import STEPDISC
# Backward Elimination
stepdisc = STEPDISC(method="backward",
alpha=0.01,
model_train=True,
verbose=True)
stepdisc.fit(candisc)
stepdisc.train_model_
```
| /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/candisc_iris.ipynb | 0.492432 | 0.766731 | candisc_iris.ipynb | pypi |
# Canonical Discriminant Analysis (CANDISC)
```
# Chargement des librairies
import numpy as np
import pandas as pd
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
# Chargement de la base
DTrain = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="WINE",header=0)
DTrain.head()
from scientisttools.discriminant_analysis import CANDISC
candisc = CANDISC(n_components=2,
target=["Qualite"],
row_labels=DTrain.index,
features_labels=list(DTrain.columns[:-1]),
priors=None,
parallelize=False)
# Entraînement
candisc.fit(DTrain)
candisc.correlation_ratio_
candisc.anova_
print(candisc.manova_) # ne pas oublier d'utiliser print
```
## Coefficients canoniques bruts
```
from scientisttools.extractfactor import get_candisc_coef
# Coeffcients
coef = get_candisc_coef(candisc)
coef
from scientisttools.pyplot import plotCANDISC
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(16,8))
plotCANDISC(candisc,color=["blue",'#5DC83F','red'],marker=['o',"*",'>'],ax=axe)
plt.show()
candisc.global_performance_
candisc.likelihood_test_
from scientisttools.extractfactor import get_candisc_var
# Covariance
pd.concat(get_candisc_var(candisc,choice="covariance"),axis=0)
# Correlation avec les axes
pd.concat(get_candisc_var(candisc,choice="correlation"),axis=0)
```
### Individus supplémentaires
```
## Inidvidu supplémentaire
XTest = pd.DataFrame({"Temperature" : 3000, "Soleil" : 1100, "Chaleur" : 20, "Pluie" : 300},index=[1958])
XTest
candisc.transform(XTest)
candisc.decision_function(XTest)
candisc.predict_proba(XTest)
```
## Fonctions de décision
```
score_coef = get_candisc_coef(candisc,choice="score")
score_coef
XTrain = DTrain.drop(columns=["Qualite"])
candisc.decision_function(XTrain).head()
candisc.predict_proba(XTrain).head()
candisc.predict(XTrain).head()
# score
candisc.score(XTrain,DTrain["Qualite"])
from scientisttools.extractfactor import summaryCANDISC
summaryCANDISC(candisc,to_markdown=True)
```
## Backward Elimination
```
from scientisttools.discriminant_analysis import STEPDISC
stepdisc = STEPDISC(method="backward",alpha=0.01,model_train=True,verbose=True)
stepdisc.fit(candisc)
stepdisc.train_model_
fig, axe =plt.subplots(figsize=(16,8))
plotCANDISC(stepdisc.train_model_,color=["blue",'#5DC83F','red'],marker=['o',"*",'>'],ax=axe)
plt.show()
# Summary
summaryCANDISC(stepdisc.train_model_,to_markdown=True)
```
| /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/candisc_wine.ipynb | 0.457137 | 0.660172 | candisc_wine.ipynb | pypi |
# Scierra
Scierra [_see-eh-rah_] is a **S**imulated **C**++ **I**nt**er**preter with **R**ecurrent **A**daptation.
In human words, it's a interactive interpreter for C++, which allows you to run and debug your program immediately as you type. Well, basically. But the implementation is slightly trickier.
To get a quick start, simply launch Scierra on the terminal and type `cout << "Hello, World!";`. Yes, that's a complete C++ program in Scierra!
**WARNING:** Scierra is still under development. Even though many vital aspects of C++ (e.g. function definition, templates, classes) are already supported, Scierra does not handle input statements very well. This is unfortunately keeping Scierra in Beta...
## Navigation
* [Example](#Example)
* [Installation](#Installation)
* [Prerequisites](#Prerequisites)
* [Install with PIP](#Install-with-PIP)
* [Usage](#Usage)
* [Quick Start](#Quick-Start)
* [Keywords](#Keywords)
* [Docs](#Docs)
* [Anatomy of a C++ Program in Scierra](#Anatomy-of-a-C-Program-in-Scierra)
* [Unsupported features](#Unsupported-features)
* [LICENSE](#LICENSE)
## Example
***An sample program running on the Scierra interpreter:***
```c++
++> cout << "Hello, World!";
Hello, World!
++> int factorial(int n){
--> if (n==1 || n==0)
--> return 1;
--> else return n * factorial(n-1);
--> }
++> cout << "10 factorial is: " << factorial(10);
10 factorial is: 3628800
```
## Installation
### Prerequisites:
* **Python** must be **installed** and **added to PATH**.
The key ideas of Scierra and it's CLI have been implemented in Python.
* **GCC** (GNU Compiler Collection) must be **installed** and **added to PATH**.
This allows Python to access G++ through the command line. If you're a Linux user, there's a good chance that GCC tools are already included in your distro. Users of other operating systems like Windows or MacOS may need to make additional installations. MinGW has been tested to work with Scierra on Windows.
### Install with PIP
Install Scierra with PIP using:
$ pip install scierra
After installation, run Scierra on your terminal using:
$ scierra
## Usage
### Quick Start
Launch `scierra` in your terminal, and try pasting in the full sample program below.
Note Scierra's ability to automatically categorise whether the block of code you've just typed belongs to the `main` function section, global declarations section, or preprocessors section (refer to the [anatomy of a C++ program in Scierra](#Anatomy-of-a-C-Program-in-Scierra)). The `<esc>` command closes the interpreter.
```c++
cout << "Hello, World!\n";
#define CYAN "\033[36m"
#define GREEN "\033[32m"
#define DEFAULT "\033[0m"
cout << GREEN << "I am SCIERRA" << DEFAULT << endl;
int factorial(int n){
if (n==1 || n==0)
return 1;
else return n * factorial(n-1);
}
cout << CYAN << "10 factorial is: " << factorial(10) << DEFAULT << endl;
<esc>
```
Below is a demo of the above program running in a terminal with Scierra:

### Keywords
Type these special keywords at any stage when writing your code to perform special functions.
* `<print>`: Prints out the code you've written so far.
* `<restart>`: Restarts another interpreter session and forgets all local variables.
* `<esc>`: Terminates Scierra.
#### Code keywords
Put the following keywords at the start of each block of your code for special operations.
* `<`: Using this keyword before a single-lined statement without any semicolons (e.g. `<10+23` or `<"Hey!"`) makes Scierra automatically output the evaluated value of the statement. It works with all data types, variables and classes that supports `cout` statements. You can even join multiple outputs together! E.g.
```c++
++> int x = 132;
++> < x*7
924
++> < x%127 << x%12 << "COOL!"
50COOL!
++>
```
* `<prep>`: Forcefully specifies that the block of code that you type belongs to the 'preprocessor' section of the program. E.g.
```c++
++> <prep>
--> const int Answer_to_Ultimate_Question_of_Life = 42;
++>
```
This puts `const int Answer_to_Ultimate_Question_of_Life = 42;` in the 'preprocessors' section. Without the `<prep>` keyword, this statement would be automatically placed in the `main` function by Scierra.
Refer to: [Anatomy of a C++ Program in Scierra](#Anatomy-of-a-C-Program-in-Scierra).
* `<glob>`: Forcefully specifies that the block of code that you type belongs to the 'globals' section of the program.
Refer to: [Anatomy of a C++ Program in Scierra](#Anatomy-of-a-C-Program-in-Scierra).
* `<main>`: Forcefully specifies that the block of code that you type belongs to the `main` function in the program.
Refer to: [Anatomy of a C++ Program in Scierra](#Anatomy-of-a-C-Program-in-Scierra).
## Docs
### Anatomy of a C++ Program in Scierra
Scierra divides a C++ program into three distinct sections: the 'preprocessor' section, the 'globals' section, and the 'main' section. Please refer to the [keywords and expressions table](#Keywords-and-Expressions-Table) for the full list of keywords and expressions that Scierra uses to categorise a block of code. However, here is a quick overview:
The 'preprocessor' section comes at the top of the program. This is where libraries are imported and namespaces are defined. By default in Scierra, the libraries `iostream`, `sstream`, `fstream`, `vector` and `string` are already imported, and the namespace `std` is under use. The 'globals' section is reserved for global class and function declarations, while the 'main' section goes into the `main` function of your C++ program.
When you enter a block of code in Scierra, it automatically categorises it into one of these three sections based on syntactical keywords and expressions. You can override this automatic behaviour by using one of the [code keywords](#Code-Keywords).
#### Keywords and Expressions Table
Here is a table showing the different keywords and expressions that Scierra uses to categorise your block of code.
| Preprocessor Section | Globals Section | Main Section |
| :---: | :---: | :---: |
| `#include` statement | `class` keyword | _Anything that doesn't fit into the former two sections_ |
| `#define` statement | `struct` keyword | |
| `typedef` keyword | `return` keyword | |
| `using` keyword | `void` keyword | |
| | `template` keyword | |
| | `typename` keyword | |
### Unsupported features
Scierra supports most features that come with your installed version of GCC.
However, unfortunately the following features are not yet supported by Scierra:
* any expression involving inputs
* lambda expressions
* range-based for loops
## LICENSE
[Apache License 2.0](LICENSE)
| /scierra-0.6.1.tar.gz/scierra-0.6.1/README.md | 0.649023 | 0.851953 | README.md | pypi |
# sciex
Framework for "scientific" experiments (Result organization; Experiment and Trial setup; Baseline Comparisons)
This tool helps strip out the repetitive parts of setting up and running experiments, and lets you focus on writing the logic of trial running and result types. This reduces the stupid errors one may make when running experiments, and makes results organized and gathering statistics convenient.
## Setup
```
pip install sciex
```
## How it works
#### An Experiment consists of trials
In this framework, an `Experiment` consists of a number of `Trial`s. One trial is independent from another.
```python
class Experiment:
"""One experiment simply groups a set of trials together.
Runs them together, manages results etc."""
def __init__(self, name, trials, outdir,
logging=True, verbose=False):
```
A `Trial` can be initialized by a `name` and a `config`. There is no requirement on the format of `config`; It just needs to be able to be serialized by pickle. Note that we recommend a convention of giving trial names:
```python
class Trial:
def __init__(self, name, config, verbose=False):
"""
Trial name convention: "{trial-global-name}_{seed}_{specific-setting-name}"
Example: gridworld4x4_153_value-iteration-200.
The ``seed'' is optional. If not provided, then there should be only one underscore.
"""
```
**Your job** is to define a child class of `Trial`, implementing its function `run()`, so that it catersx to your experiment design.
#### Parallel trial running
We want to save time and run trials in parallel, if possible. Thus, instead of directly executing the trials, `Experiment` first saves the trials as `pickle` files in **an organized manner**, then generates __shell scripts__, each bundles a subset of all the trials. The shell script contains commands using `trial_runner.py` to conduct trials. More specifically, the **organized manner** means, the pickle files will be saved under, along with `trial_runner.py`, the shell scripts and `gather_results.py`.
```
./{Experiment:outdir}
/{Experiment:name}_{timestamp}
/{Trial:name}
/trial.pkl
gather_results.py
run_{i}.sh
trial_runner.py
```
Inside the shell script `run_{i}` where `i` is the index of the bundle, you will only find commands of the sort:
```
python trial_runner.py {path/to/trial/trial.pkl} {path/to/trial} --logging
```
Thus, you just need to do
```
$ ./run_{i}.sh
```
on a terminal to execute the trials covered by this shell script. You can open multiple terminals and run all shell scripts together in parallel.
##### New
**Divide run scripts by computer.** This is useful
for the case where you have generated a lot of running
scripts (each runs multiple trials) and you want to
add another level of grouping.
```
python -m sciex.divide ./ -c sunny windy rainy cloudy -r 0.3 0.2 0.3 0.2 -n 4 3 4 3
```
where `./` is the path to the experiment root directly (here I am just there),
`sunny windy rainy cloudy` are four computers, and I want to run 30% run scripts
on `sunny`, 20% on `windy`, and so on. On each computer I will start e.g. `4` terminals
for `sunny` and `3` for `windy`, etc.
**Run multiple trials with shared resource.** The trials are contained
in a run script, or a file with a list of paths to trial pickle files.
The trial is expected to implement `provide_shared_resource` and
`could_provide_resource` and the resource should only be read, and not written to
by the trials. (NOTE this doesn't actually work with cuda models because of difficulty sharing CUDA memory)
```
python -m sciex.batch_runner run_script_or_file_with_trial_paths {Experiment:outdir}
```
#### Result types
We know that for different experiments, we may produce results of different type. For example, some times we have a list of values, some times a list of objects, sometimes a particular kind of object, and some times it is a combination of multiple result types. For instance, each trial in a image classification task may produce labels for test images. Yet each trial in image segmentation task may produce arrays of pixel locations. We want you to decide what those result types are and how to process them. Hence the `Result` interface (see `components.py`).
To be more concrete, in `sciex`, each `Trial` can produce multiple `Result`s. Trials with the same `specific_name` (see trial naming convention above) will be **gathered** (to compute a statistic). See the interface below:
```python
class Result:
def save(self, path):
"""Save result to given path to file"""
raise NotImplemented
@classmethod
def collect(cls, path):
"""path can be a str of a list of paths"""
raise NotImplemented
@classmethod
def FILENAME(cls):
"""If this result depends on only one file,
put the filename here"""
raise NotImplemented
@classmethod
def gather(cls, results):
"""`results` is a mapping from specific_name to a dictionary {seed: actual_result}.
Returns a more understandable interpretation of these results"""
return None
@classmethod
def save_gathered_results(cls, results, path):
"""results is a mapping from global_name to the object returned by `gather()`.
Post-processing of results should happen here.
Return "None" if nothing to be saved."""
return None
...
```
Basically, you define how to save this kind of result (`save()`), how to collect it, i.e. read it from a file (`collect()`), how to gather a set of results of this kind (`gather()`), for example, computing mean and standard deviation. As an example, `sciex` provides a `YamlResult` type (see `result_types.py`):
```python
import yaml
import pickle
from sciex.components import Result
class YamlResult(Result):
def __init__(self, things):
self._things = things
def save(self, path):
with open(path, "w") as f:
yaml.dump(self._things, f)
@classmethod
def collect(cls, path):
with open(path) as f:
return yaml.load(f)
```
We didn't define the `gather()` and `save_gathered_results()` functions because these are experiment-specific. For example, in a reinforcement learning experiment, I may want to gather rewards as a type of result. Here's how I may implement that. Notice that since I know I will put these results in a paper, my implementation of `save_gathered_results` will be saving a LaTex table in a `.tex` file.
```python
class RewardsResult(YamlResult):
def __init__(self, rewards):
"""rewards: a list of reward floats"""
super().__init__(rewards)
@classmethod
def FILENAME(cls):
return "rewards.yaml"
@classmethod
def gather(cls, results):
"""`results` is a mapping from specific_name to a dictionary {seed: actual_result}.
Returns a more understandable interpretation of these results"""
# compute cumulative rewards
myresult = {}
for specific_name in results:
all_rewards = []
for seed in results[specific_name]:
cum_reward = sum(list(results[specific_name][seed]))
all_rewards.append(cum_reward)
myresult[specific_name] = {'mean': np.mean(all_rewards),
'std': np.std(all_rewards),
'_size': len(results[specific_name])}
return myresult
@classmethod
def save_gathered_results(cls, gathered_results, path):
def _tex_tab_val(entry, bold=False):
pm = "$\pm$" if not bold else "$\\bm{\pm}$"
return "%.2f %s %.2f" % (entry["mean"], pm, entry["std"])
# Save plain text
with open(os.path.join(path, "rewards.txt"), "w") as f:
pprint(gathered_results, stream=f)
# Save the latex table
with open(os.path.join(path, "rewards_latex.tex"), "w") as f:
tex =\
"\\begin{tabular}{ccccccc}\n%automatically generated table\n"\
" ... "
for global_name in gathered_results:
row = " %s & %s & %s & %s & %s & %s\\\\\n" % (...)
tex += row
tex += "\\end{tabular}"
f.write(tex)
return True
```
Then, after all the results are produced, when you want to gather the results and produce some report of the statistics (or plots), just run
```
$ ./{Experiment:outdir}/{Experiment:name}_{timestamp}/gather_results.py
```
| /sciex-0.3.tar.gz/sciex-0.3/README.md | 0.874533 | 0.897874 | README.md | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019, Lluís Vilanova"
__license__ = "GPL version 3 or later"
# pylint: disable=no-name-in-module,import-error
from sciexp2.common import utils
from sciexp2.common.filter import Filter
# pylint: disable=redefined-builtin
def extract(template, function, filter=None, path="_PATH_"):
"""Extract data from all files matching `template` and `filter`.
Parameters
----------
template : str
Template for file paths to extract.
function : callable
Function returning a pandas data frame from a single file.
filter : str or Filter, optional
Filter for the set of path names extracted from `template`. Can
reference any variable name in `template` as well as the one in `path`.
path : str, optional
Variable name used to hold each path name extracted from `template` when
finding the files (see `sciexp2.common.utils.find_files`).
Returns
-------
pandas.DataFrame or None
Pandas data frame with the data from all files matching `template` and
`filter`. Variables in `template` are added as new columns into the
result (with their respective values on each row). If no file is found
matching `template` and `filter`, returns `None`.
Notes
-----
Argument `function` is called with a single argument, corresponding to one
of the file path names matching `template` and `filter`.
"""
filter_ = filter
if filter_ is None:
filter_ = Filter()
else:
filter_ = Filter(filter_)
result = None
files = utils.find_files(template, path=path)
for elem in files:
if not filter_.match(elem):
continue
elem_path = elem.pop(path)
try:
data = function(elem_path)
except:
print(f"ERROR: while extracing data from: {elem_path}")
raise
data = data.assign(**elem)
if result is None:
result = data
else:
result = result.append(data, ignore_index=True)
return result | /sciexp2-expdata-0.1.7.tar.gz/sciexp2-expdata-0.1.7/sciexp2/expdata/pandas.py | 0.785144 | 0.382718 | pandas.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2009-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import glob
import os
import six
import pydoc
from sciexp2.common import text
import sciexp2.expdef.system
#: Paths to search for available templates.
#:
#: The order of the list establishes which template will be used in case it
#: exists in more than one directory.
#:
#: Includes the current directory and the `templates` directory shipped with
#: SciExp².
SEARCH_PATH = [
os.curdir,
os.sep.join([os.path.dirname(__file__), "templates"]),
]
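# Illustrative use (the directory name below is hypothetical): client code can
# prepend its own directory so that local template descriptors take precedence
# over the ones bundled with SciExp2, e.g.:
#
#   import sciexp2.expdef.templates as templates
#   templates.SEARCH_PATH.insert(0, "./my-templates")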
def _get_path(name):
"""Get the path to descriptor or template file with given name."""
for path in SEARCH_PATH:
file_path = os.sep.join([path, name])
if os.path.isfile(file_path):
return file_path
return None
_DOC = pydoc.TextDoc()
def _bold(string):
    return _DOC.bold(string)
def _indent(string):
    return _DOC.indent(string)
class _FakeDict(dict):
    """Dictionary that yields an empty string for any missing key.
    Used for the first pass over a descriptor file, so that references to the
    parent_* variables resolve even before the parent template is loaded.
    """
    def __getitem__(self, key):
        if key not in self:
            dict.__setitem__(self, key, "")
        return dict.__getitem__(self, key)
class TemplateError (Exception):
"""Error retrieving template file."""
def __init__(self, message):
Exception.__init__(self, message)
class Template:
"""A launcher template.
    Attributes that are not plain strings must be specified in the template
    descriptor file as a string naming the corresponding object.
Attributes
----------
name : str
Template name (taken from the file name of descriptor file).
description : str
Template description.
system : `sciexp2.expdef.system.System`, optional
Name of the execution system this template if for. Can be inherited
from `parent`.
template : str, optional
The template file this descriptor uses. For file ``name.tpl`` you must
use ``name``. Can be inherited from `parent`.
parent : `sciexp2.templates.Template`, optional
Parent template to inherit from.
submit_args : list of str, optional
Extra arguments passed to the job submission program.
overrides : dict, optional
Dictionary mapping variable names in the parent template to their
corresponding values for this template.
defaults : dict, optional
Dictionary mapping variable names in the template to their
corresponding values, in case the user provides none.
Notes
-----
Template descriptor files can use the following variables to refer to the
corresponding attributes set in their parent template:
- `parent_submit_args`
- `parent_overrides`
- `parent_defaults`
Argument `submit_args` can contain variable *LAUNCHER_BASE*, which contains
the absolute path to the base output directory
(`expdef.experiments.Experiments.out`).
"""
_LOADING = []
def __init__(self, name):
"""Create a new template object from its descriptor file."""
if name in Template._LOADING:
raise TemplateError("Circular template dependency: %s"
% " -> ".join(Template._LOADING + [name]))
self.name = name
# load descriptor file
dsc_path = _get_path(self.name + ".dsc")
if dsc_path is None:
raise TemplateError("Cannot find template descriptor file '%s'" %
(self.name + ".dsc"))
dsc = open(dsc_path, "r").read()
# load to know who's the parent
globals_ = dict(
parent_submit_args=[],
parent_overrides=_FakeDict(),
parent_defaults=_FakeDict(),
)
namespace = {}
six.exec_(dsc, globals_, namespace)
self._init_parent(namespace, dsc_path)
# reload with parent-specific information
if self.parent is None:
globals_ = dict(
parent_submit_args=[],
parent_overrides={},
                parent_defaults={},
)
else:
globals_ = dict(
parent_submit_args=list(self.parent.submit_args),
parent_overrides=dict(self.parent.overrides),
                parent_defaults=dict(self.parent.defaults),
)
namespace = {}
six.exec_(dsc, globals_, namespace)
namespace.pop("parent", None)
self._init_description(namespace, dsc_path)
self._init_system(namespace, dsc_path)
self._init_template(namespace, dsc_path)
self._init_submit_args(namespace)
self._init_overrides(namespace)
self._init_defaults(namespace)
# do not accept any other variable in the descriptor
if len(namespace) > 0:
raise TemplateError("Unknown variables in template %s: %s" %
(dsc_path, ", ".join(namespace)))
def _init_parent(self, namespace, dsc_path):
parent_name = namespace.pop("parent", None)
if parent_name is not None:
Template._LOADING.append(self.name)
try:
self.parent = get(parent_name)
except TemplateError as e:
raise TemplateError("When loading parent of %s: %s" %
(dsc_path, str(e)))
Template._LOADING.remove(self.name)
else:
self.parent = None
def _init_description(self, namespace, dsc_path):
self.description = namespace.pop("description", None)
if self.description is None:
raise TemplateError("Template descriptor without 'description': "
"%s" % dsc_path)
def _init_system(self, namespace, dsc_path):
self.system = None
if self.parent is not None:
self.system = self.parent.system
system_name = namespace.pop("system", None)
if system_name:
try:
self.system = sciexp2.expdef.system.get(system_name)
except sciexp2.expdef.system.SystemError as e:
raise TemplateError("Error loading 'system' for template "
"%s: %s" % (dsc_path, e.message))
elif self.system is None:
raise TemplateError("Template descriptor without 'system': "
"%s" % dsc_path)
def _init_template(self, namespace, dsc_path):
self.template = None
if self.parent is not None:
self.template = self.parent.template
self.template_path = self.parent.template_path
template_name = namespace.pop("template", None)
if template_name:
self.template = template_name
self.template_path = _get_path(self.template + ".tpl")
if self.template_path is None:
raise TemplateError("Template descriptor with incorrect "
"'template' %r: %s" %
(self.template, dsc_path))
elif self.template is None:
raise TemplateError("Template descriptor without 'template': "
"%s" % dsc_path)
def _init_submit_args(self, namespace):
parent_submit_args = self.parent.submit_args if self.parent else []
self.submit_args = namespace.pop("submit_args", parent_submit_args)
def _init_overrides(self, namespace):
parent_overrides = self.parent.overrides if self.parent else {}
self_overrides = namespace.pop("overrides", {})
self.overrides = dict(self_overrides)
for key, val in parent_overrides.items():
new_val = text.translate(val, self_overrides)
if new_val != val or key not in self_overrides:
self.overrides[key] = new_val
def _init_defaults(self, namespace):
if self.parent:
self.defaults = dict(self.parent.defaults)
else:
self.defaults = {}
self.defaults.update(namespace.pop("defaults", {}))
def get_short_description(self, get_long=False):
"""Get short description."""
res = [_bold(self.name)]
contents = []
contents += [self.description.strip()]
has_parent = self.parent is not None
if has_parent:
contents += [_bold("Parent : ") + self.parent.name]
if get_long or not has_parent:
contents += [_bold("System : ") + self.system.name]
contents += [_bold("Template: ") + self.template]
res.append(_indent("\n".join(contents)))
return "\n".join(res)
def get_description(self):
"""Get a full description."""
res = [self.get_short_description(True)]
contents = []
if len(self.submit_args) > 0:
contents += [_bold("Submit arguments:")]
contents += [_indent(" ".join(self.submit_args))]
if len(self.defaults) > 0:
contents += [_bold("Default values:")]
defaults = ["%-15s :: \"%s\"" % (var, val)
for var, val in sorted(six.iteritems(self.defaults))]
contents += [_indent("\n".join(defaults))]
with open(self.template_path) as contents_file:
mandatory_vars = set(text.get_variables(contents_file.read()))
mandatory_vars |= set([
v
for val in six.itervalues(self.overrides)
for v in text.get_variables(val)])
mandatory_vars -= self.system.assumes()
mandatory_vars -= self.system.defines()
mandatory_vars -= set(self.defaults)
if len(mandatory_vars) > 0:
contents += [_bold("Mandatory variables:")]
mandatory = sorted([str(var) for var in mandatory_vars])
contents += [_indent("\n".join(mandatory))]
with open(self.template_path) as contents_file:
contents += [_bold("Contents:")]
fcontents = "".join(contents_file.readlines())
overrides = dict(self.overrides)
for var in text.get_variables(fcontents):
if var not in overrides:
overrides[var] = "{{%s}}" % var
fcontents = text.translate(fcontents, overrides, recursive=False)
contents += [_indent(fcontents)]
res += [_indent("\n".join(contents))]
return "\n".join(res)
_TEMPLATES = {}
def get(name):
"""Get a Template object by name."""
if name in _TEMPLATES:
res = _TEMPLATES[name]
else:
res = Template(name)
_TEMPLATES[name] = res
return res
def _get_all_templates():
"""Search for all possible template file descriptors."""
for path in SEARCH_PATH:
for file_path in glob.iglob(path + os.sep + "*.dsc"):
name = os.path.basename(file_path)[:-4]
get(name)
def get_description():
"""Get a short description of all available templates."""
_get_all_templates()
return "\n\n".join([tpl.get_short_description()
for tpl in six.itervalues(_TEMPLATES)])
# --- end of file: sciexp2/expdef/templates.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2009-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import abc
import glob
import imp
import os
import shutil
import six
import weakref
import sciexp2.common.instance
from sciexp2.common.filter import *
from sciexp2.common import text
from sciexp2.common import utils
#: Paths to search for available execution systems.
#:
#: The order of the list establishes which execution system implementation will
#: be used in case it exists in more than one directory.
#:
#: Includes the current directory and the `system` directory shipped with
#: SciExp².
SEARCH_PATH = [
os.curdir,
os.path.dirname(__file__),
]
_DEVNULL = open("/dev/null", "w")
class SystemError (Exception):
"""Error loading system."""
def __init__(self, message):
Exception.__init__(self, message)
class SubmitArgsError (Exception):
"""Error translating job submission arguments."""
def __init__(self, variables):
Exception.__init__(
self,
"Found non-exported variables in job submission arguments: " +
", ".join(variables))
class System (six.with_metaclass(abc.ABCMeta)):
"""Abstract job manager.
Each system must implement the abstract methods defined in this class
and define two class attributes:
========= =======================================================
Name Description
========= =======================================================
`ASSUMES` List of variables that are assumed to be present in the
launchers instance group for the system to work.
`DEFINES` List of variables that the system internally defines and thus
must not be present in the launchers instance group.
========= =======================================================
See also
--------
compute_state
"""
ASSUMES = ["LAUNCHER", "DONE", "FAIL"]
DEFINES = ["_STATE", "LAUNCHER_BASE"]
def __init__(self, base_path, launchers, depends, submit_args):
"""
Parameters
----------
base_path : str
Base directory where launchers are located.
launchers : InstanceGroup
Group describing the launchers.
depends : sequence of str
Variable names to which jobs depend.
submit_args : sequence of str
Extra arguments to the job-submitting program.
"""
self._base = base_path
self._base_abs = os.path.realpath(self._base)
assert os.path.isdir(self._base_abs)
self._launchers = launchers
for assume in self.assumes():
if assume not in self._launchers and len(self._launchers) > 0:
raise ValueError("Variable '%s' must be present" % assume)
for define in self.defines():
if define in self._launchers:
raise ValueError("Variable '%s' must not be present" % define)
self._jobs = None
self._depends = set(depends)
self._submit_args = list(submit_args)
def get_relative_path(self, path, cwd=None):
"""Get path (relative to base) as relative to `cwd`."""
if cwd is None:
cwd = os.getcwd()
if not os.path.isabs(path):
path = os.path.join(self._base_abs, path)
return os.path.relpath(path, cwd)
def build(self, types, *filters):
"""Generate a sequence with the jobs matching the given criteria.
Parameters
----------
types : set
Set of states that the jobs must be on.
filters : list of filters
List of filters that the jobs must match.
See also
--------
Job
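Examples
--------
A minimal sketch (``system`` is assumed to be a concrete `System`
instance, and ``BENCH`` a variable present in the launchers):

>>> jobs = system.build({Job.NOTRUN}, "BENCH == 'foo'") # doctest: +SKIP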
"""
self.compute_state()
build_filter = and_filters(*filters)
if len(types) > 0:
state_filter = " or ".join(["_STATE == '%s'" % state
for state in types
if state != "inverse"])
if "inverse" in types:
state_filter = "not (%s)" % state_filter
build_filter = and_filters(build_filter, state_filter)
if len(self._jobs) > 0:
build_filter.validate(set(self._jobs.variables()))
return self._jobs.select(build_filter)
else:
return sciexp2.common.instance.InstanceGroup()
@classmethod
def assumes(cls):
"""The set of variables that must be present on the launchers."""
return set(System.ASSUMES + cls.ASSUMES)
@classmethod
def defines(cls):
"""The set of variables that must not be present on the launchers."""
return set(System.DEFINES + cls.DEFINES)
@abc.abstractmethod
def compute_state(self):
"""Compute the current state of jobs.
The implementation must set the ``_jobs`` attribute with an
InstanceGroup of `Job` instances. This can be computed using the
contents of the ``_launchers`` attribute.
"""
pass
@staticmethod
def post_generate(base, path, instance, xlator):
"""Post-process the generation of file `path`."""
pass
class Job (six.with_metaclass(abc.ABCMeta, sciexp2.common.instance.Instance)):
"""Abstract job descriptor.
Each job must implement the abstract methods defined in this class.
See also
--------
state, submit, kill
"""
# job states
RUNNING = "running"
DONE = "done"
FAILED = "failed"
OUTDATED = "outdated"
NOTRUN = "notrun"
STATES = [
RUNNING,
DONE,
FAILED,
OUTDATED,
NOTRUN,
]
STATE_SHORT = {
RUNNING: u"\u2699",
DONE: u"\u2713",
FAILED: "x",
OUTDATED: "o",
NOTRUN: " ",
}
STATE_LONG = {
RUNNING: "Running",
DONE: "Done",
FAILED: "Failed",
OUTDATED: "Outdated",
NOTRUN: "Not run",
}
def __init__(self, system, state, instance):
"""
Parameters
----------
system : System
System for which this job is.
state : str
Execution state of the job.
instance : str
Launcher instance describing this job.
"""
sciexp2.common.instance.Instance.__init__(self, instance)
self["_STATE"] = state
self._system = weakref.proxy(system)
def __repr__(self):
return repr(sciexp2.common.instance.Instance(self))
@classmethod
def compute_state(cls, system, instance):
"""Generic job state computation.
Parameters
----------
system : System
System for which this job is being checked.
instance
Launcher instance describing a job.
Returns
-------
Generic job state according to the failed/done files; otherwise returns
`NOTRUN`.
"""
fail_path = instance["FAIL"]
if not os.path.isabs(fail_path):
fail_path = os.sep.join([system._base, fail_path])
if os.path.exists(fail_path):
return cls.FAILED
done_path = instance["DONE"]
if not os.path.isabs(done_path):
done_path = os.sep.join([system._base, done_path])
if not os.path.exists(done_path):
return cls.NOTRUN
done_mtime = os.stat(done_path).st_mtime
for dep in system._depends:
path = text.translate(dep, instance)
if path == "":
continue
path = utils.get_path(path)
if not os.path.isabs(path):
path = os.sep.join([system._base, path])
if not os.path.exists(path) or \
done_mtime < os.stat(path).st_mtime:
return cls.OUTDATED
return cls.DONE
@abc.abstractmethod
def state(self):
"""Return a string describing the job and its state."""
pass
@abc.abstractmethod
def submit(self, *args):
"""Submit a job to execution."""
pass
def _submit_args(self, args):
"""Return extra arguments for the job submitting program."""
instance = dict(self)
instance["LAUNCHER_BASE"] = self._system._base_abs
try:
return [text.translate(arg, instance)
for arg in self._system._submit_args + list(args)]
except text.VariableError as e:
raise SubmitArgsError([str(e)])
@abc.abstractmethod
def kill(self, *args):
"""Kill a job from execution."""
pass
def _kill_args(self, args):
"""Return extra arguments for the job killing program."""
instance = dict(self)
instance["LAUNCHER_BASE"] = self._system._base_abs
try:
return [text.translate(arg, instance)
for arg in list(args)]
except text.VariableError as e:
raise SubmitArgsError([str(e)])
def get(name):
"""Get an execution system implementation by name.
See also
--------
SEARCH_PATH
"""
try:
info = imp.find_module(name, SEARCH_PATH)
system = imp.load_module(name, *info)
except ImportError:
raise SystemError("Unknown system %r" % name)
try:
res = system.System
except AttributeError:
raise AttributeError("Does not look like an execution " +
"system implementation: %s" % name)
res.name = name
return res
# --- end of file: sciexp2/expdef/system/__init__.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2013-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import collections
import contextlib
import multiprocessing
import multiprocessing.pool
from . import utils
#: Default amount of parallelism.
PARALLELISM = True
def get_parallelism(parallelism):
"""Compute the amount of available parallelism according to the system.
Parameters
----------
parallelism
Requested parallelism.
Notes
-----
The returned amount of parallelism depends on the value of `parallelism`:
================ ======================================================
Value Meaning
================ ======================================================
`None` Use `PARALLELISM` instead.
`True` Auto-detect as the number of cores in the system.
positive integer Use the given fixed amount.
negative integer Auto-detect as the number of cores in the system minus
the given value.
================ ======================================================
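Examples
--------
For example (the auto-detected values depend on the machine):

>>> get_parallelism(4)
4
>>> get_parallelism(-1) == multiprocessing.cpu_count() - 1
True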
"""
if parallelism is None:
parallelism = PARALLELISM
if parallelism is True:
parallelism = multiprocessing.cpu_count()
elif isinstance(parallelism, int):
if parallelism == 0:
raise ValueError("Invalid parallelism setting: %s" % parallelism)
if parallelism < 0:
parallelism = multiprocessing.cpu_count() + parallelism
else:
raise TypeError("Invalid parallelism setting: %s" % parallelism)
return parallelism
#: Default amount of blocking.
BLOCKING = 50
#: Amount of blocking to use when the work length is unknown.
BLOCKING_UNKNOWN = 20
#: Maximum amount of blocking.
BLOCKING_MAX = 100
def get_blocking(blocking, work_len, parallelism=None):
"""Compute the amount of necessary blocking according to work length.
Blocking establishes the amount of work items that each worker receives at
a time. When using processes, this will reduce the costs of communication.
Parameters
----------
blocking
Amount of blocking.
work_len : int or None
Work length (unknown if `None`).
parallelism
Argument to `get_parallelism`.
Notes
-----
Argument `blocking` can mean different things depending on its value type:
================ ======================================================
Value Meaning
================ ======================================================
`None` Use `BLOCKING` instead.
`True` Evenly partition block with the amount of parallelism.
positive integer Use the given fixed amount.
positive float Use the given ratio of the given work elements, up to
`BLOCKING_MAX`.
================ ======================================================
If the work size is unknown, `True` or a float argument will revert to
`BLOCKING_UNKNOWN`.
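Examples
--------
For example:

>>> get_blocking(10, work_len=1000)
10
>>> get_blocking(True, work_len=None)
20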
"""
if blocking is None:
blocking = BLOCKING
if blocking is True:
if work_len is None:
blocking = BLOCKING_UNKNOWN
else:
parallelism = get_parallelism(parallelism)
blocking = max(1, work_len // parallelism)  # chunksize must be a positive integer
elif isinstance(blocking, float):
if work_len is None:
blocking = BLOCKING_UNKNOWN
else:
blocking = max(1, int(min(work_len * blocking, BLOCKING_MAX)))
elif not isinstance(blocking, int):
raise TypeError("Invalid blocking setting: %s" % blocking)
return blocking
@contextlib.contextmanager
def configuration(**kwargs):
"""Context manager to temporarily override global parameters.
Parameters
----------
kwargs
Temporary values to global parameters.
Examples
--------
Temporarily override the default parallelism to all cores minus one and set
blocking to 100:
>>> def fadd (x) : return x + 1
>>> def fmul (x) : return x * 2
>>> with configuration(PARALLELISM = -1, BLOCKING = 100):
... s = p_imap(range(10000), fadd)
... res = p_imap(s, fmul) # doctest: +SKIP
This is equivalent to:
>>> s = p_imap(range(10000), fadd,
... parallelism = -1, blocking = 100) # doctest: +SKIP
>>> res = p_imap(s, fmul,
... parallelism = -1, blocking = 100) # doctest: +SKIP
"""
variables = ["PARALLELISM", "BLOCKING", "BLOCKING_UNKNOWN", "BLOCKING_MAX"]
backup = {}
new = {}
for var in variables:
backup[var] = globals()[var]
new[var] = kwargs.pop(var, backup[var])
utils.assert_kwargs(kwargs)
globals().update(new)
yield None
globals().update(backup)
def _p_init(work, parallelism, blocking):
if isinstance(work, collections.abc.Sized):
work_len = len(work)
else:
work_len = None
parallelism = get_parallelism(parallelism)
blocking = get_blocking(blocking, work_len, parallelism)
pool = multiprocessing.Pool(processes=parallelism)
return pool, blocking
def p_imap(work, func, parallelism=None, blocking=None):
"""Return a sequence with the result of mapping `func` to `work` in parallel.
This function uses processes.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.Pool, multiprocessing.Pool.imap
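Examples
--------
A minimal sketch; the callable must be picklable (e.g. a top-level function):

>>> list(p_imap(range(5), abs)) # doctest: +SKIP
[0, 1, 2, 3, 4]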
"""
pool, blocking = _p_init(work, parallelism, blocking)
res = iter(pool.imap(func, work, chunksize=blocking))
pool.close()
return res
def p_imap_unordered(work, func, parallelism=None, blocking=None):
"""Return a sequence with the result of mapping `func` to `work` in parallel.
This function uses processes.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.Pool, multiprocessing.Pool.imap_unordered
"""
pool, blocking = _p_init(work, parallelism, blocking)
res = iter(pool.imap_unordered(func, work, chunksize=blocking))
pool.close()
return res
def p_map(work, func, parallelism=None, blocking=None):
"""Return a list with the result of mapping `func` to `work` in parallel.
This function uses processes.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.Pool, multiprocessing.Pool.map
"""
pool, blocking = _p_init(work, parallelism, blocking)
res = pool.map(func, work, chunksize=blocking)
pool.close()
return res
def _t_init(work, parallelism, blocking):
if isinstance(work, collections.abc.Sized):
work_len = len(work)
else:
work_len = None
parallelism = get_parallelism(parallelism)
blocking = get_blocking(blocking, work_len, parallelism)
pool = multiprocessing.pool.ThreadPool(processes=parallelism)
return pool, blocking
def t_imap(work, func, parallelism=None, blocking=None):
"""Return a sequence with the result of mapping `func` to `work` in parallel.
This function uses threads.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.pool.ThreadPool, multiprocessing.pool.ThreadPool.imap
"""
pool, blocking = _t_init(work, parallelism, blocking)
res = iter(pool.imap(func, work, chunksize=blocking))
pool.close()
return res
def t_imap_unordered(work, func, parallelism=None, blocking=None):
"""Return a sequence with the result of mapping `func` to `work` in parallel.
This function uses threads.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.pool.ThreadPool, multiprocessing.pool.ThreadPool.imap
"""
pool, blocking = _t_init(work, parallelism, blocking)
res = iter(pool.imap_unordered(func, work, chunksize=blocking))
pool.close()
return res
def t_map(work, func, parallelism=None, blocking=None):
"""Return a list with the result of mapping `func` to `work` in parallel.
This function uses threads.
Parameters
----------
work : sequence
Sequence of items to process in parallel.
func : callable
Function to apply on each element of `work`.
parallelism
Argument to `get_parallelism`.
blocking
Argument to `get_blocking`.
See also
--------
multiprocessing.pool.ThreadPool, multiprocessing.pool.ThreadPool.map
"""
pool, blocking = _t_init(work, parallelism, blocking)
res = pool.map(func, work, chunksize=blocking)
pool.close()
return res
# --- end of file: sciexp2/common/parallel.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2008-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import collections.abc
import re
import linecache
def _re_match(value, pattern):
cre = re.compile(pattern)
return cre.match(value) is not None
class Filter:
"""Boolean expression to check against a dict-like object.
The filter contains an arbitrary Python expression, where every variable
will be taken from the dict we are matching the filter against.
Parameters
----------
expression : Filter or dict or str, optional
Expression to use in the filter.
Raises
------
SyntaxError
The expression is not valid.
Notes
-----
If `expression` is a dict-like object, it will define an expression that
exactly matches its items.
Every filter will have the following global names defined:
============================ ==========================
``re_match(var, str)`` Check if ``var`` matches the regular
expression ``str``.
============================ ==========================
See also
--------
validate
match
and_filters, or_filters
Examples
--------
Filters can be easily composed together:
>>> f1 = Filter("a < 3")
>>> f2 = Filter("b == 4")
>>> and_filters(f1, f2)
Filter("(a < 3) and (b == 4)")
>>> or_filters(f1, f2)
Filter("(a < 3) or (b == 4)")
Filter objects can be later matched against dict-like objects:
>>> f = Filter("a < 3 and b == 4")
>>> f.match(dict(a=2, b=4))
True
>>> f.match(dict(a=3, b=4))
False
Using a dict as an expression is equivalent to building a perfect match for
the dict's items:
>>> Filter({"VAR1": 1, "VAR2": 2})
Filter("VAR1 == 1 and VAR2 == 2")
"""
_GLOBALS = {"re_match": _re_match}
def __init__(self, expression=None):
if expression is None or expression == "":
expression = "True"
elif isinstance(expression, Filter):
# pylint: disable=protected-access
expression = expression._expression
elif isinstance(expression, collections.abc.Mapping):
keys = sorted(expression.keys())
expression = " and ".join(["%s == %r" % (key, expression[key])
for key in keys])
self._expression = expression
self._code_id = "<dynamic-%d>" % id(self._expression)
self._code = compile(self._expression, self._code_id, "eval")
linecache.cache[self._code_id] = (len(self._expression), None,
self._expression.split("\n"),
self._code_id)
def __del__(self):
if self._code_id in linecache.cache:
del linecache.cache[self._code_id]
def __str__(self):
"""Return a string representation of the filter."""
return self._expression
def __repr__(self):
return "Filter(\"%s\")" % str(self)
def validate(self, allowed):
"""Validate that variables in the filter are present in the given set.
Parameters
----------
allowed : set of variable names
Set of variable names to allow on the filter.
Raises
------
NameError
Filter contains a variable name not present in `allowed`.
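Examples
--------
>>> Filter("a < 3").validate(["a", "b"])
>>> Filter("a < 3").validate(["b"])
Traceback (most recent call last):
    ...
NameError: name 'a' is not allowed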
"""
present = set(self._code.co_names)
missing = present - (set(allowed) | set(["re_match"]))
if missing:
missing = list(missing)
raise NameError("name %r is not allowed" % missing[0])
def match(self, source):
"""Check if the given `source` matches this filter.
Parameters
----------
source : dict-like
Dictionary to match this filter against.
Returns
-------
bool : Whether the match is positive or not.
Raises
------
NameError
Filter contains a variable name not present in `source`.
See also
--------
validate
"""
# pylint: disable=eval-used
return eval(self._code, dict(source), self._GLOBALS)
def and_filters(*filters):
"""Convenience function to *and* all `filters` together."""
filters = ["(%s)" % Filter(f) for f in filters]
expression = " and ".join(filters)
return Filter(expression)
def or_filters(*filters):
"""Convenience function to *or* all `filters` together."""
filters = ["(%s)" % Filter(f) for f in filters]
expression = " or ".join(filters)
return Filter(expression)
__all__ = [
"Filter", "and_filters", "or_filters",
]
# --- end of file: sciexp2/common/filter.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2008-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import os
import shutil
import signal
import subprocess
import tempfile
import functools
import collections
import weakref
import numpy as np
import six
from . import pp
from . import progress
# -----------------------------
def assert_kwargs(kwargs):
"""Raise an exception if extra keys are present."""
if kwargs:
extra = "s" if len(kwargs) > 1 else ""
raise TypeError("Unexpected argument%s: %s" % (extra,
", ".join(kwargs)))
# -----------------------------
def assert_dir(path):
"""Check that given directory exists, otherwise create it."""
if path != "" and not os.path.exists(path):
os.makedirs(path)
def assert_path(path):
"""Check that given path exists, otherwise create directories."""
if not path.endswith(os.sep):
path = os.path.dirname(path)
assert_dir(path)
def get_path(path):
"""Get path after expanding user and environment variables."""
path = os.path.expanduser(path)
return os.path.expandvars(path)
def get_file(path, mod="w"):
"""Open the given file, creating any intermediate directory."""
dir_path = os.path.dirname(path)
assert_dir(dir_path)
return open(path, mod)
def get_tmp_file(mode="w", delete=True):
"""Get a temporal file."""
return tempfile.NamedTemporaryFile(mode=mode, delete=delete)
with open(os.devnull, "w") as _null:
_HAVE_RSYNC = subprocess.call(["which", "rsync"], stdout=_null)
if _HAVE_RSYNC == 0:
def copy_path_rsync(path_from, path_to, preserve=True, dereference=False):
"""Copy contents using rsync."""
if os.path.isdir(path_from):
path_from = path_from + os.sep
assert_path(path_to)
else:
assert_path(os.path.dirname(path_to) + os.sep)
args = "-rptgoD"
if preserve:
args += "t"
if dereference:
args += "l"
else:
args += "L"
if subprocess.call(["rsync", args, path_from, path_to]) != 0:
raise OSError("Error copying files: %s -> %s" % (
path_from, path_to))
def _copy_path(*args, **kwargs):
copy_path_rsync(*args, **kwargs)
else:
def copy_path_shutil(path_from, path_to, preserve=True, dereference=False):
"""Copy contents using Python's shutil."""
if os.path.isdir(path_from):
# NOTE: will fail if destination already exists
path_from = path_from + os.sep
assert_path(path_to)
shutil.copytree(path_from, path_to, symlinks=not dereference)
else:
assert_path(os.path.dirname(path_to) + os.sep)
if os.path.islink(path_from):
link_to = os.readlink(path_from)
os.symlink(link_to, path_to)
else:
shutil.copy(path_from, path_to)
if preserve:
shutil.copymode(path_from, path_to)
def _copy_path(*args, **kwargs):
copy_path_shutil(*args, **kwargs)
def copy_path(path_from, path_to, preserve=True, dereference=False):
"""Copy files."""
_copy_path(path_from, path_to, preserve=preserve, dereference=dereference)
# -----------------------------
def str2num(arg):
"""Return numeric value of a string, if possible."""
# NOTE: StringConverter barks at non-numeric strings (str/bytes confusion)
try:
return np.lib.npyio.StringConverter().upgrade(arg)
except Exception:
return arg
# -----------------------------
def _wraps(wrapped):
return functools.wraps(wrapped=wrapped,
assigned=['__doc__'])
class ViewError(Exception):
"""Invalid operation in `OrderedSet` view."""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class OrderedSet(collections.abc.MutableSet, pp.Pretty):
"""A mutable set preserving order of insertion.
.. todo::
All help should come from `~collections.abc.MutableSet` instead of
using `_wraps`.
"""
@_wraps(collections.abc.Container.__init__)
def __init__(self, iterable=None, view_able=False):
self._view_able = view_able
if self._view_able:
self._list = np.array([], dtype=object)
else:
self._list = []
self._set_methods(False)
self._base = None
self._views = {}
self._set = set()
if iterable is not None:
self |= iterable
def set_view_able(self, view_able):
"""Set whether this object can produce "views" from it.
Objects able to produce views have lower performance when adding new
elements to them.
See also
--------
OrderedSet.view
"""
if view_able != self._view_able:
if view_able:
self._list = np.array(self._list, dtype=object)
else:
if self._views:
raise ValueError(
"cannot disable 'view_able' when views already exist")
self._list = list(self._list)
self._view_able = view_able
self._set_methods(False)
def view(self, index):
"""Create a view (sub-set) of this object.
This object also becomes a view. Modifications to the elements of a view
will also take effect on all other views of the same object.
Parameters
----------
index : slice
See also
--------
OrderedSet.set_view_able
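Examples
--------
>>> s = OrderedSet([1, 2, 3, 4], view_able=True)
>>> v = s.view(slice(1, 3))
>>> list(v)
[2, 3]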
"""
if not self._view_able:
raise ValueError("the object is not 'view_able'")
if not isinstance(index, slice):
raise TypeError("view index must be a slice")
self._set_methods(True)
res = OrderedSet([], True)
# pylint: disable=protected-access
res._list = self._list[index]
for elem in res._list:
res._set.add(elem)
res._base = self
res._set_methods(True)
self._views[id(res)] = weakref.ref(res)
return res
def __del__(self):
if self._base is not None:
# pylint: disable=protected-access
del self._base._views[id(self)]
def _set_methods(self, is_view):
if self._view_able:
if is_view:
self._append = self._append_array_view
self._remove = self._remove_array_view
self._pop = self._pop_array_view
else:
self._append = self._append_array
self._remove = self._remove_array
self._pop = self._pop_array
else:
assert not is_view
self._append = self._append_list
self._remove = self._remove_list
self._pop = self._pop_list
def _append_list(self, value):
self._list.append(value)
def _remove_list(self, value):
if self._base is not None:
self._base.remove(value)
else:
self._list.remove(value)
for view in six.itervalues(self._views):
# pylint: disable=protected-access
view()._list.remove(value)
view()._set.remove(value)
def _pop_list(self, index):
return self._list.pop(index)
def _append_array(self, value):
self._list = np.append(self._list, value)
def _remove_array(self, value):
self._list = np.delete(self._list, np.where(self._list == value))
def _pop_array(self, index):
key = self._list[index]
self._list = np.delete(self._list, index)
return key
# pylint: disable=no-self-use
def _append_array_view(self, value):
raise ViewError("cannot append to a view")
# pylint: disable=no-self-use
def _remove_array_view(self, value):
raise ViewError("cannot remove from a view")
# pylint: disable=no-self-use
def _pop_array_view(self, index):
raise ViewError("cannot pop from a view")
# pylint: disable=invalid-name
def _repr_pretty_(self, p, cycle):
with self.pformat(p, cycle):
p.pretty(list(self._list))
def __repr__(self):
return pp.Pretty.__repr__(self)
def get_index(self, index):
"""Get item at the 'index'th position."""
return self._list[index]
def copy(self):
"""Make a shallow copy of this `OrderedSet`."""
return OrderedSet(self, self._view_able)
def sorted(self, *args, **kwargs):
"""Same as `sort`, but returns a sorted copy."""
res = self.copy()
res.sort(*args, **kwargs)
return res
def sort(self, key=None, reverse=False):
"""Sort set in-place.
Follows the same semantics of Python's built-in `sorted`.
"""
if self._view_able:
contents = list(self._list)
else:
contents = self._list
contents.sort(key=key, reverse=reverse)
if self._view_able:
self._list[:] = contents
# Container
@_wraps(set.__contains__)
def __contains__(self, key):
return key in self._set
# Sized
@_wraps(set.__len__)
def __len__(self):
return len(self._list)
# Iterable
@_wraps(set.__iter__)
def __iter__(self):
return iter(self._list)
# MutableSet
# pylint: disable=missing-docstring
def add(self, key):
old_length = len(self._list)
self._set.add(key)
if len(self._set) != old_length:
try:
self._append(key)
except ViewError:
self._set.remove(key)
raise
add.__doc__ = collections.abc.MutableSet.add.__doc__
# pylint: disable=missing-docstring
def discard(self, key):
old_length = len(self._list)
self._set.remove(key)
if len(self._set) != old_length:
try:
self._remove(key)
except ViewError:
self._set.add(key)
raise
discard.__doc__ = collections.abc.MutableSet.discard.__doc__
discard.__doc__ += "\n\nThis operation has a cost of O(n)."
# pylint: disable=missing-docstring
@_wraps(collections.abc.MutableSet.pop)
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self._pop(-1) if last else self._pop(0)
self._set.remove(key)
return key
# Pickling
def __getstate__(self):
odict = self.__dict__.copy()
del odict["_append"]
del odict["_remove"]
del odict["_pop"]
del odict["_base"]
del odict["_views"]
return odict
def __setstate__(self, odict):
self.__dict__.update(odict)
self._base = None
self._views = {}
self._set_methods(False)
# -----------------------------
def _template_get_initial_dir(template, template_is_abs):
# pylint: disable=cyclic-import
from . import text
start_dir = ""
for part in template.split(os.sep):
if part == "":
continue
if start_dir == "" and not template_is_abs:
cur_dir = part
else:
cur_dir = os.sep.join([start_dir, part])
try:
text.translate(cur_dir, {})
except text.VariableError:
break
if os.path.isdir(cur_dir):
start_dir = cur_dir
else:
break
return start_dir
def find_files(template, path=None, absolute_path=False, sort=True):
"""Find files matching a given template.
Returns an 'InstanceGroup' with all paths of existing files matching the
given template. Each matching file path is an `Instance` with the extracted
variables in the `template`.
Parameters
----------
template : str
Template of file paths to find.
path : str, optional
On each resulting Instance, add a variable with the given name with the
file path.
absolute_path : bool, optional
Make the value in `path` absolute.
sort : bool, optional
Sort the file paths according to the alphanumeric order of each of the
variables in `template`, in that specific order.
Raises
------
ValueError
The variable in `path` is already present in `template`.
See Also
--------
sciexp2.common.text.extract
Argument `template` is interpreted following the extraction syntax.
Notes
-----
If `template` ends with ``/`` it will search for matching paths, and will
search for matching files otherwise.
Environment variables and user home directories in `template` will be expanded.
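Examples
--------
A minimal sketch, assuming result files laid out as ``results/<benchmark>/out.txt``:

>>> files = find_files("results/{{benchmark}}/out.txt", path="FILE") # doctest: +SKIP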
"""
# pylint: disable=cyclic-import
from . import text
from .instance import InstanceGroup, Instance
if not isinstance(template, six.string_types):
raise ValueError("Not an expression: " + template)
if path is not None and not isinstance(path, six.string_types):
raise ValueError("path must be either None or a string")
if path in text.get_variables(template):
raise ValueError("path variable is already present in template")
template_is_dir = template[-1] == "/" if template else False
template_is_abs = os.path.isabs(template)
template = get_path(template) + "$"
start_dir = _template_get_initial_dir(template, template_is_abs)
extractor = text.Extractor(template)
res = InstanceGroup()
def add(env, target_path):
# use numbers whenever possible (for later number-aware sorting)
for key, val in six.iteritems(env):
env[key] = str2num(val)
if path is not None:
if absolute_path:
target_path = os.path.abspath(target_path)
env[path] = target_path
res.add(Instance(env))
for dir_path, _, file_list in os.walk(start_dir):
if template_is_dir:
try:
env = extractor.extract(dir_path + os.path.sep)
except text.ExtractError:
pass
else:
add(env, dir_path)
else:
for file_path in file_list:
target_path = os.path.join(dir_path, file_path)
try:
env = extractor.extract(target_path)
except text.ExtractError:
pass
else:
add(env, target_path)
if sort:
# sort result according to file sorting
variables = text.get_variables(template)
res.sort(variables)
return res
# -----------------------------
def execute_with_sigint(cmd, **kwargs):
"""Execute a command and forward SIGINT to it.
Parameters
----------
cmd : list of string
Command to execute
kwargs : dict
Additional arguments to subprocess.Popen.
Returns
-------
Integer with the command's return code.
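Examples
--------
>>> execute_with_sigint(["sleep", "1"]) # doctest: +SKIP
0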
"""
preexec_fn = kwargs.pop("preexec_fn", None)
def preexec():
os.setpgrp()
if preexec_fn:
preexec_fn()
signals = [
("SIGINT", signal.SIGINT),
("SIGTERM", signal.SIGTERM),
("SIGKILL", signal.SIGKILL),
]
state = dict(proc=None,
error=False,
signal_idx=0)
def run():
if state["proc"] is None:
if not state["error"]:
# pylint: disable=subprocess-popen-preexec-fn
state["proc"] = subprocess.Popen(cmd, preexec_fn=preexec, **kwargs)
else:
return
state["proc"].wait()
def run_with_except(depth=0):
try:
run()
except KeyboardInterrupt:
state["error"] = True
info = signals[state["signal_idx"]]
progress.log(progress.LVL_NONE,
"WARNING: Interrupting child command with %s" % info[0])
state["proc"].send_signal(info[1])
if state["signal_idx"] < len(signals) - 1:
state["signal_idx"] += 1
run_with_except(depth + 1)
if depth == 0:
raise
run_with_except()
return state["proc"].returncode | /sciexp2-expdef-2.0.13.tar.gz/sciexp2-expdef-2.0.13/sciexp2/common/utils.py | 0.624064 | 0.166167 | utils.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
from collections import OrderedDict
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping  # Python 2 fallback
import pystache
import re
from .utils import OrderedSet
import six
import sys
class ParseError(Exception):
pass
class VariableError(Exception):
pass
class ExtractError(Exception):
pass
def _get_parsed_elems(parsed):
return parsed._parse_tree
def _parse(text, allow_nested=True, allow_inverted=True):
try:
pystache.render(text, {})
except pystache.parser.ParsingError as e:
_, _, exc_traceback = sys.exc_info()
new_e = ParseError(str(e))
six.reraise(new_e.__class__, new_e, exc_traceback)
parsed = pystache.parse(text)
elems = _get_parsed_elems(parsed)
if len(elems) == 0 and len(text) > 0:
raise ParseError("section start tag mismatch")
def traverse(elems, nested):
seen_node = False
for elem in elems:
if not isinstance(elem, six.string_types):
seen_node = True
if isinstance(elem, six.string_types):
pass
elif isinstance(elem, (pystache.parser._EscapeNode,
pystache.parser._ChangeNode)):
pass
elif isinstance(elem, pystache.parser._SectionNode):
nested = traverse(_get_parsed_elems(elem.parsed), True)
if nested and not allow_nested:
raise ParseError(
"nested tags not allowed in section %r" % elem.key)
elif isinstance(elem, pystache.parser._InvertedNode):
if not allow_inverted:
raise ParseError("inverted sections not allowed: %s" % elem.key)
nested = traverse(_get_parsed_elems(elem.parsed_section), True)
if nested and not allow_nested:
raise ParseError(
"nested tags not allowed in inverted section %r" % elem.key)
elif isinstance(elem, pystache.parser._PartialNode):
raise ParseError(
"partial tags not allowed")
else:
raise ParseError("tag not allowed %r" % elem.__class__)
return seen_node
traverse(elems, False)
return parsed
def get_variables(text, nested=False):
"""Get the variables referenced in the given text.
Parameters
----------
text : str
Text to get variables from.
nested : optional
Whether to allow nested variables. Can have values "all" for all the
variables, or "inner" for just the inner variables.
Examples
--------
>>> get_variables("{{a}}")
['a']
>>> get_variables("{{#a}} {{b}} {{/a}}", nested="inner")
['b']
>>> get_variables("{{#a}} {{b}} {{/a}}", nested="all")
['a', 'b']
"""
if nested not in [False, "all", "inner"]:
raise ValueError("invalid nested value:", nested)
parsed = _parse(text, allow_nested=bool(nested))
if not nested: # equivalent due to exception raised above
nested = "all"
def traverse(elems, variables):
added_variables = False
for elem in elems:
if isinstance(elem, pystache.parser._SectionNode):
traversed_variables = traverse(_get_parsed_elems(elem.parsed),
variables)
if nested == "all":
variables.add(elem.key)
added_variables = True
elif nested == "inner" and not traversed_variables:
variables.add(elem.key)
added_variables = True
elif isinstance(elem, pystache.parser._InvertedNode):
traversed_variables = traverse(_get_parsed_elems(elem.parsed_section),
variables)
if nested == "all":
variables.add(elem.key)
added_variables = True
elif nested == "inner" and not traversed_variables:
variables.add(elem.key)
added_variables = True
elif isinstance(elem, (pystache.parser._EscapeNode, pystache.parser._PartialNode)):
variables.add(elem.key)
added_variables = True
else:
assert isinstance(elem, six.string_types), elem
return added_variables
elems = _get_parsed_elems(parsed)
variables = set()
traverse(elems, variables)
return sorted(variables)
class Translator(object):
"""Translate a template text with given variables."""
def __init__(self, template):
"""
Parameters
----------
template : str
Template text to translate.
"""
self._template = template
self._parsed = _parse(self._template, allow_nested=True)
def identity(arg):
return arg
self._renderer = pystache.renderer.Renderer(search_dirs=[], file_extension=False,
partials=None, escape=identity,
missing_tags="strict")
def translate(self, env, recursive=True):
"""Apply translation with given variables.
Parameters
----------
env : dict
Mapping of variable names to their values.
recursive : bool, optional
Whether to apply translations recursively.
Examples
--------
You can perform simple text translations:
>>> t = Translator('Hello {{a}}')
>>> t.translate({'a': 'you'})
'Hello you'
>>> t.translate({'a': [1, 2]})
'Hello [1, 2]'
And also recursive ones:
>>> t.translate({'a': '{{b}}', 'b': 'them'})
'Hello them'
More complex cases like conditionals are also possible:
>>> t = Translator('{{#a}}is true{{/a}}{{^a}}is false{{/a}}')
>>> t.translate({'a': 1})
'is true'
>>> t.translate({'a': 0})
'is false'
Or even calls to functions (arguments are the unexpanded text on the template):
>>> Translator('{{a}}').translate({'a': lambda: 'value'})
'value'
>>> Translator('{{#a}}{{b}}{{/a}}').translate(
... {'a': lambda arg: 2*arg, 'b': 4})
'44'
>>> Translator('{{#a}}{{b}}{{/a}}').translate(
... {'a': lambda arg: " ".join(list(arg))})
'{ { b } }'
And expansion of nested variables with multiple values is also possible:
>>> Translator('{{#a}}A.B=={{b}} {{/a}}').translate({'a': [{'b': 1}, {'b': 2}]})
'A.B==1 A.B==2 '
"""
if not isinstance(env, Mapping):
raise TypeError("not a mapping: %r" % env)
template_new = self._template
parsed_new = self._parsed
while True:
template_old = template_new
parsed_old = parsed_new
try:
template_new = self._renderer.render(parsed_new, env)
except pystache.context.KeyNotFoundError as e:
_, _, exc_traceback = sys.exc_info()
new_e = VariableError("missing variable %s" % e.key)
six.reraise(new_e.__class__, new_e, exc_traceback)
except pystache.common.TemplateNotFoundError as e:
_, _, exc_traceback = sys.exc_info()
new_e = VariableError(str(e))
six.reraise(new_e.__class__, new_e, exc_traceback)
if not recursive:
break
elif template_old == template_new:
break
parsed_new = _parse(template_new, allow_nested=True)
return template_new
def translate_many(self, envs, recursive=True, ignore_variable_error=False,
with_envs=False):
"""Apply translation with given set of variables.
Parameters
----------
envs : sequence of dict
Sequence of variable names to value mappings to apply the
translation for.
recursive : bool, optional
Whether to apply translations recursively.
ignore_variable_error : bool, optional
Ignore translations for variable maps that have missing variables.
with_envs : bool, optional
Get the set of maps that led to each translation.
Returns
-------
list of str
Translations when ``with_envs`` is ``False``.
list of (str, [env])
Translations with their corresponding variable maps when
``with_envs`` is ``True``.
Notes
-----
The result is guaranteed to maintain the order of the elements of
`envs`.
Examples
--------
You can very easily translate a sequence of variable maps:
>>> t = Translator('Hello {{a}}')
>>> t.translate_many([{'a': 'you'}, {'a': 'them'}])
['Hello you', 'Hello them']
Multiple maps can also translate into the same text:
>>> t.translate_many([{'a': 'you'}, {'a': 'them', 'b': 1}, {'a': 'them', 'b': 2}])
['Hello you', 'Hello them']
But you can also get the maps that led to each translation:
>>> t = Translator('Hello {{a}}')
>>> translated = t.translate_many([{'a': 'you'}, {'a': 'them', 'b': 1},
... {'a': 'them', 'b': 2}], with_envs=True)
>>> translated == [('Hello you', [{'a': 'you'}]),
... ('Hello them', [{'a': 'them', 'b': 1},
... {'a': 'them', 'b': 2}])]
True
"""
if with_envs:
result = OrderedDict()
def add(key, val):
if key not in result:
result[key] = []
result[key].append(val)
else:
result_track = OrderedSet()
result = []
def add(key, val):
if key not in result_track:
result_track.add(key)
result.append(key)
for env in envs:
try:
text = self.translate(env)
except VariableError:
if not ignore_variable_error:
raise
else:
add(text, env)
if with_envs:
return list(result.items())
else:
return result
def translate(template, env, **kwargs):
"""Shorthand for ``Translator(template).translate(env, **kwargs)``."""
return Translator(template=template).translate(env=env, **kwargs)
def translate_many(template, envs, **kwargs):
"""Shorthand for ``Translator(template).translate_many(envs, **kwargs)``."""
return Translator(template=template).translate_many(envs=envs, **kwargs)
class Extractor(object):
"""Extract a dict with the variable values that match a given template.
Variables and sections on the template are used to define regular
expressions, following Python's `syntax
<http://docs.python.org/library/re.html#regular-expression-syntax>`_.
"""
def __init__(self, template):
"""
Parameters
----------
template : str
Template text to extract from.
"""
self._template = template
parsed = _parse(template, allow_nested=False, allow_inverted=False)
regex = ""
variables = {}
for elem in _get_parsed_elems(parsed):
if isinstance(elem, six.string_types):
regex += elem
elif isinstance(elem, pystache.parser._SectionNode):
if elem.key in variables:
raise ParseError(
"regex for variable %s has already been set: %s" % (
elem.key, variables[elem.key]))
elem_regex = _get_parsed_elems(elem.parsed)
if len(elem_regex) == 0:
raise ParseError(
"regex for variable %s cannot be empty" % elem.key)
elem_regex = elem_regex[0]
assert len(elem_regex) > 0, template
variables[elem.key] = elem_regex
regex += "(?P<%s>%s)" % (elem.key, elem_regex)
elif isinstance(elem, pystache.parser._EscapeNode):
if elem.key in variables:
regex += "(?P=%s)" % elem.key
else:
elem_regex = ".+"
variables[elem.key] = elem_regex
regex += "(?P<%s>%s)" % (elem.key, elem_regex)
else:
# silently ignore
pass
self._cre = re.compile(regex)
def extract(self, text):
"""Apply extraction to given text.
Parameters
----------
text : str
Text to extract from.
Examples
--------
You can perform simple text extractions, where variables correspond to
the simple regex ``.+``:
>>> e = Extractor('Hello {{a}}')
>>> e.extract('Hello world')
{'a': 'world'}
>>> e.extract('Hello 123!')
{'a': '123!'}
More complex regexes can be specified using section tags:
>>> Extractor('Hello {{#a}}[0-9]+{{/a}}.*').extract('Hello 123!')
{'a': '123'}
And using the same variable on multiple tags ensures they all match the
same contents:
>>> extracted = Extractor('{{#a}}[0-9]+{{/a}}.*{{a}}{{b}}').extract('123-123456')
>>> extracted == {'a': '123', 'b': '456'}
True
"""
match = self._cre.match(text)
if match is None:
raise ExtractError(
"could not extract variables from template %r (regex: %r)" % (
self._template, self._cre.pattern))
return match.groupdict()
def extract(template, text):
"""Shorthand for ``Extractor(template).extract(text)``."""
return Extractor(template).extract(text)
__all__ = [
"ParseError", "VariableError", "ExtractError",
"get_variables",
"Translator", "translate", "translate_many",
"Extractor", "extract",
]
# --- end of file: sciexp2/common/text.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import collections
import re
import six
from . import kernel
def set_freq(shell, path="cpupower", ld_library_path="", freq="max"):
"""Set frequency scaling.
Parameters
----------
shell
Target shell.
path : str, optional
Path to cpupower tool. Default is use the cpupower tool in the PATH.
ld_library_path : str, optional
Library path to run cpupower tool. Default is use the system's library
path.
freq : str, optional
Frequency to set in GHz. Default is use maximum frequency.
Notes
-----
In some systems it might be necessary to boot the Linux kernel with command
line option "intel_pstate=disable" in order to support user frequency
settings.
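Examples
--------
A minimal sketch (``shell`` is assumed to be an already connected target shell):

>>> set_freq(shell, freq="2.0") # doctest: +SKIP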
"""
if freq == "max":
max_freq = shell.run([
"sh", "-c",
f"sudo LD_LIBRARY_PATH={ld_library_path} {path} frequency-info | grep 'hardware limits' | sed -e 's/.* - \\(.*\\) GHz/\\1/'"],
encoding="ascii")
freq = max_freq.output[:-1]
shell.run(["sudo",
f"LD_LIBRARY_PATH={ld_library_path}", path,
"-c", "all", "frequency-set", "--governor", "userspace"])
shell.run(["sudo",
f"LD_LIBRARY_PATH={ld_library_path}", path,
"-c", "all", "frequency-set", "--freq", f"{freq}GHz"])
def _get_mask(cpu_list):
mask = 0
for cpu in cpu_list:
mask += 1 << cpu
return mask
def set_irqs(shell, *irqs, **kwargs):
"""Make irqbalance ignore the given IRQs, and instead set their SMP affinity.
Parameters
----------
shell
Target system.
irqs
IRQ descriptors.
ignore_errors : bool, optional
Ignore errors when manually setting an IRQ's SMP affinity. Implies that
irqbalance will manage that IRQ. Default is False.
irqbalance_banned_cpus : list of int, optional
CPUs that irqbalance should not use for balancing.
irqbalance_args : list of str, optional
Additional arguments to irqbalance.
Each descriptor in `irqs` is a three-element tuple:
* Type: either ``irq`` for the first column in /proc/interrupts, or
``descr`` for the interrupt description after the per-CPU counts.
* Regex: a regular expression to apply to the fields above, or `True` to
apply to all values (a shorthand to the regex ".*"), or an `int` (a
shorthand to the regex "^int_value$").
* SMP affinity: list of cpu numbers to set as the IRQ's affinity; if `True`
is used instead, let irqbalance manage this IRQ.
All matching descriptors are applied in order for each IRQ. If no descriptor
matches, or the last matching descriptor has `True` as its affinity value,
the IRQ will be managed by irqbalance as before.
Returns
-------
The new irqbalance process.
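Examples
--------
An illustrative sketch; "eth0" is a hypothetical device description to match:

>>> proc = set_irqs(shell, ("descr", "eth0", [0, 1])) # doctest: +SKIP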
"""
ignore_errors = kwargs.pop("ignore_errors", False)
irqbalance_args = kwargs.pop("irqbalance_args", [])
irqbalance_banned_cpus = kwargs.pop("irqbalance_banned_cpus", [])
irqbalance_banned_cpus_mask = _get_mask(irqbalance_banned_cpus)
if len(kwargs) > 0:
raise Exception("unknown argument: %s" % list(kwargs.keys())[0])
irqs_parsed = []
for arg_irq in irqs:
if len(arg_irq) != 3:
raise ValueError("wrong IRQ descriptor: %s" % repr(arg_irq))
irq_type, irq_re, irq_cpus = arg_irq
if isinstance(irq_re, int):
irq_re = "^%d$" % irq_re
if not isinstance(irq_re, bool) and not isinstance(irq_re, six.string_types):
raise TypeError("wrong IRQ descriptor regex: %s" % str(irq_re))
if not isinstance(irq_re, bool):
irq_re = re.compile(irq_re)
if (not isinstance(irq_cpus, bool) and (isinstance(irq_cpus, six.string_types) or
not isinstance(irq_cpus, collections.abc.Iterable))):
raise TypeError("wrong IRQ descriptor CPU list: %s" % str(irq_cpus))
if irq_type not in ["irq", "descr"]:
raise ValueError("wrong IRQ descriptor type: %s" % str(irq_type))
irqs_parsed.append((irq_type, irq_re, irq_cpus))
irq_manual = []
irqbalance_banned = set()
cre = re.compile(r"(?P<irq>[^:]+):(?:\s+[0-9]+)+\s+(?P<descr>.*)")
with shell.open("/proc/interrupts") as f:
for line in f.read().split("\n"):
match = cre.match(line)
if match is None:
continue
irq = match.groupdict()["irq"].strip()
descr = match.groupdict()["descr"].strip()
cpus = True
for irq_type, irq_cre, irq_cpus in irqs_parsed:
if irq_type == "irq":
if irq_cre == True or irq_cre.match(irq):
cpus = irq_cpus
elif irq_type == "descr":
if irq_cre == True or irq_cre.match(descr):
cpus = irq_cpus
else:
assert False, irq_type
if cpus != True:
irq_manual.append((irq, cpus))
irqbalance_banned.add(irq)
for irq, cpus in irq_manual:
mask = _get_mask(cpus)
try:
shell.run(["sudo", "sh", "-c",
"echo %x > /proc/irq/%s/smp_affinity" % (irq, mask)])
except:
if ignore_errors:
irqbalance_banned.remove(irq)
else:
raise
shell.run(["sudo", "service", "irqbalance", "stop"])
proc = shell.spawn(["sudo", "IRQBALANCE_BANNED_CPUS=%x" % irqbalance_banned_cpus_mask,
"irqbalance"] + irqbalance_args +
["--banirq=%s" % banned
for banned in irqbalance_banned],
encoding="ascii")
return proc
def get_cpus(shell, node=None, package=None, core=None, pu=None, cgroup=None):
"""Get a set of all physical CPU indexes in the system.
It uses the hwloc-calc program to report available CPUs.
Parameters
----------
shell
Target shell.
node : int or str, optional
NUMA nodes to check. Defaults to all.
package : int or str, optional
Core packages to check on selected NUMA nodes. Defaults to all.
core : int or str, optional
Cores to check on selected core packages. Defaults to all.
pu : int or str, optional
PUs to check on selected cores. Defaults to all.
cgroup : str, optional
Cgroup path.
Returns
-------
list of int
Physical CPU indexes (as used by Linux).
Notes
-----
The combination of all the arguments is a flexible way to get all the
information in the system. Each of these arguments can have any of the forms
described under "Hwloc Indexes" in manpage hwloc(7). A few examples.
Second thread of each core:
>>> get_cpus(shell, pu=1)
First thread of each core in first NUMA node:
>>> get_cpus(shell, node=0, pu=0)
Hardware threads in first core of the entire system:
>>> get_cpus(shell, node=0, package=0, core=0)
"""
cmd = ["hwloc-calc", "--intersect", "PU",
"--li", "--po", ""]
def add_level(name, value):
if value is None:
value = "all"
cmd[-1] += ".%s:%s" % (name, str(value))
add_level("node", node)
add_level("package", package)
add_level("core", core)
add_level("pu", pu)
cmd[-1] = cmd[-1][1:]
if cgroup is not None:
cmd = ["sudo", "cgexec", "-g", cgroup] + cmd
res = shell.run(cmd, encoding="ascii")
line = res.output.split("\n")[0]
if line == "":
raise ValueError("hwloc-calc: %r" % res.stderr_output)
return [int(i) for i in res.output.split(",")]
__all__ = [
"set_freq", "set_irqs", "get_cpus",
]
# --- end of file: sciexp2/exprun/cpu.py ---
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
from contextlib import contextmanager
import joblib
@contextmanager
def step(message, logger=print):
"""Show simple progress messages around a piece of code.
Parameters
----------
message : str
Message to print.
logger : function, optional
Logging function. Defaults to `print`.
Examples
--------
>>> with step("Doing something")
print("some text")
Doing something...
some text
Doing something... done
"""
logger(message, "...")
yield
logger(message, "... done")
class threaded(object):
"""Context manager to run functions in parallel using threads.
Examples
--------
Run two processes in parallel and wait until both are finished:
>>> with step("Running in parallel"), threaded() as t:
@t.start
def f1():
shell = LocalShell()
shell.run(["sleep", "2"])
print("f1")
@t.start
def f2():
shell = LocalShell()
shell.run(["sleep", "1"])
print("f2")
Running in parallel...
f2
f1
Running in parallel... done
"""
def __init__(self, n_jobs=None):
if n_jobs is None:
n_jobs = -1
self._n_jobs = n_jobs
self._jobs = []
self.result = None
def __enter__(self):
return self
def __exit__(self, *args):
pool = joblib.Parallel(backend="threading", n_jobs=self._n_jobs)
self.result = pool(joblib.delayed(job, check_pickle=False)(*args, **kwargs)
for job, args, kwargs in self._jobs)
def start(self, target):
"""Decorator to start a function on a separate thread."""
self._jobs.append((target, [], {}))
def start_args(self, *args, **kwargs):
"""Callable decorator to start a function on a separate thread.
Examples
--------
>>> with threaded() as t:
@t.start_args(1, b=2)
def f(a, b):
print(a, b)
        1 2
"""
def wrapper(target):
self._jobs.append((target, args, kwargs))
return wrapper
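# Example (not part of the original module): a minimal, hypothetical sketch
# showing that after the `with` block exits, the return values of the submitted
# functions are collected in `t.result` (populated by joblib in __exit__).
# The function names below are illustrative only.
#
#     with threaded(n_jobs=2) as t:
#         @t.start
#         def compute_a():
#             return 1
#         @t.start
#         def compute_b():
#             return 2
#     print(t.result)  # [1, 2]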
__all__ = [
"step", "threaded",
] | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/util.py | 0.775095 | 0.168446 | util.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import logging
import os
import re
from . import wait
from .util import step
logger = logging.getLogger(__name__)
def check_version(shell, version, fail=True):
"""Check that a specific linux kernel version is installed.
Parameters
----------
shell
Target shell.
version : str
Target kernel version.
fail : bool, optional
Whether to raise an exception when a different version is
installed. Default is True.
Returns
-------
bool
Whether the target kernel version is installed.
"""
res = shell.run(["uname", "-r"])
current = res.output.split("\n")[0]
if current == version:
return True
else:
if fail:
raise Exception("Invalid kernel version: target=%s current=%s" % (version, current))
return False
def install_version(shell, version, package_base):
"""Install and reboot into a given linux kernel version if it is not the current.
Parameters
----------
shell
Target shell.
version : str
Target kernel version.
package_base : str
Base directory in target shell where kernel packages can be installed
from.
"""
if check_version(shell, version, fail=False):
return
for name in ["linux-image-%(v)s_%(v)s-*.deb",
"linux-headers-%(v)s_%(v)s-*.deb",
"linux-libc-dev_%(v)s-*.deb"]:
name = os.path.join(package_base, name % {"v": version})
res = shell.run(["sh", "-c", "ls %s" % name])
files = res.output.split("\n")
for path in files:
if path == "":
continue
logger.warn("Installing %s..." % path)
shell.run(["sudo", "dpkg", "-i", path])
res = shell.run(["grep", "-E", "menuentry .* %s" % version, "/boot/grub/grub.cfg"])
grub_ids = res.output.split("\n")
pattern = r" '([a-z0-9.-]+-%s-[a-z0-9.-]+)' {" % re.escape(version)
grub_id = re.search(pattern, grub_ids[0]).group(1)
with step("Updating GRUB %s..." % path, logger=logger.warn):
shell.run(["sudo", "sed", "-i", "-e",
"s/^GRUB_DEFAULT=/GRUB_DEFAULT=\"saved\"/",
"/etc/default/grub"])
shell.run(["sudo", "update-grub"])
shell.run(["sudo", "grub-set-default", grub_id])
with step("Rebooting into new kernel...", logger=logger.warn):
shell.run(["sudo", "reboot"], allow_error=True)
wait.ssh(shell)
check_version(shell, version)
def check_cmdline(shell, arg):
"""Check the linux kernel was booted with the given commandline.
Parameters
----------
shell
Target shell.
arg : str
Command line argument to check.
"""
shell.run(["grep", arg, "/proc/cmdline"])
def check_module_param(shell, module, param, value, fail=True):
"""Check that a linux kernel module was loaded with the given parameter value.
Parameters
----------
shell
Target shell.
module : str
Module name.
param : str
Module name.
value
Module value (will be coverted to str).
fail : bool, optional
Raise an exception if the value is not equal. Default is True.
Returns
-------
bool
Whether the given kernel module was loaded with the given parameter
value.
"""
with shell.open("/sys/module/%s/parameters/%s" % (module, param), "r") as f:
f_val = f.read().split("\n")[0]
if f_val != value:
if fail:
raise Exception("invalid kernel parameter value: target=%s current=%s" % (value, f_val))
return False
else:
return True
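# Example (not part of the original module): a minimal, hypothetical sketch of a
# typical call sequence for preparing a machine. The host, version string,
# package directory and module parameter below are assumed example values.
#
#     from sciexp2.exprun import spur
#     shell = spur.SshShell(hostname="node1", username="user", password="...")
#     install_version(shell, "5.4.0-custom", "/opt/kernels")
#     check_cmdline(shell, "isolcpus=2-7")
#     check_module_param(shell, "kvm_intel", "nested", "Y")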
__all__ = [
"check_version", "install_version", "check_cmdline",
"check_module_param",
] | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/kernel.py | 0.780244 | 0.165593 | kernel.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
from . import spur
def install(shell, package):
"""Install given `package` using `shell`."""
if spur.is_ssh_shell(shell):
hostname = shell.hostname
else:
hostname = "localhost"
shell.run([
"bash", "-c",
"dpkg -s %s >/dev/null 2>&1 || sudo apt-get install -y %s" % (package,
package),
])
def install_deps(shell):
"""Install all needed system packages.
Must be called on a local shell before using other functions that require a
shell, and before using other functions through the same shell.
Parameters
----------
shell
Target system.
"""
install(shell, "cgroup-tools")
install(shell, "hwloc")
install(shell, "rsync")
install(shell, "netcat-traditional")
install(shell, "psmisc")
install(shell, "util-linux")
def rsync(src_shell, src_path, dst_shell, dst_path, run_shell=None, args=[]):
"""Synchronize two directories using rsync.
Parameters
----------
src_shell
Source shell.
src_path
Source directory.
dst_shell
Destination shell.
dst_path
Destination directory.
run_shell : optional
Shell where to run rsync. Default is local machine.
args : list of str, optional
Additional arguments to rsync. Default is none.
"""
if (not spur.is_local_shell(src_shell) and not spur.is_local_shell(dst_shell) and
run_shell is not src_shell and run_shell is not dst_shell):
raise Exception("rsync cannot work with two remote shells")
if run_shell is None:
run_shell = spur.LocalShell()
ssh_port = 22
cmd_pass = []
if spur.is_local_shell(src_shell) or run_shell is src_shell:
cmd_src = [src_path]
else:
ssh_port = src_shell._port
if src_shell._password is not None:
cmd_pass = ["sshpass", "-p", src_shell._password]
cmd_src = ["%s@%s:%s" % (src_shell.username, src_shell.hostname, src_path)]
if spur.is_local_shell(dst_shell) or run_shell is dst_shell:
cmd_dst = [dst_path]
else:
ssh_port = dst_shell._port
if dst_shell._password is not None:
cmd_pass = ["sshpass", "-p", dst_shell._password]
cmd_dst = ["%s@%s:%s" % (dst_shell.username, dst_shell.hostname, dst_path)]
cmd = []
cmd += cmd_pass
cmd += ["rsync", "-az"]
cmd += ["-e", "ssh -p %d -o StrictHostKeyChecking=no" % ssh_port]
cmd += cmd_src
cmd += cmd_dst
cmd += args
run_shell.run(cmd) | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/files.py | 0.560012 | 0.215846 | files.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import re
import io
import time
from . import spur
def run(shell, *args, **kwargs):
"""Run command with a timeout.
Parameters
----------
shell
Shell used to run given command.
timeout : int, optional
Timeout before erroring out (in seconds). Default is no timeout.
rerun_error : bool, optional
Rerun command every time it fails. Default is False.
args, kwargs
        Parameters to the shell's spawn method.
Returns
-------
spur.ExecutionResult
"""
timeout = kwargs.pop("timeout", 0)
rerun_error = kwargs.pop("rerun_error", False)
allow_error = kwargs.pop("allow_error", False)
proc = None
t_start = time.time()
while True:
t_now = time.time()
if t_now - t_start > timeout > 0:
raise Exception("Wait timed out" + repr((t_now - t_start, timeout)))
if proc is None:
proc = shell.spawn(*args, allow_error=True, **kwargs)
if proc.is_running():
time.sleep(2)
else:
res = proc.wait_for_result()
if res.return_code == 0:
return res
if not allow_error:
if rerun_error:
proc = None
time.sleep(2)
else:
raise res.to_error()
else:
return res
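# Example (not part of the original module): a minimal, hypothetical sketch that
# retries a flaky command until it succeeds, giving up after 60 seconds. The
# command and URL shown are assumed example values.
#
#     shell = spur.LocalShell()
#     res = run(shell, ["curl", "-fsS", "http://example.com/health"],
#               timeout=60, rerun_error=True)
#     print(res.output)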
def connection(shell, address, port, timeout=0):
"""Wait until we can connect to given address/port."""
cmd = ["sh", "-c", "echo | nc %s %d" % (address, port)]
run(shell, cmd, timeout=timeout, rerun_error=True)
def ssh(shell, timeout=0):
"""Wait until we can ssh through given shell."""
if spur.is_local_shell(shell):
return
local = spur.LocalShell()
cmd = [
# pylint: disable=protected-access
"sshpass", "-p", shell._password,
"ssh",
"-o", "ConnectTimeout=1",
"-o", "StrictHostKeyChecking=no",
# pylint: disable=protected-access
"-p", str(shell._port), shell.username+"@"+shell.hostname,
"true",
]
run(local, cmd, timeout=timeout, rerun_error=True)
def print_stringio(obj, file=None):
"""Print contents of a StringIO object as they become available.
Useful in combination with `stringio` to print an output while processing
it.
Parameters
----------
obj : StringIO
StringIO object to print.
file : file or function, optional
File or function to print object's contents. Defaults to stdout.
Examples
--------
    >>> stdout = io.StringIO()
>>> thread.start_new_thread(print_stringio, (stdout,))
>>> shell.run(["sh", "-c", "sleep 1 ; echo start ; sleep 2; echo end ; sleep 1"],
... stdout=stdout)
start
end
See also
--------
stringio
"""
if not isinstance(obj, io.StringIO):
raise TypeError("expected a StringIO object")
if callable(file):
def flush_file():
pass
print_file = file
else:
def flush_file():
if file is not None:
file.flush()
def print_file(message):
print(message, end="", file=file)
seen = 0
while True:
time.sleep(0.5)
contents = obj.getvalue()
missing = contents[seen:]
if missing:
print_file(missing)
flush_file()
seen += len(missing)
def stringio(obj, pattern, timeout=0):
"""Wait until a StringIO's contents match the given regex.
Useful to trigger operations when a process generates certain output.
Examples
--------
Count time between the "start" and "end" lines printed by a process:
>>> stdout = io.StringIO()
>>> def timer(obj):
stringio(obj, "^start$")
t_start = time.time()
stringio(obj, "^end$")
t_end = time.time()
print("time:", int(t_end - t_start))
>>> thread.start_new_thread(timer, (stdout,))
>>> shell.run(["sh", "-c", "sleep 1 ; echo start ; sleep 2; echo end ; sleep >>> 1"],
... stdout=stdout)
time: 2
See also
--------
print_stringio
"""
if not isinstance(obj, io.StringIO):
raise TypeError("expected a StringIO object")
cre = re.compile(pattern, re.MULTILINE)
t_start = time.time()
while True:
t_now = time.time()
if t_now - t_start > timeout > 0:
raise Exception("Wait timed out" + repr((t_now - t_start, timeout)))
time.sleep(0.5)
contents = obj.getvalue()
match = cre.findall(contents)
if match:
return
__all__ = [
"run", "connection", "ssh", "print_stringio", "stringio",
] | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/wait.py | 0.738292 | 0.18352 | wait.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import collections.abc
import os
import re
import six
# pylint: disable=redefined-builtin
def get_tids(shell, pid, filter=None):
"""Get ids of all threads in a given process.
Parameters
----------
shell
Target shell.
pid : int
Target process pid.
filter : str or cre, optional
Return pids that match given filter in process name. Default is all
pids.
Returns
-------
list of int
List of the selected process pids.
Notes
-----
When using a string for `filter` it will simply check it is part of the
process name.
"""
pids = shell.run(["ps", "H", "-o", "tid comm", str(pid)], encoding="utf-8")
lines = pids.output.split("\n")
res = []
for line in lines[1:]:
line = line.strip()
if line == "":
continue
pid, name = line.split(" ", 1)
if filter:
if isinstance(filter, six.string_types):
if filter not in name:
continue
else:
if not filter.match(name):
continue
res.append(int(pid))
return res
def pin(shell, pid, cpus, **shell_kwargs):
"""Pin pid to given physical CPU list.
Parameters
----------
shell
Target shell.
pid : int
Target pid or tid to pin.
cpus : list of int
Physical CPUs to pin the pid to.
shell_kwargs : optional
Arguments to `shell.run`
"""
shell.run(["sudo", "taskset", "-p",
"-c", ",".join(str(c) for c in cpus), str(pid)],
**shell_kwargs)
def cgroup_create(shell, controller, path, **kwargs):
"""Create a cgroup for given subsystem.
Parameters
----------
shell
Target shell.
controller : str
Cgroup controller to configure.
path : str
New cgroup path.
kwargs : dict
Controller parameters to set. Lists are comma-concatenated, all elements
are transformed to str.
"""
shell.run(["sudo", "cgcreate", "-g", controller+":"+path])
for key, val in kwargs.items():
        if isinstance(val, six.string_types) or not isinstance(val, collections.abc.Iterable):
val = [val]
val = ",".join(str(v) for v in val)
shell.run(["sudo", "cgset", "-r", "%s.%s=%s" % (controller, key, val), path])
def cgroup_pids(shell, path=None):
"""Get pids in given cgroup path.
Parameters
----------
shell
Target shell.
path : str, optional
Cgroup path to analyze (defaults to entire system).
Returns
-------
list of int
Pids in the given cgroup.
"""
res = set()
base = "/sys/fs/cgroup"
if path is None:
path = ""
cre = re.compile(os.path.join(base, "[^/]*", path))
proc = shell.run(["find", base, "-name", "tasks"], encoding="ascii")
for filepath in proc.output.split("\n"):
if cre.match(os.path.dirname(filepath)):
# pylint: disable=invalid-name
with shell.open(filepath, "r") as f:
pids = (int(pid) for pid in f.read().split("\n")
if pid != "")
res.update(set(pids))
return list(res)
def cgroup_move(shell, controller, path, pids):
"""Move pids to a cgroup.
Parameters
----------
shell
Target shell.
controller : str
Cgroup controller.
path : str
Cgroup path.
pids : pid or list of pid
Pids to move into the cgroup. All elements are transformed to str.
Notes
-----
If you move the process that is serving this shell, you might have to
reconnect the shell to continue using it.
"""
    if isinstance(pids, six.string_types) or not isinstance(pids, collections.abc.Iterable):
pids = [pids]
pids_str = " ".join(str(p) for p in pids if str(p) != "")
shell.run([
"sh", "-c",
f"for p in {pids_str}; do sudo cgclassify -g {controller}:{path} $p || true; done"])
__all__ = [
"get_tids", "pin",
"cgroup_create", "cgroup_pids", "cgroup_move",
] | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/process.py | 0.808748 | 0.251947 | process.py | pypi |
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2020, Lluís Vilanova"
__license__ = "GPL version 3 or later"
import atexit
import collections
import logging
import os
import signal
import sys
import threading
import time
import traceback
import six
import spur
import spur.ssh
_LOGGER = logging.getLogger(__name__)
def is_local_shell(shell):
"""Whether the given shell is a `spur.LocalShell` or derivative."""
return isinstance(shell, spur.LocalShell)
def is_ssh_shell(shell):
"""Whether the given shell is a `spur.SshShell` or derivative."""
return isinstance(shell, spur.SshShell)
# Patch spur to update the _is_killed attribute when sending signals
def _patch_send_signal(func):
def send_signal_wrapper(self, signum):
if signum in [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM, signal.SIGKILL]:
# pylint: disable=protected-access
self._is_killed = True
shell, kill_args = self._kill
if kill_args:
cmd = []
for arg in kill_args:
if isinstance(arg, with_pid):
cmd.append(arg(self.pid))
elif isinstance(arg, with_signum):
cmd.append(arg(signum))
else:
cmd.append(arg)
shell.run(cmd)
else:
return func(self, signum)
return send_signal_wrapper
spur.local.LocalProcess.send_signal = _patch_send_signal(spur.local.LocalProcess.send_signal)
spur.ssh.SshProcess.send_signal = _patch_send_signal(spur.ssh.SshProcess.send_signal)
# Monitor background processes for failures, so we can error out early
_EXITING = False
_LOCK = threading.RLock()
def _kill_all():
# pylint: disable=global-statement
global _EXITING
_EXITING = True
# pylint: disable=protected-access
LocalShell._atexit_cb()
# pylint: disable=protected-access
SshShell._atexit_cb()
atexit.register(_kill_all)
# pylint: disable=invalid-name,too-few-public-methods
class with_pid:
"""Decorator to define a kill argument that takes the process' pid.
To be used as an element in the `kill` argument to a shell's `run` or
`spawn` method::
shell.run(["sudo", "program"], kill=["sudo", "kill", with_pid()])
Can be used in three ways, depending on the type of `func`:
- `None`: replace with the stringified process pid.
- `str`: format with the process' pid on the ``pid`` key.
- otherwise: call with the pid as an argument.
"""
def __init__(self, func=None):
self._func = func
def __call__(self, pid):
if self._func is None:
return str(pid)
if isinstance(self._func, six.string_types):
return self._func.format(pid=pid)
return self._func(pid)
# pylint: disable=invalid-name,too-few-public-methods
class with_signum:
"""Decorator to define a kill argument that takes the user's signal number.
To be used as an element in the `kill` argument to a shell's `run` or
`spawn` method::
shell.run(["sudo", "program"], kill=["sudo", "kill", with_signum()])
Can be used in three ways, depending on the type of `func`:
- `None`: replace with the stringified signal number.
- `str`: format with the signal number on the ``signum`` key.
- otherwise: call with the signal number as an argument.
"""
def __init__(self, func=None):
self._func = func
def __call__(self, signum):
if self._func is None:
return str(signum)
if isinstance(self._func, six.string_types):
return self._func.format(signum=signum)
return self._func(signum)
def _print_traceback(cmd_msg, stack_info=None):
if stack_info is None:
stack_info = traceback.extract_stack()
stack_idx = 0 if stack_info[0][2] == "<module>" else 6
print("Traceback (most recent call last):")
msg = traceback.format_list(stack_info[stack_idx:-1])
print("".join(msg), end="")
exc_type, exc_value, tb = sys.exc_info()
info = traceback.extract_tb(tb)
msg = traceback.format_list(info)
print("".join(msg), end="")
print("%s.%s: %s" % (exc_type.__module__, exc_type.__name__, exc_value))
print("command:", cmd_msg)
def _watchdog_thread(shell, obj, cmd_msg, exit_on_error):
stack_info = traceback.extract_stack()
def watchdog():
while obj.is_running():
time.sleep(1)
try:
obj.wait_for_result()
# pylint: disable=bare-except
except:
# pylint: disable=protected-access
shell._child_remove(obj)
_LOGGER.info("- %s", cmd_msg)
# pylint: disable=protected-access
if not obj._is_killed and not _EXITING:
_print_traceback(cmd_msg, stack_info)
if exit_on_error:
_kill_all()
os._exit(1)
else:
# pylint: disable=protected-access
shell._child_remove(obj)
_LOGGER.info("- %s", cmd_msg)
thread = threading.Thread(target=watchdog)
thread.daemon = True
thread.start()
# pylint: disable=function-redefined
class LocalShell(spur.LocalShell):
"""An extended version of `spur.LocalShell`.
It will properly kill all created processes when exiting.
The `run` and `spawn` methods have two new arguments:
- ``exit_on_error``: bool, optional
Whether to exit the program when the process fails to execute.
- ``kill``: list of str, optional
Command to execute when killing the process. Useful when process is
run with sudo.
"""
__CHILDREN = collections.OrderedDict()
def _child_add(self, obj):
with _LOCK:
LocalShell.__CHILDREN[obj] = None
@classmethod
def _child_remove(cls, obj):
with _LOCK:
cls.__CHILDREN.pop(obj, None)
# pylint: disable=arguments-differ
def run(self, *args, **kwargs):
process = self.spawn(*args, **kwargs, exit_on_error=False)
result = process.wait_for_result()
return result
def spawn(self, *args, **kwargs):
exit_on_error = kwargs.pop("exit_on_error", True)
kill = kwargs.pop("kill", None)
cmd = args[0]
cmd_msg = " ".join(cmd)
_LOGGER.info("+ %s", cmd_msg)
try:
obj = spur.LocalShell.spawn(self, *args, **kwargs, store_pid=True)
# pylint: disable=bare-except
except:
if exit_on_error:
stack_info = traceback.extract_stack()
_print_traceback(cmd_msg, stack_info)
_kill_all()
# pylint: disable=protected-access
os._exit(1)
else:
raise
else:
obj._is_killed = False # pylint: disable=protected-access
obj._kill = (self, kill) # pylint: disable=protected-access
self._child_add(obj)
_watchdog_thread(self, obj, cmd_msg, exit_on_error)
return obj
@classmethod
def _atexit_cb(cls):
while True:
with _LOCK:
if not cls.__CHILDREN:
return
child = next(iter(cls.__CHILDREN.keys()))
# pylint: disable=protected-access
child._is_killed = True
if child.is_running():
try:
child.send_signal(signal.SIGTERM)
child.wait_for_result()
# pylint: disable=bare-except
except:
pass
# pylint: disable=protected-access
cls._child_remove(child)
class SshShell(spur.SshShell):
"""An extended version of `spur.SshShell`.
It will properly kill all created processes when exiting.
The shell object has two new members:
- ``hostname``: str
Target host name.
- ``username``: str
Target user name.
The `run` and `spawn` methods have two new arguments:
- ``exit_on_error``: bool, optional
Whether to exit the program when the process fails to execute.
- ``kill``: list of str, optional
Command to execute when killing the process. Useful when process is
run with sudo.
"""
__CHILDREN = collections.OrderedDict()
def _child_add(self, obj):
with _LOCK:
SshShell.__CHILDREN[obj] = None
@classmethod
def _child_remove(cls, obj):
with _LOCK:
cls.__CHILDREN.pop(obj, None)
def __init__(self, *args, **kwargs):
spur.SshShell.__init__(self, *args, **kwargs)
self.hostname = self._hostname
self.username = self._username
# pylint: disable=arguments-differ
def run(self, *args, **kwargs):
process = self.spawn(*args, **kwargs, exit_on_error=False)
result = process.wait_for_result()
return result
# pylint: disable=arguments-differ
def spawn(self, *args, **kwargs):
exit_on_error = kwargs.pop("exit_on_error", True)
kill = kwargs.pop("kill", None)
cmd = args[0]
cmd_msg = "ssh -p %d %s@%s %s" % (self._port, self.username, self.hostname, " ".join(cmd))
_LOGGER.info("+ %s", cmd_msg)
try:
obj = spur.SshShell.spawn(self, *args, **kwargs, store_pid=True)
# pylint: disable=bare-except
except:
if exit_on_error:
stack_info = traceback.extract_stack()
_print_traceback(cmd_msg, stack_info)
_kill_all()
# pylint: disable=protected-access
os._exit(1)
else:
raise
else:
obj._is_killed = False # pylint: disable=protected-access
obj._kill = (self, kill) # pylint: disable=protected-access
self._child_add(obj)
_watchdog_thread(self, obj, cmd_msg, exit_on_error)
return obj
@classmethod
def _atexit_cb(cls):
while True:
with _LOCK:
if not cls.__CHILDREN:
return
child = next(iter(cls.__CHILDREN.keys()))
# pylint: disable=protected-access
child._is_killed = True
if child.is_running():
try:
child.send_signal(signal.SIGTERM)
child.wait_for_result()
# pylint: disable=bare-except
except:
pass
# pylint: disable=protected-access
cls._child_remove(child)
def get_shell(server, user=None, password=None, port=22):
"""Get a new shell.
If `server` is a spur shell, return that instead.
Parameters
----------
server : str or object
user : str, optional
password : str, optional
port : int, optional
"""
    if is_ssh_shell(server) or is_local_shell(server):
        if is_ssh_shell(server):
            if user is None:
                user = server.username
            if password is None:
                # pylint: disable=protected-access
                password = server._password
            server = server.hostname
        else:
            server = "localhost"
return SshShell(hostname=server,
username=user,
password=password,
port=port,
missing_host_key=spur.ssh.MissingHostKey.accept)
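# Example (not part of the original module): a minimal, hypothetical sketch that
# opens an SSH shell and spawns a command under sudo, telling the wrapper how to
# kill it if needed. Host, credentials and the command are assumed example values.
#
#     shell = get_shell("node1", user="user", password="secret")
#     proc = shell.spawn(["sudo", "sleep", "1000"],
#                        kill=["sudo", "kill", with_signum("-{signum}"), with_pid()])
#     proc.send_signal(signal.SIGTERM)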
__all__ = [
"is_local_shell", "is_ssh_shell", "get_shell",
"with_pid",
]
__all__ += spur.__all__ | /sciexp2-exprun-0.3.3.tar.gz/sciexp2-exprun-0.3.3/sciexp2/exprun/spur.py | 0.561696 | 0.15746 | spur.py | pypi |
<p align="center">
<img src="https://raw.githubusercontent.com/SciFin-Team/SciFin/master/docs/logos/logo_scifin_github.jpg" width=400 title="hover text">
</p>
# SciFin
SciFin is a python package for Science and Finance.
## Summary
The SciFin package is a Python package designed to gather and develop methods for scientific studies and financial services. It originates from the observation that numerous methods developed in scientific fields (such as mathematics, physics, biology and climate sciences) have direct applicability in finance and that, conversely, multiple methods developed in finance can benefit science.
The development goal of this package is to offer a toolbox that can be used both in research and business. Its purpose is not only to bring these fields together, but also to increase interoperability between them, helping scientific methods turn into business applications and finance gain new insights from science. Some functions are thus neutral to any scientific or economic field, while others are specialized for precise tasks. The motivation behind this design is to provide tools that perform advanced tasks while remaining simple (not depending on too many parameters).
## Table of Contents
- **[Development Stage](#development-stage)**<br>
- **[Installation](#installation)**<br>
- **[Usage](#usage)**<br>
- **[Contributing](#contributing)**<br>
- **[Credits](#credits)**<br>
- **[License](#license)**<br>
- **[Contacts](#contacts)**<br>
## Development Stage
The current development is focused on the following topics:
| Subpackage | Short Description | Development Stage |
| :-----: | :-----: | :-----: |
| [`classifier`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/classifier) | classification techniques | ■ □ □ □ □ |
| [`fouriertrf`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/fouriertrf) | Fourier transforms | ■ □ □ □ □ |
| [`geneticalg`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/geneticalg) | genetic algorithms | ■ ■ ■ □ □ |
| [`marketdata`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/marketdata) | reading market data | ■ □ □ □ □ |
| [`montecarlo`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/montecarlo) | Monte Carlo simulations | ■ □ □ □ □ |
| [`neuralnets`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/neuralnets) | neural networks | □ □ □ □ □ |
| [`statistics`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/statistics) | basic statistics | ■ □ □ □ □ |
| [`timeseries`](https://github.com/SciFin-Team/SciFin/tree/master/scifin/timeseries) | time series analysis | ■ ■ ■ ■ □ |
The topics already developed are time series analysis, genetic algorithms and statistics.
A lot of development still needs to be done, and other topics will follow later.
## Installation
Installing SciFin on Linux or Mac is easy: simply run this in a terminal:
`pip install SciFin`
You can also access the last version of the package [on PyPI](https://pypi.org/project/scifin/).
If you encounter problems during or after installation and think you know how the problem can be fixed, please share it with me.
Version 0.0.8 may lead to a small problem with pandas. If you get an error message such as:
`ImportError: cannot import name 'urlencode' from 'pandas.io.common'`
it is advised to install pandas version 1.0.3 using e.g. the command line:
`pip install pandas==1.0.3`.
## Usage
The code is growing fast and many classes and functions acquire new features. Hence, at the moment one version can differ significantly from the previous one. That's what makes development exciting! But it can also be confusing.
A documentation of the code should help users. Once ready, this documentation will start appearing on [SciFin's Wiki page](https://github.com/SciFin-Team/SciFin/wiki).
If you encounter any problem while using SciFin, please do not hesitate to report it to us by [creating an issue](https://docs.github.com/en/github/managing-your-work-on-github/creating-an-issue).
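As a first contact with the package (a minimal sketch; the exact module contents may still change between versions), the subpackages listed in the table above can be imported from the `scifin` namespace:

```python
from scifin import timeseries, geneticalg, statistics
```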
## Contributing
The package tries to follow the style guide for Python code [PEP8](https://www.python.org/dev/peps/pep-0008/). If you find any part of the code unclear or departing from this style, please let me know. As for docstrings, the format we try to follow here is given by the [numpy doc style](https://numpydoc.readthedocs.io/en/latest/format.html).
It is strongly advised to have a fair knowledge of Python to contribute, or at least a strong motivation to learn, and it is recommended to read the following [Python3 Tutorial](https://www.python-course.eu/python3_course.php) before joining the project.
To know more about the (evolving) rules that make the project self-consistent and eases interaction between contributors, please refer to details in the [Contributing](https://github.com/SciFin-Team/SciFin/blob/master/CONTRIBUTING.md) file.
## Credits
All the development up to now has been done by Fabien Nugier. New contributors will join soon.
## License
SciFin is currently developed under the MIT license.
Please keep in mind that SciFin and its developers hold no responsibility for any wrong usage or losses related to the package usage.
For more details, please refer to the [license](https://github.com/SciFin-Team/SciFin/blob/master/LICENSE).
## Contacts
If you have comments or suggestions, please reach Fabien Nugier. Thank you very much in advance for your feedback.
| /SciFin-0.1.0.tar.gz/SciFin-0.1.0/README.md | 0.700383 | 0.895751 | README.md | pypi |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AspectActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('aspects_lookup_id', models.IntegerField()),
('aspects_file_id', models.IntegerField()),
('activitycode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='AspectErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('aspects_lookup_id', models.IntegerField()),
('aspects_file_id', models.IntegerField()),
('errorcode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_errors',
'managed': False,
},
),
migrations.CreateModel(
name='AspectFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('aspect_lookup_id', models.IntegerField()),
('file', models.TextField()),
('type', models.CharField(max_length=32)),
('version', models.IntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_files',
'managed': False,
},
),
migrations.CreateModel(
name='AspectLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueid', models.CharField(max_length=128)),
('title', models.CharField(max_length=256)),
('type', models.CharField(max_length=16)),
('graphname', models.CharField(max_length=256)),
('currentversion', models.IntegerField()),
('auth_user_id', models.PositiveIntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_lookup',
'managed': False,
},
),
migrations.CreateModel(
name='FacetActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facets_lookup_id', models.IntegerField()),
('facets_file_id', models.IntegerField()),
('activitycode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='FacetErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facets_lookup_id', models.IntegerField()),
('facets_file_id', models.IntegerField()),
('errorcode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_errors',
'managed': False,
},
),
migrations.CreateModel(
name='FacetFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facet_lookup_id', models.IntegerField()),
('file', models.TextField()),
('type', models.CharField(max_length=32)),
('version', models.IntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_files',
'managed': False,
},
),
migrations.CreateModel(
name='FacetLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueid', models.CharField(max_length=128)),
('title', models.CharField(max_length=256)),
('type', models.CharField(max_length=16)),
('graphname', models.CharField(max_length=256)),
('currentversion', models.IntegerField()),
('auth_user_id', models.PositiveIntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_lookup',
'managed': False,
},
),
migrations.CreateModel(
name='JsonActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session', models.CharField(max_length=24)),
('json_lookup_id', models.IntegerField()),
('json_file_id', models.IntegerField()),
('activitycode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'json_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='JsonErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session', models.CharField(max_length=24)),
('json_lookup_id', models.IntegerField()),
('json_file_id', models.IntegerField()),
('errorcode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'json_errors',
'managed': False,
},
),
migrations.CreateModel(
name='JsonFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('json_lookup_id', models.IntegerField()),
('file', models.TextField()),
('type', models.CharField(max_length=32)),
('version', models.IntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'json_files',
'managed': False,
},
),
migrations.CreateModel(
name='JsonLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dataset_id', models.IntegerField()),
('uniqueid', models.CharField(max_length=128)),
('title', models.CharField(max_length=256)),
('graphname', models.CharField(max_length=256)),
('currentversion', models.IntegerField()),
('auth_user_id', models.PositiveIntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'json_lookup',
'managed': False,
},
),
] | /sciflow-0.2.tar.gz/sciflow-0.2/datafiles/migrations/0001_initial.py | 0.589598 | 0.17575 | 0001_initial.py | pypi |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Descriptors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(default='', max_length=128)),
('value', models.CharField(default='', max_length=768)),
('source', models.CharField(default='', max_length=64)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'descriptors',
'managed': False,
},
),
migrations.CreateModel(
name='Identifiers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('casrn', 'CAS Registry Number'), ('inchi', 'IUPAC InChI String'), ('inchikey', 'IUPAC InChI Key'), ('csmiles', 'Canonical SMILES'), ('ismiles', 'Isomeric SMILES'), ('chemspider', 'Chemspider ID'), ('pubchem', 'PubChem Compound ID'), ('iupacname', 'IUPAC Name'), ('springer', 'Springer ID'), ('othername', 'Other Name'), ('atc', 'ATC Code'), ('reaxys', 'Reaxys ID'), ('gmelin', 'Gmelin ID'), ('chebi', 'ChEBI ID'), ('chembl', 'ChEMBL ID'), ('rtecs', 'RTECS ID'), ('dsstox', 'DSSTOX ID')], default='casrn', max_length=10)),
('value', models.CharField(default='', max_length=768)),
('iso', models.CharField(max_length=5, null=True)),
('source', models.CharField(default='', max_length=64)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'identifiers',
'managed': False,
},
),
migrations.CreateModel(
name='Sources',
fields=[
('id', models.AutoField(db_column='Id', primary_key=True, serialize=False)),
('source', models.CharField(max_length=32)),
('result', models.CharField(max_length=1)),
('notes', models.CharField(blank=True, max_length=2000, null=True)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'sources',
'managed': False,
},
),
migrations.CreateModel(
name='Substances',
fields=[
('id', models.SmallAutoField(primary_key=True, serialize=False)),
('name', models.CharField(default='', max_length=256)),
('formula', models.CharField(default='', max_length=256)),
('monomass', models.FloatField(default=0.0)),
('molweight', models.FloatField(default=0.0)),
('casrn', models.CharField(default='', max_length=16)),
('graphdb', models.CharField(max_length=256, null=True)),
('comments', models.CharField(max_length=256, null=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'substances',
'managed': False,
},
),
migrations.CreateModel(
name='SubstancesSystems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('substance_id', models.SmallIntegerField()),
('system_id', models.SmallIntegerField()),
('role', models.CharField(blank=True, max_length=13, null=True)),
('constituent', models.PositiveIntegerField(blank=True, null=True)),
('mixture_id', models.IntegerField(blank=True, null=True)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'substances_systems',
'managed': False,
},
),
migrations.CreateModel(
name='Templates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=16)),
('json', models.TextField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'templates',
'managed': False,
},
),
migrations.CreateModel(
name='Systems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=1024)),
('composition', models.CharField(choices=[('PS', 'pure compound'), ('BM', 'binary mixture'), ('TM', 'ternary mixture'), ('QM', 'quaternary mixture'), ('NM', 'quinternary mixture')], default='PS', max_length=2)),
('identifier', models.CharField(default='', max_length=128)),
('updated', models.DateTimeField(auto_now=True)),
('substance1', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='substance1', to='substances.substances')),
('substance2', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='substance2', to='substances.substances')),
('substance3', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='substance3', to='substances.substances')),
('substance4', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='substance4', to='substances.substances')),
('substance5', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='substance5', to='substances.substances')),
],
options={
'db_table': 'systems',
'managed': False,
},
),
] | /sciflow-0.2.tar.gz/sciflow-0.2/substances/migrations/0001_initial.py | 0.573559 | 0.160135 | 0001_initial.py | pypi |
from datafiles.df_functions import *
from pathlib import Path
from sciflow.settings import *
def testimport():
""" import test data from static/files in the DB"""
folder = Path(BASE_DIR + "/static/files/")
for file in folder.iterdir():
if str(file).endswith('.jsonld'):
filename = str(file).split("\\")[-1]
filetype = None
with open(filename, "r") as f:
data = json.load(f)
if "/aspect/" in data["@id"]:
filetype = "aspect"
elif "/data/" in data["@id"]:
filetype = "data"
elif "/facet/" in data["@id"]:
filetype = "facet"
if adddatafile({"path": filename}, filetype):
print("Imported " + filename)
# ----- MySQL Functions -----
# used in datasets/mysql.py:getcodenames
def getdatasetnames():
""" retrieve the shortnames of all the datasets """
qset = Datasets.objects.all().values_list(
'datasetname', flat=True).order_by('id')
lst = list(qset)
return lst
# used in datasets/mysql.py:getcodenames
def getsourcecodes():
""" retrieve the shortnames of all the datasets """
qset = Datasets.objects.all().values_list(
'sourcecode', flat=True).order_by('id')
lst = list(qset)
return lst
# used in validation.py:validate
def getcodesnames():
""" create unique string to match a file to a dataset """
codes = getsourcecodes()
names = getdatasetnames()
output = {}
for i in range(len(codes)):
output.update({names[i]: codes[i] + ":" + names[i]})
return output
# used to update dataset stats
def updatestats():
"""update the number of files for each different dataset"""
# data
sets = Datasets.objects.exclude(sourcecode='chalklab').\
values_list('id', flat=True)
for setid in sets:
count = JsonLookup.objects.filter(dataset_id=setid).count()
sett = Datasets.objects.get(id=setid)
sett.count = count
sett.save()
# facets
dnames = Datasets.objects.filter(sourcecode='chalklab').\
values_list('id', 'datasetname')
for setid, dname in dnames:
count = FacetLookup.objects.filter(type=dname).count()
sett = Datasets.objects.get(id=setid)
sett.count = count
sett.save()
return | /sciflow-0.2.tar.gz/sciflow-0.2/datasets/ds_functions.py | 0.450359 | 0.306611 | ds_functions.py | pypi |
import json
from typing import Literal
import re
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from tabulate import tabulate
from sciform import Formatter, ExpMode, RoundMode, SignMode, FormatOptions
def get_scale_and_offset_from_offset_str(
ax: plt.Axes, axis: Literal['x', 'y']) -> tuple[float, float]:
"""
Extract the scale and offset for a particular axis from the existing
offset text when the axis is formatted in scientific mode.
"""
plt.draw()
if axis == 'x':
offset_text_obj = ax.xaxis.get_offset_text()
elif axis == 'y':
offset_text_obj = ax.yaxis.get_offset_text()
else:
raise ValueError(f'axis must be \'x\' or \'y\', not '
f'\'{axis}\'.')
ax.ticklabel_format(axis=axis, style='sci')
ax.get_figure().canvas.draw() # Redraw canvas to update offset text
offset_text = offset_text_obj.get_text()
# Replace minus sign with hyphen minus sign
offset_text = offset_text.replace('\u2212', '-')
pattern = re.compile(r'^(?P<scale>1e[+-]?\d+)?(?P<offset>[+-]1e\d+)?$')
match = re.match(pattern, offset_text)
scale = float(match.group('scale') or 1)
offset = float(match.group('offset') or 0)
return scale, offset
def prefix_exp_ticks(ax: plt.Axes, axis: Literal['x', 'y'],
shifted: bool = False) -> None:
"""
Use prefix notation for axis tick labels. Scale the tick labels by
the multiplier that appears in the offset text and format the labels
into SI prefix format. Format any remaining offset value in the
offset text into SI prefix format as well.
"""
if not shifted:
exp_mode = ExpMode.ENGINEERING
else:
exp_mode = ExpMode.ENGINEERING_SHIFTED
tick_formatter = Formatter(FormatOptions(
exp_mode=exp_mode,
prefix_exp=True))
offset_formatter = Formatter(FormatOptions(
sign_mode=SignMode.ALWAYS,
exp_mode=exp_mode,
prefix_exp=True))
ax.ticklabel_format(axis=axis, style='sci')
if axis == 'x':
old_ticklabels = ax.get_xticklabels()
elif axis == 'y':
old_ticklabels = ax.get_yticklabels()
else:
raise ValueError(f'axis must be \'x\' or \'y\', not \'{axis}\'.')
scale, offset = get_scale_and_offset_from_offset_str(ax, axis)
new_tick_locations = list()
new_tick_labels = list()
for old_ticklabel in old_ticklabels:
x, y = old_ticklabel.get_position()
if axis == 'x':
new_tick_locations.append(x)
else:
new_tick_locations.append(y)
# Replace minus sign with hyphen minus sign
old_label_str = old_ticklabel.get_text().replace('\u2212', '-')
val = float(old_label_str) * scale
new_str = tick_formatter(val)
new_tick_labels.append(new_str)
if offset != 0:
offset_str = offset_formatter(offset)
else:
offset_str = ''
if axis == 'x':
ax.set_xticks(new_tick_locations, new_tick_labels)
ax.text(x=1.01, y=0, s=offset_str, transform=ax.transAxes)
else:
ax.set_yticks(new_tick_locations, new_tick_labels)
ax.text(x=0, y=1.01, s=offset_str, transform=ax.transAxes)
def quadratic(x, c, x0, y0):
return (c / 2) * (x - x0) ** 2 + y0
def main():
fit_results_formatter = Formatter(FormatOptions(
exp_mode=ExpMode.ENGINEERING,
round_mode=RoundMode.SIG_FIG,
bracket_unc=True,
ndigits=2))
with open('data/fit_data.json', 'r') as f:
data_dict = json.load(f)
color_list = ['red', 'blue', 'purple']
fit_results_list = list()
fig, ax = plt.subplots(1, 1)
for idx, single_data_dict in enumerate(data_dict.values()):
x = single_data_dict['x']
y = single_data_dict['y']
y_err = single_data_dict['y_err']
fit_results_dict = dict()
color = color_list[idx]
ax.errorbar(x, y, y_err, marker='o', linestyle='none', color=color,
label=color)
popt, pcov = curve_fit(quadratic, x, y, sigma=y_err, p0=(2e13, 0, 1e9))
model_x = np.linspace(min(x), max(x), 100)
model_y = quadratic(model_x, *popt)
ax.plot(model_x, model_y, color=color)
fit_results_dict['color'] = color
fit_results_dict['curvature'] = fit_results_formatter(
popt[0], np.sqrt(pcov[0, 0]))
fit_results_dict['x0'] = fit_results_formatter(
popt[1], np.sqrt(pcov[1, 1]))
fit_results_dict['y0'] = fit_results_formatter(
popt[2], np.sqrt(pcov[2, 2]))
fit_results_list.append(fit_results_dict)
ax.grid(True)
ax.legend()
prefix_exp_ticks(ax, 'x')
prefix_exp_ticks(ax, 'y', shifted=True)
fig.savefig('outputs/fit_plot_with_sciform.png', facecolor='white')
plt.show()
table_str = tabulate(fit_results_list, headers='keys', tablefmt='grid')
with open('outputs/fit_plot_with_sciform_table.txt', 'w') as f:
f.write(table_str)
print(table_str)
if __name__ == "__main__":
main() | /sciform-0.28.2.tar.gz/sciform-0.28.2/examples/fit_plot_with_sciform.py | 0.718989 | 0.425247 | fit_plot_with_sciform.py | pypi |
# scify-file-reader
The scify-file-reader package provides a convenient class for handling multiple files with the same structure in a directory. It offers functionality to read and process data from various file types, including CSV, XLSX, Parquet, and JSON.
## Installation
You can install scify-file-reader using pip:
```shell
pip install scify-file-reader
```
## Usage
To use scify-file-reader, follow these steps:
1. Import the `FileReader` class:
```python
from scify_file_reader import FileReader
```
2. Create an instance of the FileReader class, providing the content you want to read. The content can be a string representing a `file path`, a `Path` object, or a `zipfile.ZipFile` object:
```python
content = 'path/to/directory'
reader = FileReader(content)
```
3. Read the files using the read_files method:
```python
data = reader.read_files()
```
The `read_files` method returns a dictionary where the keys are the filenames (without the extension) and the values are pandas DataFrames containing the file data.
**For more details on the available methods and parameters, refer to the package documentation.**
## Examples:
Here's an example that demonstrates how to use scify-file-reader:
### Normal Output
```python
from scify_file_reader import FileReader
PATH = '/path/to/directory'
"""
# Suppose we have these files inside our directory
print(os.listdir(PATH))
# OUT: ['file_1.csv', 'log_2.csv', 'test_3.csv',
#       'file_%Y%m%d%H%M%S.csv', 'log_%Y%m%d%H%M%S.csv', 'test_%Y%m%d%H%M%S.csv',
#       'file_%Y%m%d_%H%M%S.csv', 'log_%Y%m%d_%H%M%S.csv', 'test_%Y%m%d_%H%M%S.csv']
"""
# Example: Reading files from a directory
reader = FileReader('/path/to/directory')
data = reader.read_files()  # read_files accepts kwargs from the pandas read_* methods
"""
OUTPUT: print(data)
{
'file_1.csv': <pd.DataFrame>,
'log_2.csv': <pd.DataFrame>,
'test_3.csv': <pd.DataFrame>,
'file_%Y%m%d%H%M%S.csv': <pd.DataFrame>,
'log_%Y%m%d%H%M%S.csv': <pd.DataFrame>,
'test_%Y%m%d%H%M%S.csv': <pd.DataFrame>,
'file_%Y%m%d_%H%M%S.csv': <pd.DataFrame>,
'log_%Y%m%d_%H%M%S.csv': <pd.DataFrame>,
'test_%Y%m%d_%H%M%S.csv': <pd.DataFrame>
}
"""
```
### Concatenating patterns:
Use this option when you need to concatenate multiple files that share a common prefix into a single consolidated DataFrame per prefix.
**E.g.** In the last example, we demonstrate the use of scify-file-reader with a directory containing 9 files that follow common naming patterns, such as 'file', 'log', and 'test'. By joining these files, we can consolidate and analyze their data more effectively. Let's take a look at the example to understand how they are joined.
```python
from scify_file_reader import FileReader
PATH = '/path/to/directory'
"""
# Let's suppose we have these files inside our directory.
print(os.listdir(PATH))
# OUT: ['file_1.csv', 'log_2.csv', 'test_3.csv',
#       'file_%Y%m%d%H%M%S.csv', 'log_%Y%m%d%H%M%S.csv', 'test_%Y%m%d%H%M%S.csv',
#       'file_%Y%m%d_%H%M%S.csv', 'log_%Y%m%d_%H%M%S.csv', 'test_%Y%m%d_%H%M%S.csv']
"""
# Example: Reading files from a directory
reader = FileReader('/path/to/directory')
data = reader.read_files(join_prefixes=True)
"""
OUTPUT: print(data)
{
'file': <pd.DataFrame>,
'log': <pd.DataFrame>,
'test': <pd.DataFrame>,
}
"""
```
### Using a specific regular expression
In the example above, all files with common prefixes, such as `file_1.csv`, `file_%Y%m%d%H%M%S.csv`, and `file_%Y%m%d_%H%M%S.csv`, were joined together under the file key in the output.
If you want to use a specific regular expression for filtering your files, you can follow these steps:
```python
from scify_file_reader import FileReader
PATH = '/path/to/directory'
# Example: Reading files from a directory
reader = FileReader('/path/to/directory')
regex = '<some_regex>'
reader.set_prefix_file_pattern_regex(regex)
data = reader.read_files(join_prefixes=True)
```
By default the regular expression is `^([A-Z]+)_\d+`.
### Specific prefixes instead of regular expressions
If you prefer to use specific prefixes instead of regular expressions, you can utilize the `join_custom_prefixes` argument. This argument accepts a tuple of prefixes that you want to join together.
```python
from scify_file_reader import FileReader
PATH = '/path/to/directory'
"""
# Suppose we have these files inside our directory
print(os.listdir(PATH))
# OUT: ['file_1.csv', 'log_2.csv', 'test_3.csv',
#       'file_%Y%m%d%H%M%S.csv', 'log_%Y%m%d%H%M%S.csv', 'test_%Y%m%d%H%M%S.csv',
#       'file_%Y%m%d_%H%M%S.csv', 'log_%Y%m%d_%H%M%S.csv', 'test_%Y%m%d_%H%M%S.csv']
"""
# Example: Reading files from a directory
reader = FileReader('/path/to/directory')
specific_prefixes = ('file', 'log', 'test')
data = reader.read_files(join_prefixes=True, regex=False, join_custom_prefixes=specific_prefixes)
"""
OUTPUT: print(data)
{
'file': <pd.DataFrame>,
'log': <pd.DataFrame>,
'test': <pd.DataFrame>,
}
"""
```
## Contributing
Contributions are welcome! If you have any suggestions, bug reports, or feature requests, please open an issue or submit a pull request on the [scify-file-reader](https://github.com/Jeferson-Peter/scify-file-reader) repository.
| /scify-file-reader-0.0.2.tar.gz/scify-file-reader-0.0.2/README.md | 0.892829 | 0.888324 | README.md | pypi |
import os
import re
import zipfile
from io import BytesIO
from pathlib import Path
from typing import Union, IO, Tuple
import pandas as pd
import pyarrow.parquet as pq
class FileReader:
"""
A class to handle and process multiple files with identical structures within a directory or a zip archive.
Args:
content (Union[str, Path, zipfile.ZipFile]): The content to read. It can be a string representing
a file path, a Path object, or a zipfile.ZipFile object.
Attributes:
content (Union[str, Path, zipfile.ZipFile]): The content to read.
is_dir (bool): Indicates if the content is a directory.
is_zipfile (bool): Indicates if the content is a zip archive.
_available_exts (Tuple[str]): Available file extensions to consider when reading files.
_prefix_file_pattern_regex (re.Pattern): Regular expression pattern for file prefixes.
"""
def __init__(self, content: Union[str, Path, zipfile.ZipFile]):
self.content = content
self.is_dir = False
self.is_zipfile = False
self._available_exts = ('.csv', '.xlsx', '.parquet', '.json')
self._prefix_file_pattern_regex = re.compile(r'^([A-Z]+)_\d+')
self._check_content_type()
self._check_extensions()
def set_prefix_file_pattern_regex(self, regex: str):
"""
Set a custom regular expression pattern for file prefixes.
Args:
regex (str): The custom regular expression pattern.
"""
self._prefix_file_pattern_regex = re.compile(regex)
def _check_content_type(self):
"""
Check the type of the content (directory or zip archive) and update the corresponding attributes.
"""
if isinstance(self.content, (str, Path)):
self.content = Path(self.content)
self.is_dir = self.content.is_dir()
if self.content.is_file() and self.content.suffix.lower() == '.zip':
self.is_zipfile = True
self.content = zipfile.ZipFile(self.content)
elif isinstance(self.content, zipfile.ZipFile):
self.is_zipfile, self.is_dir = True, False
def _check_extensions(self):
"""
Check the available file extensions in the content and validate if they are supported.
"""
exts = set()
if self.is_dir:
exts = set([os.path.splitext(x)[1] for x in os.listdir(self.content)
if os.path.splitext(x)[1] != ''])
elif self.is_zipfile:
exts = set([os.path.splitext(x)[1] for x in self.content.namelist()
if os.path.splitext(x)[1] != ''])
if len(exts) <= 0:
raise Exception(f"No data found inside {self.content}")
elif len(exts) > 1:
raise Exception(f"Multiple file types found in content '{self.content}': {exts}")
elif len(exts) == 1:
ext_is_available = list(exts)[0] in self._available_exts
if not ext_is_available:
raise Exception(f"'{list(exts)[0]}' not available. The available file types are {', '.join(self._available_exts)}")
def _get_files_to_read(self):
"""
Get the files to read based on the content type.
Returns:
List[str]: List of file names to read.
"""
if self.is_zipfile:
return self.content.namelist()
elif self.is_dir:
return os.listdir(self.content)
def _zip_file_reader(self, data: dict, file: str, **kwargs):
"""
Read a file from a zip archive and add it to the data dictionary.
Args:
data (dict): Dictionary to store the file data.
file (str): File name to read.
**kwargs: Additional arguments to pass to the pandas read methods.
Returns:
dict: Updated data dictionary.
"""
filename, ext = os.path.splitext(file)
if ext.lower() == '.csv':
with self.content.open(file) as f:
data[filename] = pd.read_csv(f, **kwargs)
elif ext.lower() == '.xlsx':
with self.content.open(file) as f:
data[filename] = pd.read_excel(f, **kwargs)
elif ext.lower() == '.parquet':
with self.content.open(file) as f:
data[filename] = pq.read_table(f, **kwargs).to_pandas()
elif ext.lower() == '.json':
with self.content.open(file) as f:
data[filename] = pd.read_json(f, **kwargs)
return data
def _path_file_reader(self, data: dict, file: str, **kwargs):
"""
Read a file from a directory and add it to the data dictionary.
Args:
data (dict): Dictionary to store the file data.
file (str): File name to read.
**kwargs: Additional arguments to pass to the pandas read methods.
Returns:
dict: Updated data dictionary.
"""
filename, ext = os.path.splitext(file)
path_to_read = os.path.join(self.content, file)
if ext.lower() == '.csv':
data[filename] = pd.read_csv(path_to_read, **kwargs)
elif ext.lower() == '.xlsx':
data[filename] = pd.read_excel(path_to_read, **kwargs)
elif ext.lower() == '.parquet':
data[filename] = pq.read_table(path_to_read, **kwargs).to_pandas()
elif ext.lower() == '.json':
data[filename] = pd.read_json(path_to_read, **kwargs)
return data
def __get_file_pattern(self, filenames: list):
"""
Get the unique file patterns based on the file names.
Args:
filenames (list): List of file names.
Returns:
set: Set of unique file patterns.
"""
prefixes = set([re.match(self._prefix_file_pattern_regex, filename).group(1) for filename in filenames if
re.match(self._prefix_file_pattern_regex, filename)])
return prefixes
def read_files(self,
join_prefixes: bool = False,
regex: bool = True,
join_custom_prefixes: Tuple[str] = None,
**kwargs):
"""
Read and process the files.
Args:
join_prefixes (bool, optional): Whether to join files with the same prefix into a single DataFrame.
Defaults to False.
regex (bool, optional): Whether to use regular expressions to identify file prefixes. Defaults to True.
join_custom_prefixes (Tuple[str], optional): Custom prefixes to join together. Defaults to None.
**kwargs: Additional arguments to pass to the pandas read methods.
Returns:
dict: A dictionary where the keys are the filenames (or prefixes if join_prefixes is True) and
the values are pandas DataFrames containing the file data.
"""
data = {}
files = self._get_files_to_read()
if self.is_zipfile:
for file in files:
data.update(self._zip_file_reader(data, file, **kwargs))
elif self.is_dir:
for file in files:
data.update(self._path_file_reader(data, file, **kwargs))
if join_prefixes:
if not regex and join_custom_prefixes:
unique_file_prefixes = set(join_custom_prefixes)
else:
unique_file_prefixes = self.__get_file_pattern(list(data.keys()))
for prefix in unique_file_prefixes:
file_prefixes = [x for x in data.keys() if prefix in x]
data[prefix] = pd.concat([data[x] for x in file_prefixes], ignore_index=True)
[data.pop(x) for x in file_prefixes]
del file_prefixes
return data
else:
return data | /scify-file-reader-0.0.2.tar.gz/scify-file-reader-0.0.2/scify_file_reader/file_reader.py | 0.738858 | 0.297011 | file_reader.py | pypi |
from scipy.spatial import cKDTree as KDTree
import numpy as np
class IDW(object):
"""
# https://mail.python.org/pipermail/scipy-user/2010-June/025920.html
# https://github.com/soonyenju/pysy/blob/master/pysy/scigeo.py
inverse-distance-weighted interpolation using KDTree:
invdisttree = Invdisttree(X, z)
-- points, values
interpol = invdisttree(q, k=6, eps=0)
-- interpolate z from the 6 points nearest each q;
q may be one point, or a batch of points
"""
def __init__(self, X, z, leafsize = 10):
        super().__init__()
self.tree = KDTree(X, leafsize=leafsize) # build the tree
self.z = z
def __call__(self, q, k = 8, eps = 0):
# q is coor pairs like [[lon1, lat1], [lon2, lat2], [lon3, lat3]]
# k nearest neighbours of each query point --
# format q if only 1d coor pair passed like [lon1, lat1]
if not isinstance(q, np.ndarray):
q = np.array(q)
if q.ndim == 1:
q = q[np.newaxis, :]
        self.distances, self.ix = self.tree.query(q, k=k, eps=eps)
interpol = [] # np.zeros((len(self.distances),) +np.shape(z[0]))
for dist, ix in zip(self.distances, self.ix):
if dist[0] > 1e-10:
w = 1 / dist
                wz = np.dot(w, self.z[ix]) / np.sum(w)  # weight the z values by 1/dist
else:
wz = self.z[ix[0]]
interpol.append(wz)
return interpol
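
# Editor's addition: a small, self-contained usage sketch for the IDW class above.
# The sample points, values and query coordinates are synthetic placeholders.
def _example_idw_usage():
    pts = np.random.rand(100, 2) * 10                 # known (lon, lat) locations
    vals = np.sin(pts[:, 0]) + np.cos(pts[:, 1])      # known values at those locations
    idw = IDW(pts, vals, leafsize=10)
    return idw([[2.5, 3.5], [7.1, 0.4]], k=6)         # interpolated values at two query points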
def gen_buffer(lon, lat, step, shape = "rectangle"):
if shape == "rectangle":
# clockwise
coors = [
[lon - step, lat + step], # upper left
[lon + step, lat + step], # upper right
[lon + step, lat - step], # lower right
[lon - step, lat - step], # lower left
]
return coors
def dms2ddm(deg, min_, sec):
# covert Degrees Minutes Seconds (DMS) to Degrees Decimal Minutes (DDM)
min_ = min_ + sec / 60
ddm = deg + min_ / 60
return ddm
def deg2km(lat):
    # length (km) of one degree of longitude at latitude `lat` given in degrees
    # (earth radius: 6371 km); np.cos expects radians, hence the conversion
    return 6371 * np.cos(np.deg2rad(lat)) * 2 * np.pi / 360

# source: /scigeo-0.0.10.tar.gz/scigeo-0.0.10/scigeo-history-versions/scigeo-0.0.2/geobox.py
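
# Editor's addition: an illustrative sketch for the geobox helpers above
# (gen_buffer, dms2ddm, deg2km). The coordinates are made-up values.
def _example_geobox_helpers():
    corners = gen_buffer(lon=116.4, lat=39.9, step=0.25)       # rectangle buffer around a point
    decimal_deg = dms2ddm(39, 54, 27.0)                        # 39 deg 54' 27" -> decimal degrees
    km_per_deg_lon = deg2km(39.9)                              # km per degree of longitude at 39.9 N
    return corners, decimal_deg, km_per_deg_lon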
import math
import datetime
class Sunriseset:
def __init__(self, timestamp = None, format = r"%Y-%m-%d"):
if isinstance(timestamp, str):
timestamp = datetime.datetime.strptime(timestamp, format)
self.timestamp = timestamp
def __call__(self, lon, lat):
coords = {'longitude' : lon, 'latitude' : lat}
# Sunrise time UTC (decimal, 24 hour format)
sunrise = self.getSunriseTime(coords)['decimal']
# Sunset time UTC (decimal, 24 hour format)
sunset = self.getSunsetTime(coords)['decimal']
return {
"sunrise": sunrise,
"sunset": sunset
}
def getSunriseTime(self, coords):
return self.calcSunTime(coords, True)
def getSunsetTime(self, coords):
return self.calcSunTime(coords, False)
def getCurrentUTC(self):
now = datetime.datetime.now()
return [now.day, now.month, now.year]
def calcSunTime(self, coords, isRiseTime, zenith = 90.8):
# isRiseTime == False, returns sunsetTime
if self.timestamp:
timestamp = self.timestamp
try:
day, month, year = [timestamp.day, timestamp.month, timestamp.year]
except:
day, month, year = timestamp
# else:
# raise Exception("Wrong input time format...")
else:
print("Use current time...")
day, month, year = self.getCurrentUTC()
longitude = coords['longitude']
latitude = coords['latitude']
TO_RAD = math.pi/180
#1. first calculate the day of the year
N1 = math.floor(275 * month / 9)
N2 = math.floor((month + 9) / 12)
N3 = (1 + math.floor((year - 4 * math.floor(year / 4) + 2) / 3))
N = N1 - (N2 * N3) + day - 30
#2. convert the longitude to hour value and calculate an approximate time
lngHour = longitude / 15
if isRiseTime:
t = N + ((6 - lngHour) / 24)
else: #sunset
t = N + ((18 - lngHour) / 24)
#3. calculate the Sun's mean anomaly
M = (0.9856 * t) - 3.289
#4. calculate the Sun's true longitude
L = M + (1.916 * math.sin(TO_RAD*M)) + (0.020 * math.sin(TO_RAD * 2 * M)) + 282.634
L = self.forceRange( L, 360 ) #NOTE: L adjusted into the range [0,360)
#5a. calculate the Sun's right ascension
RA = (1/TO_RAD) * math.atan(0.91764 * math.tan(TO_RAD*L))
RA = self.forceRange( RA, 360 ) #NOTE: RA adjusted into the range [0,360)
#5b. right ascension value needs to be in the same quadrant as L
Lquadrant = (math.floor( L/90)) * 90
RAquadrant = (math.floor(RA/90)) * 90
RA = RA + (Lquadrant - RAquadrant)
#5c. right ascension value needs to be converted into hours
RA = RA / 15
#6. calculate the Sun's declination
sinDec = 0.39782 * math.sin(TO_RAD*L)
cosDec = math.cos(math.asin(sinDec))
#7a. calculate the Sun's local hour angle
cosH = (math.cos(TO_RAD*zenith) - (sinDec * math.sin(TO_RAD*latitude))) / (cosDec * math.cos(TO_RAD*latitude))
        if cosH > 1:
            # note: these early returns carry no 'decimal'/'hr'/'min' keys; callers should check 'status' first
            return {'status': False, 'msg': 'the sun never rises on this location (on the specified date)'}
        if cosH < -1:
            return {'status': False, 'msg': 'the sun never sets on this location (on the specified date)'}
#7b. finish calculating H and convert into hours
if isRiseTime:
H = 360 - (1/TO_RAD) * math.acos(cosH)
else: #setting
H = (1/TO_RAD) * math.acos(cosH)
H = H / 15
#8. calculate local mean time of rising/setting
T = H + RA - (0.06571 * t) - 6.622
#9. adjust back to UTC
UT = T - lngHour
UT = self.forceRange( UT, 24) # UTC time in decimal format (e.g. 23.23)
#10. Return
hr = self.forceRange(int(UT), 24)
min = round((UT - int(UT))*60,0)
return {
'status': True,
'decimal': UT,
'hr': hr,
'min': min
}
def forceRange(self, v, max):
# force v to be >= 0 and < max
if v < 0:
return v + max
elif v >= max:
return v - max
return v
'''
# example:
if __name__ == "__main__":
srs = Sunriseset("2018-01-01")
print(srs(0, 50))
'''

# source: /scigeo-0.0.10.tar.gz/scigeo-0.0.10/scigeo-history-versions/scigeo-0.0.2/sun.py
from pathlib import Path
from shapely.geometry import Polygon
import rasterio as rio
from rasterio.mask import mask
from rasterio.enums import Resampling
import geopandas as gpd
import warnings
import numpy as np
class Raster(object):
"""
the wrapper of rasterio
"""
def __init__(self, path):
        super().__init__()  # equivalent to: super(Raster, self).__init__()
self.path = path
def __del__(self):
        if hasattr(self, "src"):
            self.src.close()  # close the rasterio dataset (accessing the `.closed` property, as before, did nothing)
def read(self):
warnings.filterwarnings("ignore")
with rio.open(self.path) as src:
array = src.read()
profile = src.profile
return {"array": array, "meta": profile}
def fopen(self, src_only = True):
"""
c, a, b, f, d, e = src.transform
gt = rasterio.transform.Affine.from_gdal(c, a, b, f, d, e)
proj = src.crs
count = src.count
name = src.name
mode = src.mode
closed = src.closed
width = src.width
height = src.height
bounds = src.bounds
idtypes = {i: dtype for i, dtype in zip(
src.indexes, src.dtypes)}
meta = src.meta
src = src.affine
"""
self.src = rio.open(self.path)
if not src_only:
self.data = self.src.read()
self.profile = self.src.profile
def write(self, array, fulloutpath, profile, dtype = rio.float64):
count=profile["count"]
# bug fix, can't write a 3D array having a shape like (1, *, *)
if array.ndim == 3 and array.shape[0] == 1:
array = array[0, :, :]
profile.update(dtype = dtype, count = count, compress='lzw')
with rio.open(fulloutpath, 'w', **profile) as dst:
dst.write(array.astype(dtype), count)
def clip(self, polygon):
self.clip_arr, self.clip_transform = mask(self.src, polygon, crop=True)
def resampling(self, new_shape):
"""
new_shape format: height, width, count in order
Resample: default Resampling.bilinear, other choice includes Resampling.average
"""
height, width, count = new_shape
        resampled_data = self.src.read(
            out_shape=(count, height, width),  # rasterio expects (count, height, width)
            resampling=Resampling.bilinear
        )
return resampled_data
def get_pntpairs(self, **kwargs):
# compatible with affine format, rather than geotransform
if not kwargs:
# print("Inside data")
affine = self.profile["transform"]
cols = self.profile["width"]
rows = self.profile["height"]
data = self.data.ravel()
else:
# print("Outside data")
# NOTICE: try to transform the geotransform to affine.
affine = kwargs["affine"]
# NOTICE: the first dimension of rasterio array is band.
cols = kwargs["data"].shape[2]
rows = kwargs["data"].shape[1]
data = kwargs["data"].ravel()
# print(affine)
# print(profile)
lats = [idx * affine[4] + affine[5] for idx in range(rows)]
lons = [idx * affine[0] + affine[2] for idx in range(cols)]
lons, lats = np.meshgrid(lons, lats)
pntpairs = np.vstack([lons.ravel(), lats.ravel()]).T
return pntpairs, data
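
# Editor's addition: an illustrative sketch for the Raster wrapper above.
# "example.tif" is a placeholder path; point it at any GeoTIFF on disk.
def _example_raster_usage(tif_path="example.tif"):
    raster = Raster(tif_path)
    result = raster.read()                      # {"array": ndarray, "meta": profile}
    raster.fopen(src_only=False)                # open the dataset and cache array/profile
    pntpairs, values = raster.get_pntpairs()    # (lon, lat) pairs and flattened pixel values
    return result["meta"], pntpairs.shape, values.shape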
class Vector(object):
"""docstring for Vector"""
def __init__(self, **kwargs):
super(Vector, self).__init__()
if "path" in kwargs.keys():
vector_path = kwargs["path"]
try:
self.path = vector_path.as_posix()
except Exception as e:
print(e)
self.path = vector_path
def __del__(self):
pass
def read(self):
gdf = gpd.read_file(self.path)
return gdf
def write(self, gpd, fulloutpath):
# filetype = fulloutpath.split('.')[-1]
gpd.to_file(fulloutpath)
def create_polygon(self, coors, epsg_code = "4326"):
polygon_geom = Polygon(coors)
        crs = {"init": "epsg:" + epsg_code}  # note: the {"init": ...} form is deprecated in newer pyproj/geopandas; "EPSG:<code>" also works
poly = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom]) # gdf
# gjs = poly.to_json()
        return poly

# source: /scigeo-0.0.10.tar.gz/scigeo-0.0.10/scigeo-history-versions/scigeo-0.0.2/geoface.py
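
# Editor's addition: an illustrative sketch for the Vector wrapper above.
# The coordinate ring and the commented-out output path are made-up placeholders.
def _example_vector_usage():
    ring = [[-1.0, 51.0], [1.0, 51.0], [1.0, 50.0], [-1.0, 50.0]]   # lon/lat corners
    vec = Vector()
    poly_gdf = vec.create_polygon(ring, epsg_code="4326")
    # vec.write(poly_gdf, "buffer.shp")   # uncomment to write the polygon to a shapefile
    return poly_gdf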
import hashlib
import json
import random
import re
import time
from typing import Optional
import requests
from scihub_cn.models import PaperDetailDescription
def translate(content: str, proxy=None) -> str:
"""对文本content进行翻译"""
lts = str(int(time.time() * 1000))
salt = lts + str(random.randint(0, 9))
sign_str = 'fanyideskweb' + content + salt + 'Ygy_4c=r#e#4EX^NUGUc5'
m = hashlib.md5()
m.update(sign_str.encode())
sign = m.hexdigest()
url = 'https://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
headers = {
"Referer": "https://fanyi.youdao.com/",
"Cookie": '[email protected]; JSESSIONID=aaamH0NjhkDAeAV9d28-x; OUTFOX_SEARCH_USER_ID_NCOO=1827884489.6445506; fanyi-ad-id=305426; fanyi-ad-closed=1; ___rl__test__cookies=1649216072438',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36"
}
data = {
"i": content,
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": salt,
"sign": sign,
"lts": lts,
"bv": "a0d7903aeead729d96af5ac89c04d48e",
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_REALTlME",
}
res = requests.post(url, headers=headers, data=data, proxies=proxy)
response = json.loads(res.text)
value = response['translateResult'][0][0]['tgt']
return value.replace(" ", "").replace("。", "")
def split_description(content: str) -> Optional[PaperDetailDescription]:
    """Parse a scraped paper description string into a PaperDetailDescription."""
    # description: authors, title, publisher, doi
    # test case 1: {"doi": '10.1109/ACC.1999.786344'}, no authors
    # test case 2: {"doi": "10.1016/j.biopha.2019.109317"}: authors, title, publisher and doi all present
pattern = re.compile(
r"^(?P<authors>(?:.*?, )+\w+\. \(\d+\)\. )?(?P<title>[A-Z].*?\. )(?P<publisher>[A-Z].*?\. )(?P<doi>(?:doi:|https:).*?)$")
res = re.search(pattern, content)
if res:
return PaperDetailDescription(
authors=res.group("authors"),
            # strip the trailing ". " characters
title=res.group("title").strip(". "),
publisher=res.group("publisher"),
doi=res.group("doi")
)
else:
return None
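
# Editor's addition: an illustrative call to split_description. The description
# string is made up but follows the "authors. (year). Title. Publisher. doi" layout
# the regex above expects; real scraped descriptions may differ.
def _example_split_description():
    desc = (
        "Smith, J., Doe, A. (2019). "
        "A study of an interesting phenomenon. "
        "Journal of Examples, 12(3), 45-67. "
        "doi:10.1000/xyz123"
    )
    return split_description(desc)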
if __name__ == '__main__':
http_proxy = "socks5h://127.0.0.1:10808"
https_proxy = "socks5h://127.0.0.1:10808"
proxies = {
"https": https_proxy,
"http": http_proxy
}
translated_str = translate("你好", proxy=proxies)
    print(translated_str)

# source: /scihub-cn-0.1.1.tar.gz/scihub-cn-0.1.1/scihub_cn/utils.py