gt_gtf.c
#include "gt_gtf.h"
GT_INLINE gt_gtf_entry* gt_gtf_entry_new(const uint64_t start, const uint64_t end, const gt_strand strand, gt_string* const type){
gt_gtf_entry* entry = malloc(sizeof(gt_gtf_entry));
entry->uid = 0;
entry->start = start;
entry->end = end;
entry->num_children = 0;
entry->type = type;
entry->strand = strand;
entry->gene_type = NULL;
entry->gene_id = NULL;
entry->transcript_id = NULL;
entry->length = 0;
return entry;
}
GT_INLINE void gt_gtf_entry_delete(gt_gtf_entry* const entry){
free(entry);
}
GT_INLINE gt_gtf_ref* gt_gtf_ref_new(void){
gt_gtf_ref* ref = malloc(sizeof(gt_gtf_ref));
ref->entries = gt_vector_new(GTF_DEFAULT_ENTRIES, sizeof(gt_gtf_entry*));
return ref;
}
GT_INLINE void gt_gtf_ref_delete(gt_gtf_ref* const ref){
register uint64_t s = gt_vector_get_used(ref->entries);
register uint64_t i = 0;
for(i=0; i<s; i++){
gt_gtf_entry_delete( (gt_vector_get_elm(ref->entries, i, gt_gtf_entry)));
}
gt_vector_delete(ref->entries);
free(ref);
}
GT_INLINE gt_gtf* gt_gtf_new(void){
gt_gtf* gtf = malloc(sizeof(gt_gtf));
gtf->refs = gt_shash_new();
gtf->types = gt_shash_new();
gtf->gene_ids = gt_shash_new();
gtf->transcript_ids = gt_shash_new();
gtf->gene_types = gt_shash_new();
gtf->genes = gt_shash_new();
gtf->transcripts = gt_shash_new();
return gtf;
}
GT_INLINE void gt_gtf_delete(gt_gtf* const gtf){
gt_shash_delete(gtf->refs, true);
gt_shash_delete(gtf->types, true);
gt_shash_delete(gtf->gene_ids, true);
gt_shash_delete(gtf->transcript_ids, true);
gt_shash_delete(gtf->gene_types, true);
gt_shash_delete(gtf->genes, false);
gt_shash_delete(gtf->transcripts, false);
free(gtf);
}
GT_INLINE gt_gtf_hits* gt_gtf_hits_new(void){
gt_gtf_hits* hits = malloc(sizeof(gt_gtf_hits));
hits->exon_hits = gt_vector_new(16, sizeof(gt_gtf_hit*));
hits->num_genes = 0;
hits->num_protein_coding = 0;
hits->num_paired_genes = 0;
hits->junction_hit_ration = 0.0; // initialize so later comparisons never read an uninitialized value
return hits;
}
GT_INLINE void gt_gtf_hits_delete(gt_gtf_hits* const hits){
gt_gtf_hits_clear(hits);
gt_vector_delete(hits->exon_hits);
free(hits);
}
GT_INLINE void gt_gtf_hits_clear(gt_gtf_hits* const hits){
uint64_t i = 0;
for(i=0; i<gt_vector_get_used(hits->exon_hits); i++){
gt_gtf_hit* hit = *gt_vector_get_elm(hits->exon_hits, i, gt_gtf_hit*);
gt_gtf_hit_delete(hit);
}
hits->num_genes = 0;
hits->num_protein_coding =0;
hits->num_paired_genes =0;
hits->junction_hit_ration = 0.0;
gt_vector_clear(hits->exon_hits);
}
GT_INLINE gt_gtf_count_parms* gt_gtf_count_params_new(bool coverage){
gt_gtf_count_parms* p = gt_malloc_(1, sizeof(gt_gtf_count_parms), false, false);
p->num_maps = 0;
p->exon_overlap = 0;
p->unweighted_counts = true;
p->single_pair_counts = false;
p->num_junctions = 0;
p->count_bases = false;
p->num_annotated_junctions = 0;
if(coverage){
p->single_transcript_coverage = GT_GTF_INIT_COVERAGE();
p->gene_body_coverage = GT_GTF_INIT_COVERAGE();
}else{
p->single_transcript_coverage = NULL;
p->gene_body_coverage = NULL;
}
return p;
}
GT_INLINE void gt_gtf_count_params_delete(gt_gtf_count_parms* params){
if(params->single_transcript_coverage != NULL){
free(params->single_transcript_coverage);
}
if(params->gene_body_coverage != NULL){
free(params->gene_body_coverage);
}
free(params);
}
GT_INLINE gt_string* gt_gtf_get_type(const gt_gtf* const gtf, char* const type){
if(!gt_gtf_contains_type(gtf, type)){
gt_string* s = gt_string_set_new(type);
gt_shash_insert_string(gtf->types, type, s);
}
return gt_shash_get(gtf->types, type, gt_string);
}
GT_INLINE bool gt_gtf_contains_type(const gt_gtf* const gtf, char* const name){
return gt_shash_is_contained(gtf->types, name);
}
GT_INLINE gt_gtf_ref* gt_gtf_get_ref(const gt_gtf* const gtf, char* const name){
if(!gt_gtf_contains_ref(gtf, name)){
gt_gtf_ref* rr = gt_gtf_ref_new();
gt_shash_insert(gtf->refs, name, rr, gt_gtf_ref*);
}
return gt_shash_get(gtf->refs, name, gt_gtf_ref);
}
GT_INLINE bool gt_gtf_contains_ref(const gt_gtf* const gtf, char* const name){
return gt_shash_is_contained(gtf->refs, name);
}
GT_INLINE gt_string* gt_gtf_get_gene_id(const gt_gtf* const gtf, char* const name){
if(!gt_gtf_contains_gene_id(gtf, name)){
gt_string* const gene_id = gt_string_set_new(name);
gt_shash_insert(gtf->gene_ids, name, gene_id, gt_string*);
}
return gt_shash_get(gtf->gene_ids, name, gt_string);
}
GT_INLINE bool gt_gtf_contains_gene_id(const gt_gtf* const gtf, char* const name){
return gt_shash_is_contained(gtf->gene_ids, name);
}
GT_INLINE gt_string* gt_gtf_get_transcript_id(const gt_gtf* const gtf, char* const name){
if(!gt_gtf_contains_transcript_id(gtf, name)){
gt_string* const transcript_id = gt_string_set_new(name);
gt_shash_insert(gtf->transcript_ids, name, transcript_id, gt_string*);
}
return gt_shash_get(gtf->transcript_ids, name, gt_string);
}
GT_INLINE bool gt_gtf_contains_transcript_id(const gt_gtf* const gtf, char* const name){
return gt_shash_is_contained(gtf->transcript_ids, name);
}
GT_INLINE gt_string* gt_gtf_get_gene_type(const gt_gtf* const gtf, char* const name){
if(!gt_gtf_contains_gene_type(gtf, name)){
gt_string* const gene_type = gt_string_set_new(name);
gt_shash_insert(gtf->gene_types, name, gene_type, gt_string*);
}
return gt_shash_get(gtf->gene_types, name, gt_string);
}
GT_INLINE bool gt_gtf_contains_gene_type(const gt_gtf* const gtf, char* const name){
return gt_shash_is_contained(gtf->gene_types, name);
}
GT_INLINE gt_gtf_entry* gt_gtf_get_gene_by_id(const gt_gtf* const gtf, char* const key){
if(gt_shash_is_contained(gtf->genes, key)){
return gt_shash_get_element(gtf->genes, key);
}
return NULL;
}
GT_INLINE gt_gtf_entry* gt_gtf_get_transcript_by_id(const gt_gtf* const gtf, char* const key){
if(gt_shash_is_contained(gtf->transcripts, key)){
return gt_shash_get_element(gtf->transcripts, key);
}
return NULL;
}
/**
* Comparator that compares two gtf_entries by starting position
*/
GT_INLINE int gt_gtf_sort_by_start_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){
uint64_t p1 = (*a)->start;
uint64_t p2 = (*b)->start;
return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type ));
}
/**
* Comparator that compares two gtf_entries by ending position
*/
GT_INLINE int gt_gtf_sort_by_end_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){
uint64_t p1 = (*a)->end;
uint64_t p2 = (*b)->end;
return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type ));
}
/**
* Sort vector of gt_gtf_entries by starting position
*/
GT_INLINE void gt_gtf_sort_by_start(gt_vector* entries) {
qsort(gt_vector_get_mem(entries, gt_gtf_entry*),
gt_vector_get_used(entries),
sizeof(gt_gtf_entry**),
(int (*)(const void *,const void *))gt_gtf_sort_by_start_cmp_);
}
/**
* Sort vector of gt_gtf_entries by ending position
*/
GT_INLINE void gt_gtf_sort_by_end( gt_vector* entries) {
qsort(gt_vector_get_mem(entries, gt_gtf_entry*),
gt_vector_get_used(entries),
sizeof(gt_gtf_entry**),
(int (*)(const void *,const void *))gt_gtf_sort_by_end_cmp_);
}
GT_INLINE gt_gtf_node* gt_gtf_create_node(gt_vector* entries){
const uint64_t len = gt_vector_get_used(entries);
if(len == 0){
return NULL;
}
gt_gtf_node* const node = malloc(sizeof(gt_gtf_node));
const gt_gtf_entry* mid = *gt_vector_get_elm(entries, len/2, gt_gtf_entry*);
node->midpoint = mid->start + ((mid->end - mid->start)/2);
node->entries_by_end = gt_vector_new(16, sizeof(gt_gtf_entry*));
node->entries_by_start = gt_vector_new(16, sizeof(gt_gtf_entry*));
gt_vector* to_left = gt_vector_new(16, sizeof(gt_gtf_entry*));
gt_vector* to_right = gt_vector_new(16, sizeof(gt_gtf_entry*));
GT_VECTOR_ITERATE(entries, element, counter, gt_gtf_entry*){
if((*element)->end < node->midpoint){
gt_vector_insert(to_left, (*element), gt_gtf_entry*);
}else if((*element)->start > node->midpoint){
gt_vector_insert(to_right, (*element), gt_gtf_entry*);
}else{
gt_vector_insert(node->entries_by_end, (*element), gt_gtf_entry*);
gt_vector_insert(node->entries_by_start, (*element), gt_gtf_entry*);
}
}
// sort the start and end lists
gt_gtf_sort_by_start(node->entries_by_start);
gt_gtf_sort_by_end(node->entries_by_end);
// delete incoming entry list
gt_vector_delete(entries);
if(gt_vector_get_used(to_left) > 0){
// create left node
node->left = gt_gtf_create_node(to_left);
}else{
node->left = NULL;
gt_vector_delete(to_left);
}
if(gt_vector_get_used(to_right) > 0){
// create right node
node->right = gt_gtf_create_node(to_right);
}else{
node->right = NULL;
gt_vector_delete(to_right);
}
return node;
}
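/*
 * Note on the structure built above: gt_gtf_create_node builds a centered interval tree.
 * Entries that end before the midpoint go to the left subtree, entries that start after it
 * go to the right subtree, and entries overlapping the midpoint are kept in this node twice,
 * once sorted by start and once sorted by end, so stabbing queries can scan them from either
 * side and stop early.
 */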
/*
* Read next tab separated field from line or return NULL if no such field exists
*/
GT_INLINE char* gt_gtf_read_gtf_field_(char** line){
char* current = *line;
GT_READ_UNTIL(line, **line=='\t');
if(GT_IS_EOL(line)) return NULL;
**line = EOS;
GT_NEXT_CHAR(line);
return current;
}
GT_INLINE gt_status gt_gtf_read_attributes_(char** line, gt_shash* attrs){
gt_shash_clear(attrs, false);
while(!GT_IS_EOL(line)){
while(**line == ' ') GT_NEXT_CHAR(line);
if(**line == EOL || **line == EOS) return GT_STATUS_OK;
// get the attribute name
char* name = *line;
GT_READ_UNTIL(line, **line==' ')
if(GT_IS_EOL(line)){
gt_error_msg("Error parsing GTF attributes. Expected space but found end of line");
return GT_GTF_INVALID_LINE;
}
**line = EOS;
GT_NEXT_CHAR(line);
// skip to attribute start
while(**line == ' ') GT_NEXT_CHAR(line);
// remove starting quote
if(**line == '"') GT_NEXT_CHAR(line);
char* attr = *line;
// skip until the closing ;
while(**line != ';') GT_NEXT_CHAR(line);
if(GT_IS_EOL(line)) return GT_GTF_INVALID_LINE;
// remove trailing quotes and add EOS
if(*(*line-1) == '"') *(*line-1) = EOS;
else **line = EOS;
GT_NEXT_CHAR(line);
// add attribute
if(gt_shash_is_contained(attrs, name)){
gt_shash_remove(attrs, name, false);
}
gt_shash_insert(attrs, name, attr, char*);
if(gt_shash_is_contained(attrs, "gene_id") &&
gt_shash_is_contained(attrs, "gene_type") &&
gt_shash_is_contained(attrs, "transcript_id")){
return GT_STATUS_OK;
}
}
return GT_STATUS_OK;
}
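/*
 * The attribute parser above expects the GTF attribute column as space-separated key/value
 * pairs terminated by ';', with the values usually quoted, e.g. (identifiers purely illustrative):
 *
 *   gene_id "ENSG00000000001"; transcript_id "ENST00000000001"; gene_type "protein_coding";
 *
 * Parsing stops early once gene_id, gene_type and transcript_id have all been seen.
 */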
/**
* Parse a single GTF line
*/
GT_INLINE gt_status gt_gtf_read_line(char* line, gt_gtf* const gtf, uint64_t counter, gt_shash* attrs){
// skip comments
if(line[0] == '#'){
return GT_STATUS_OK;
}
char* ref = NULL;
char* type = NULL;
uint64_t start = 0;
uint64_t end = 0;
gt_strand strand = UNKNOWN;
char* current = line;
ref = gt_gtf_read_gtf_field_(&line);
if(ref == NULL){
gt_error_msg("Unable to parse name: '%s'", line);
return GT_GTF_INVALID_LINE;
}
// SKIP source
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse source: '%s'", line);
return GT_GTF_INVALID_LINE;
}
// type
type = gt_gtf_read_gtf_field_(&line);
if(type == NULL){
gt_error_msg("Unable to parse type: '%s'", line);
return GT_GTF_INVALID_LINE;
}
// start
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse start: '%s'", line);
return GT_GTF_INVALID_LINE;
}
start = atol(current);
// end
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse end: '%s'", line);
return GT_GTF_INVALID_LINE;
}
end = atol(current);
// SKIP score
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse score: '%s'", line);
return GT_GTF_INVALID_LINE;
}
// strand
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse strand: '%s'", line);
return GT_GTF_INVALID_LINE;
}
if(*current == '+'){
strand = FORWARD;
}else if(*current == '-'){
strand = REVERSE;
}
// SKIP the frame/phase field
current = gt_gtf_read_gtf_field_(&line);
if(current == NULL){
gt_error_msg("Unable to parse last: '%s'", line);
return GT_GTF_INVALID_LINE;
}
// WARNING >>> the attribute parser stops after
// the currently used fields are found. If you want
// to add a field, also update the attribute parser
if(gt_gtf_read_attributes_(&line, attrs) != GT_STATUS_OK){
gt_error_msg("Unable to parse attributes: '%s'", line);
return GT_GTF_INVALID_ATTRIBUTES;
}
// get the type or create it
gt_string* tp = gt_gtf_get_type(gtf, type);
gt_gtf_entry* e = gt_gtf_entry_new(start, end, strand, tp);
e->uid = counter;
if(gt_shash_is_contained(attrs, "gene_id")){
e->gene_id = gt_gtf_get_gene_id(gtf, gt_shash_get(attrs, "gene_id", char));
}
if(gt_shash_is_contained(attrs, "gene_type")){
e->gene_type = gt_gtf_get_gene_type(gtf, gt_shash_get(attrs, "gene_type", char));
}
if(gt_shash_is_contained(attrs, "transcript_id")){
e->transcript_id = gt_gtf_get_transcript_id(gtf, gt_shash_get(attrs, "transcript_id", char));
}
// get the ref or create it
gt_gtf_ref* gtref = gt_gtf_get_ref(gtf, ref);
gt_vector_insert(gtref->entries, e, gt_gtf_entry*);
if(strcmp(e->type->buffer, "gene") == 0){
gt_shash_insert(gtf->genes, e->gene_id->buffer, e, gt_gtf_entry*);
}
if(strcmp(e->type->buffer, "transcript") == 0){
gt_shash_insert(gtf->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*);
}
return GT_STATUS_OK;
}
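/*
 * For reference, gt_gtf_read_line above consumes the nine standard tab-separated GTF columns:
 * seqname, source, feature, start, end, score, strand, frame, attributes. A line such as
 * (tabs shown as spaces, identifiers purely illustrative):
 *
 *   chr1  HAVANA  exon  11869  12227  .  +  .  gene_id "ENSG00000000001"; transcript_id "ENST00000000001"; gene_type "protein_coding";
 *
 * produces an exon entry on the forward strand that is registered under its reference sequence
 * and linked to the shared gene_id/transcript_id/gene_type strings.
 */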
bool gt_gtf_hits_junction(gt_map* map, gt_gtf_entry* e){
uint64_t rs = gt_map_get_begin_mapping_position(map);
uint64_t re = gt_map_get_end_mapping_position(map);
bool hit = (rs==e->start) || (rs==e->end) || (re == e->end) || (re == e->start);
return hit;
}
GT_INLINE uint64_t gt_gtf_get_map_begin(gt_map* const map){
return gt_map_get_begin_mapping_position(map) + gt_map_get_left_trim_length(map);
}
GT_INLINE uint64_t gt_gtf_get_map_end(gt_map* const map){
return gt_map_get_end_mapping_position(map);
}
/**
* Iterate over the map blocks and count exon-exon junctions that are annotated
*/
GT_INLINE uint64_t gt_gtf_count_junction(const gt_gtf* const gtf, gt_map* const map){
uint64_t blocks = gt_map_get_num_blocks(map);
if(blocks <= 1) return 0; // single block map
uint64_t num_junctions = 0;
char* seq_name = gt_map_get_seq_name(map);
gt_vector* hits = gt_vector_new(16, sizeof(gt_gtf_entry*));
gt_shash* last_hits = NULL;
GT_MAP_ITERATE(map, block){
uint64_t start = gt_map_get_begin_mapping_position(block);
uint64_t end = gt_map_get_end_mapping_position(block);
if(last_hits != NULL){
// there was a block before, check if we found an annotated junction
gt_gtf_search(gtf, hits, seq_name, start, start, true);
GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){
if(gt_shash_is_contained(last_hits, hit->transcript_id->buffer)){
num_junctions++;
break;
}
}
}
}
if(last_hits == NULL) last_hits = gt_shash_new();
else gt_shash_clear(last_hits, true);
// search for the overlaps with the end of the block
gt_gtf_search(gtf, hits, seq_name, end, end, true);
GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){
gt_gtf_count_(last_hits, hit->transcript_id->buffer);
}
}
}
gt_vector_delete(hits);
gt_shash_delete(last_hits, true);
return num_junctions;
}
void gt_gtf_print_entry_(FILE* target, gt_gtf_entry* e, gt_map* map){
if(map != NULL){
gt_output_map_fprint_map(target, map, NULL);
fprintf(target, " ==> ");
}
if(e->type != NULL){
fprintf(target, "%s : %"PRIu64" - %"PRIu64" (%c)", e->type->buffer, e->start, e->end, (e->strand==FORWARD?'+':'-') );
}
if(e->gene_id != NULL){
fprintf(target, " GID:%s", e->gene_id->buffer);
}
if(e->transcript_id != NULL){
fprintf(target, " TID:%s", e->transcript_id->buffer);
}
if(e->type != NULL){
fprintf(target, " [%s]", e->type->buffer);
}
if(e->gene_type != NULL){
fprintf(target, " [%s]", e->gene_type->buffer);
}
fprintf(target, " [#transcripts: %"PRIu64"]", e->num_children);
if(map != NULL && gt_gtf_hits_junction(map, e)){
fprintf(target, " [Hits JS]");
}
fprintf(target, "\n");
}
GT_INLINE gt_gtf_hit* gt_gtf_hit_new(void){
gt_gtf_hit* hit = malloc(sizeof(gt_gtf_hit));
hit->exon_overlap = 0.0;
hit->intron_length = 0.0;
hit->is_protein_coding = false;
hit->junction_hits = 0.0;
hit->map = NULL;
hit->num_junctions = 0;
hit->pairs_transcript = false;
hit->pairs_splits = false;
hit->pairs_gene = false;
hit->num_junctions_hits =0;
hit->num_template_blocks = 0;
hit->transcripts = NULL;
hit->genes = NULL;
hit->hits_exon = false;
return hit;
}
GT_INLINE void gt_gtf_hit_delete(gt_gtf_hit* hit){
if(hit->transcripts != NULL){
gt_shash_delete(hit->transcripts, true);
}
if(hit->genes != NULL){
gt_shash_delete(hit->genes, true);
}
free(hit);
}
GT_INLINE gt_status gt_gtf_reload_buffer(gt_buffered_input_file* const buffered_fasta_input) {
GT_BUFFERED_INPUT_FILE_CHECK(buffered_fasta_input);
// Dump buffer if a BOF is attached to the input, and get new output block (always FIRST)
gt_buffered_input_file_dump_attached_buffers(buffered_fasta_input->attached_buffered_output_file);
// Read new input block
const uint64_t read_lines = gt_buffered_input_file_get_block(buffered_fasta_input, GT_NUM_LINES_50K);
if (gt_expect_false(read_lines==0)) return GT_INPUT_FILE_EOF;
// Assign block ID
gt_buffered_input_file_set_id_attached_buffers(buffered_fasta_input->attached_buffered_output_file,buffered_fasta_input->block_id);
return GT_STATUS_OK;
}
GT_INLINE gt_status gt_gtf_get_line(gt_buffered_input_file* const buffered_input, gt_string* const line) {
GT_BUFFERED_INPUT_FILE_CHECK(buffered_input);
GT_STRING_CHECK(line);
gt_status error_code;
// Check the end_of_block. Reload buffer if needed
if (gt_buffered_input_file_eob(buffered_input)) {
if ((error_code=gt_gtf_reload_buffer(buffered_input))!=GT_IMP_OK) return error_code;
}
// Extract the next line
char* const line_start = buffered_input->cursor;
gt_string_clear(line);
GT_INPUT_FILE_SKIP_LINE(buffered_input);
gt_string_set_nstring_static(line, line_start, (buffered_input->cursor - line_start));
return GT_IMP_OK;
}
GT_INLINE uint64_t gt_gtf_merge_(const gt_gtf* const target, gt_gtf* source, uint64_t counter){
// merge all entries from the source refs into the target
GT_SHASH_BEGIN_KEY_ITERATE(source->refs, key){
gt_gtf_ref* source_ref = gt_gtf_get_ref(source, key);
gt_gtf_ref* target_ref = gt_gtf_get_ref(target, key);
GT_VECTOR_ITERATE(source_ref->entries, value, c, gt_gtf_entry*){
gt_gtf_entry* e = *value;
e->uid = counter++;
if(e->gene_id != NULL){
e->gene_id = gt_gtf_get_gene_id(target, gt_string_get_string(e->gene_id));
}
if(e->transcript_id != NULL){
e->transcript_id = gt_gtf_get_transcript_id(target, gt_string_get_string(e->transcript_id));
}
if(e->type != NULL)e->type = gt_gtf_get_type(target, gt_string_get_string(e->type));
if(e->gene_type != NULL)e->gene_type = gt_gtf_get_gene_type(target, gt_string_get_string(e->gene_type));
gt_vector_insert(target_ref->entries, e, gt_gtf_entry*);
if(strcmp(e->type->buffer, GT_GTF_TYPE_GENE) == 0 && !gt_shash_is_contained(target->genes, e->gene_id->buffer)){
gt_shash_insert(target->genes, e->gene_id->buffer, e, gt_gtf_entry*);
}
if(strcmp(e->type->buffer, GT_GTF_TYPE_TRANSCRIPT) == 0 && !gt_shash_is_contained(target->transcripts, e->transcript_id->buffer)){
gt_shash_insert(target->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*);
}
}
}GT_SHASH_END_ITERATE;
return counter;
}
GT_INLINE gt_gtf* gt_gtf_read_from_stream(FILE* input, uint64_t threads){
gt_input_file* input_file = gt_input_stream_open(input);
return gt_gtf_read(input_file, threads);
}
GT_INLINE gt_gtf* gt_gtf_read_from_file(char* input, uint64_t threads){
gt_input_file* input_file = gt_input_file_open(input, false);
return gt_gtf_read(input_file, threads);
}
GT_INLINE gt_gtf* gt_gtf_read(gt_input_file* input_file, const uint64_t threads){
GT_NULL_CHECK(input_file);
GT_ZERO_CHECK(threads);
uint64_t counter = 0;
uint64_t i = 0;
gt_gtf* const gtf = gt_gtf_new();
gt_gtf** gtfs = gt_calloc(threads-1, gt_gtf*, true);
for(i=0; i<threads-1; i++){
gtfs[i] = gt_gtf_new();
}
#pragma omp parallel num_threads(threads)
{
uint64_t tid = omp_get_thread_num();
gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
gt_string* buffered_line = gt_string_new(GTF_MAX_LINE_LENGTH);
gt_gtf* thread_gtf;
if(tid == 0){
thread_gtf = gtf;
}else{
thread_gtf = gtfs[tid-1];
}
gt_shash* attrs = gt_shash_new();
while(gt_gtf_get_line(buffered_input, buffered_line)){
if(gt_gtf_read_line(buffered_line->buffer, thread_gtf, buffered_input->current_line_num, attrs) != GT_STATUS_OK){
// raise error
gt_fatal_error_msg("Failed to parse GTF line '%s'", buffered_line->buffer);
}
counter++;
}
gt_shash_delete(attrs, false);
gt_buffered_input_file_close(buffered_input);
gt_string_delete(buffered_line);
}
gt_input_file_close(input_file);
counter = 0;
// merge all the thread gtfs into a single one
for(i=0; i<threads-1; i++){
counter = gt_gtf_merge_(gtf, gtfs[i], counter);
gt_gtf_delete(gtfs[i]);
}
free(gtfs);
gt_string* const exon_t = gt_string_set_new("exon");
gt_string* const transcript_t = gt_string_set_new("transcript");
gt_string* const intron_t = gt_string_set_new("intron");
// sort the refs
GT_SHASH_BEGIN_ELEMENT_ITERATE(gtf->refs,shash_element,gt_gtf_ref) {
// sort by start position
gt_gtf_sort_by_start(shash_element->entries);
uint64_t size = gt_vector_get_used(shash_element->entries);
uint64_t i = 0;
gt_shash* last_exons = gt_shash_new();
gt_shash* exons_counts = gt_shash_new();
for(i=0; i<size; i++){
gt_gtf_entry* entry = *gt_vector_get_elm(shash_element->entries, i, gt_gtf_entry*);
if(entry->type != NULL && gt_string_equals(exon_t, entry->type)){
gt_string* transcript_id = entry->transcript_id;
if(transcript_id != NULL){
// set exon id and count the exon for the transcript
entry->num_children = gt_gtf_get_count_(exons_counts, transcript_id->buffer);
gt_gtf_count_(exons_counts, transcript_id->buffer);
if(!gt_shash_is_contained(last_exons, gt_string_get_string(transcript_id))){
gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*);
}else{
gt_gtf_entry* prev_exon = gt_shash_get_element(last_exons, gt_string_get_string(transcript_id));
gt_gtf_entry* intron = gt_gtf_entry_new(prev_exon->end+1,
entry->start-1,
prev_exon->strand,
intron_t);
intron->transcript_id = transcript_id;
intron->gene_id = prev_exon->gene_id;
intron->uid = counter++;
gt_vector_insert(shash_element->entries, intron, gt_gtf_entry*);
gt_shash_remove(last_exons, gt_string_get_string(transcript_id),false);
gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*);
}
// add exon counts
gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, gt_string_get_string(entry->transcript_id));
if(transcript != NULL){
transcript->num_children++;
entry->length = transcript->length;
transcript->length += (entry->end - entry->start) + 1;
}
}
}else if(entry->type != NULL && gt_string_equals(transcript_t, entry->type)){
// sum transcript counts for gene id
if(entry->gene_id != NULL){
gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf, gt_string_get_string(entry->gene_id));
gene->num_children++;
}
}
}
gt_shash_delete(last_exons, false);
gt_shash_delete(exons_counts, true);
// create an interval tree node for each ref
shash_element->node = gt_gtf_create_node(shash_element->entries);
} GT_SHASH_END_ITERATE
return gtf;
}
/*
* Binary search for start position
*/
GT_INLINE uint64_t gt_gtf_bin_search(gt_vector* const entries, const uint64_t t, const uint64_t end){
uint64_t used = gt_vector_get_used(entries);
uint64_t l = 0;
uint64_t h = used - 1;
uint64_t m = 0;
register gt_gtf_entry* e = *gt_vector_get_elm(entries, h, gt_gtf_entry*);
while(l < h ){
m = (l + h) / 2;
e = *gt_vector_get_elm(entries, m, gt_gtf_entry*);
if(e->start < t){
l = m + 1;
}else{
h = m;
}
}
e = *gt_vector_get_elm(entries, l, gt_gtf_entry*);
if (h == l){
return l;
}else{
return m;
}
}
GT_INLINE void gt_gtf_search_node_(gt_gtf_node* node, const uint64_t start, const uint64_t end, gt_vector* const target){
if(node == NULL) return;
// add overlapping intervals from this node
GT_VECTOR_ITERATE(node->entries_by_start, element, counter, gt_gtf_entry*){
if((*element)->start > end){
break;
}
gt_gtf_entry* e = *element;
//if((*element)->start <= start && (*element)->end >= end){
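// keep this entry if the query interval [start,end] overlaps [e->start,e->end],
// covering partial overlaps as well as full containment in either direction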
if((start < e->end && end > e->start)
|| (start >= e->start && end <=e->end)
|| (start < e->end && end >= e->end)
|| (start < e->start && end > e->end)){
gt_vector_insert(target, (*element), gt_gtf_entry*);
}
}
if(end < node->midpoint || start < node->midpoint){
// search left tree
gt_gtf_search_node_(node->left, start, end, target);
}
if (start > node->midpoint || end > node->midpoint){
gt_gtf_search_node_(node->right, start, end, target);
}
}
GT_INLINE uint64_t gt_gtf_search(const gt_gtf* const gtf, gt_vector* const target, char* const ref, const uint64_t start, const uint64_t end, const bool clear_target){
if(clear_target)gt_vector_clear(target);
// make sure the target ref is contained
if (! gt_shash_is_contained(gtf->refs, ref)){
return 0;
}
const gt_gtf_ref* const source_ref = gt_gtf_get_ref(gtf, ref);
gt_gtf_search_node_(source_ref->node, start, end, target);
return gt_vector_get_used(target);
}
GT_INLINE void gt_gtf_count_(gt_shash* const table, char* const element){
if(!gt_shash_is_contained(table, element)){
uint64_t* v = gt_malloc_uint64();
*v = 1;
gt_shash_insert(table, element, v, uint64_t);
}else{
uint64_t* v = gt_shash_get(table,element,uint64_t);
++(*v);
}
}
GT_INLINE void gt_gtf_count_custom_(gt_shash* const table, char* const element, uint64_t c){
if(!gt_shash_is_contained(table, element)){
uint64_t* v = gt_malloc_uint64();
*v = c;
gt_shash_insert(table, element, v, uint64_t);
}else{
uint64_t* v = gt_shash_get(table,element,uint64_t);
*v += c;
}
}
GT_INLINE void gt_gtf_count_sum_(gt_shash* const table, char* const element, uint64_t value){
if(!gt_shash_is_contained(table, element)){
uint64_t* v = gt_malloc_uint64();
*v = value;
gt_shash_insert(table, element, v, uint64_t);
}else{
uint64_t* v = gt_shash_get(table,element,uint64_t);
*v += value;
}
}
GT_INLINE void gt_gtf_count_weight_(gt_shash* const table, char* const element, double weight){
if(!gt_shash_is_contained(table, element)){
double* v = malloc(sizeof(double));
*v = weight;
gt_shash_insert(table, element, v, double);
}else{
double* v = gt_shash_get(table,element,double);
*v += weight;
}
}
GT_INLINE uint64_t gt_gtf_get_count_(gt_shash* const table, char* const element){
if(!gt_shash_is_contained(table, element)){
return 0;
}
uint64_t* v = gt_shash_get(table,element,uint64_t);
return *v;
}
GT_INLINE float gt_gtf_get_count_weight(gt_shash* const table, char* const element){
if(!gt_shash_is_contained(table, element)){
return 0.0;
}
double* v = gt_shash_get(table,element,double);
return *v;
}
GT_INLINE void gt_gtf_create_hit(gt_vector* search_hits, gt_shash* all_genes, gt_gtf_hits* hits, gt_gtf_hit* template_hit){
template_hit->transcripts = gt_shash_new();
template_hit->genes = gt_shash_new();
template_hit->is_protein_coding = false;
template_hit->hits_exon = false;
bool counted_protein = false;
// set gene count
GT_SHASH_BEGIN_ITERATE(all_genes, gene_id, c, uint64_t){
gt_gtf_count_sum_(template_hit->genes, gene_id, *c);
}GT_SHASH_END_ITERATE;
GT_VECTOR_ITERATE(search_hits, v, c, gt_gtf_entry*){
gt_gtf_entry* e = *v;
// count transcript
if(e->transcript_id != NULL){
gt_gtf_count_(template_hit->transcripts, gt_string_get_string(e->transcript_id));
}
if(!template_hit->hits_exon && strcmp(e->type->buffer, "exon") == 0){
template_hit->hits_exon = true;
}
if(!counted_protein && e->gene_type != NULL){
template_hit->is_protein_coding |= (strcmp(e->gene_type->buffer, "protein_coding") == 0);
hits->num_protein_coding++;
counted_protein = true;
}
}
template_hit->pairs_gene = (gt_shash_get_num_elements(all_genes) > 1); // single gene
template_hit->pairs_transcript = (gt_shash_get_num_elements(template_hit->transcripts) == 1); // single transcript
hits->num_paired_genes += (template_hit->pairs_gene ? 1 : 0);
gt_vector_insert(hits->exon_hits, template_hit, gt_gtf_hit*);
}
GT_INLINE void gt_gtf_search_template_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_template* const template_src){
gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
// reset the hits
gt_gtf_hits_clear(hits);
gt_shash* all_genes = gt_shash_new();
// process paired alignment
GT_TEMPLATE_ITERATE_MMAP__ATTR_(template_src,mmap,mmap_attr) {
gt_gtf_hit* template_hit = gt_gtf_hit_new();
template_hit->num_template_blocks = gt_template_get_num_blocks(template_src);
template_hit->mmap = mmap;
template_hit->map = NULL;
template_hit->map_attributes = mmap_attr;
template_hit->num_junctions = (gt_map_get_num_blocks(mmap[0]) + gt_map_get_num_blocks(mmap[1])) - 2;
template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, mmap[0]) + gt_gtf_count_junction(gtf, mmap[1]);
double junction_ratio = template_hit->num_junctions == 0 ? -1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions;
if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio;
gt_shash_clear(all_genes, true);
gt_gtf_count_map(gtf, mmap[0], mmap[1], NULL, all_genes, NULL, NULL);
gt_gtf_search_map(gtf, search_hits, mmap[0], true);
gt_gtf_search_map(gtf, search_hits, mmap[1], false);
gt_gtf_create_hit(search_hits, all_genes, hits, template_hit);
hits->num_genes += gt_shash_get_num_elements(all_genes);
}
gt_shash_delete(all_genes, true);
gt_vector_delete(search_hits);
}
GT_INLINE void gt_gtf_search_alignment_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_alignment* const alignment){
gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
// reset the hits
gt_gtf_hits_clear(hits);
gt_shash* all_genes = gt_shash_new();
// process paired alignment
GT_ALIGNMENT_ITERATE(alignment, map){
gt_gtf_hit* template_hit = gt_gtf_hit_new();
template_hit->map = map;
template_hit->mmap = NULL;
template_hit->num_junctions = gt_map_get_num_blocks(map) - 1;
template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, map);
template_hit->num_template_blocks = 1;
double junction_ratio = template_hit->num_junctions == 0 ? -1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions;
if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio;
gt_shash_clear(all_genes, false);
gt_gtf_count_map(gtf, map, NULL, NULL, all_genes, NULL, NULL);
gt_gtf_search_map(gtf, search_hits, map, true);
gt_gtf_create_hit(search_hits, all_genes, hits, template_hit);
hits->num_genes += gt_shash_get_num_elements(all_genes);
}
gt_shash_delete(all_genes, false);
gt_vector_delete(search_hits);
}
GT_INLINE void gt_gtf_count_add_(gt_shash* const source, gt_shash* const target){
GT_SHASH_BEGIN_ITERATE(source, key, value, uint64_t){
if(!gt_shash_is_contained(target, key)){
uint64_t* v = gt_malloc_uint64();
*v = *value;
gt_shash_insert(target, key, v, uint64_t);
}else{
uint64_t* v = gt_shash_get(target,key,uint64_t);
*v += (*value);
}
}GT_SHASH_END_ITERATE;
}
GT_INLINE void gt_gtf_add_coverage(uint64_t* store, const uint64_t transcript_length, const uint64_t bucket){
// add to all
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_ALL, bucket)] += 1;
if(transcript_length <= 150){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_150, bucket)] += 1;
}
if(transcript_length > 150 && transcript_length <= 250){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_250, bucket)] += 1;
}
if(transcript_length > 250 && transcript_length <= 500){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_500, bucket)] += 1;
}
if(transcript_length > 500 && transcript_length <= 1000){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_1000, bucket)] += 1;
}
if(transcript_length > 1000 && transcript_length <= 2500){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_2500, bucket)] += 1;
}
if(transcript_length > 2500 && transcript_length <= 5000){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_5000, bucket)] += 1;
}
if(transcript_length > 5000 && transcript_length <= 7500){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_7500, bucket)] += 1;
}
if(transcript_length > 7500 && transcript_length <= 10000){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_10000, bucket)] += 1;
}
if(transcript_length > 10000 && transcript_length <= 15000){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_15000, bucket)] += 1;
}
if(transcript_length > 15000 && transcript_length <= 20000){
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_20000, bucket)] += 1;
}
}
GT_INLINE void gt_gtf_count_coverage_(const gt_gtf* const gtf, gt_map* const map, char* gene_id,
gt_gtf_count_parms* params){
// get coordinates
uint64_t start = gt_gtf_get_map_begin(map);
uint64_t end = gt_gtf_get_map_end(map);
if(start > end){
return; // happens for (1)>123*... trim followed by split
}
uint64_t map_length = (end-start)+1;
if(map_length <= 1){
// count only maps with at least 2 bases in length
return;
}
// store the search hits and search
gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true);
GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
if(hit->transcript_id == NULL) continue; // no transcript id
if(hit->type == NULL || strcmp("exon", hit->type->buffer) != 0) continue; // no exon or no type
if(gene_id != NULL && (hit->gene_id == NULL || strcmp(hit->gene_id->buffer, gene_id) != 0)) continue; // we are looking for a specific gene_id
gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, hit->transcript_id->buffer);
if(transcript == NULL || transcript->length <= 100){
continue;
}
if(hit->gene_id == NULL) continue; // no gene id on the hit
gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf,hit->gene_id->buffer);
if(gene == NULL) continue; // no gene found
if(gene_id != NULL && strcmp(gene_id, gene->gene_id->buffer) != 0) continue; // we are looking for a specific hit
uint64_t exon_length = (hit->end - hit->start) + 1;
int64_t rel_start = start - hit->start;
int64_t rel_end = (rel_start + map_length) - 1;
if(rel_start < 0){
rel_start = 0;
}
if(rel_end > exon_length){
rel_end = exon_length;
}
if(rel_start >= 0 && rel_end <= exon_length){
// contained in range
// count for exon count
uint64_t start_bucket = (((rel_start/(double)exon_length) * 100.0) + 0.5) - 1;
uint64_t end_bucket = (((rel_end/(double)exon_length) * 100.0) + 0.5) - 1;
uint64_t s = 0;
if(start_bucket >= 0 && start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){
// handle reverse strand and flip coordinates
if(hit->strand == REVERSE){
uint64_t tmp = start_bucket;
start_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - end_bucket;
end_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - tmp;
}
// scale up
// count for global count and make exon coordinates relative to transcript
// coordinate range
uint64_t hit_start_on_transcript = hit->length;
if(hit->strand == REVERSE){
// flip the bucket start if this is a gene on reverse strand
// the exon start/end is already flipped
// so we just flip the order of the exons here
hit_start_on_transcript = (transcript->length - hit_start_on_transcript) - exon_length;
}
uint64_t trans_start_bucket = ((((double)hit_start_on_transcript / (double)transcript->length) * 100.0) + 0.5) - 1;
double scale = (double)exon_length / (double) transcript->length;
start_bucket = (scale * (double)start_bucket) + trans_start_bucket;
end_bucket = (scale * (double)end_bucket) + trans_start_bucket;
if(start_bucket >= 0 && start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){
for(s=start_bucket;s<=end_bucket; s++){
//fprintf(stderr, ">>>GLOBAL COUNT %s : %"PRIu64" S/E: %"PRIu64" %"PRIu64" (%"PRIu64") Exon: %"PRIu64" %"PRIu64"\n", transcript->transcript_id->buffer, s, start, end, map_length, hit->start, hit->end);
// count gene body coverage
gt_gtf_add_coverage(params->gene_body_coverage, transcript->length, s);
// count single transcript
if( gene->num_children == 1){
gt_gtf_add_coverage(params->single_transcript_coverage, transcript->length, s);
}
}
}
}else{
gt_fatal_error_msg("Coverage overlap out of range %"PRIu64" %"PRIu64, start_bucket, end_bucket);
}
}
}
gt_vector_delete(hits);
}
/**
* This counts a single continuous block and records its maximum exon overlap. Note that we do not perform any checks on
* splits/pairs here and simply count for this single continuous map
*
* @param gt_gtf* gtf the gtf reference
* @param gt_map* continuous map block
* @param gt_shash* type_counts the type counts, i.e. exon/intron etc.
* @param gt_shash* gene_counts the gene counts with the gene_id's hit by the map.
* @param gt_shash* exon_counts the exon counts with the gene_id's hit by the map.
* @param gt_shash* junction_counts the number of annotated junctions that are hit per gene
* @param float* overlap float pointer that is set to the maximum exon overlap of this block
* @return uint64_t num_gene_exons number of unique gene_ids hit by exons
*/
GT_INLINE uint64_t gt_gtf_count_map_(const gt_gtf* const gtf, gt_map* const map,
gt_shash* const type_counts,
gt_shash* const gene_counts,
gt_shash* const exon_counts,
gt_shash* const junction_counts,
float* overlap, uint64_t total_map_length,
gt_gtf_count_parms* params){
// get coordinates
uint64_t start = gt_gtf_get_map_begin(map);
uint64_t end = gt_gtf_get_map_end(map);
if(start > end){
gt_gtf_count_(type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
return 0; // happens for (1)>123*... where map starts with trim followed by split
}
uint64_t map_length = (end-start)+1;
// store the search hits and search
gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true);
// we do a complete local count for this block
// and then merge the local count with the global count
// to be able to resolve genes/gene_types through either
// the pair information or the split information,
// assuming that the counts for the other pair and/or the other split
// are already contained in the globally presented count maps
gt_shash* const local_type_counts = gt_shash_new();
gt_shash* local_gene_counts = gt_shash_new();
gt_shash* local_exon_gene_counts = gt_shash_new();
float max_overlap = 0.0;
GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
// count type
gt_gtf_count_(local_type_counts, gt_string_get_string(hit->type));
// count gene id
if(hit->gene_id != NULL){
gt_gtf_count_(local_gene_counts, gt_string_get_string(hit->gene_id));
}
// count gene_id from exons
if(hit->type != NULL && hit->gene_id != NULL && strcmp("exon", hit->type->buffer) == 0){
if(gt_gtf_hits_junction(map, hit)){
gt_gtf_count_(junction_counts, gt_string_get_string(hit->gene_id));
}
gt_gtf_count_(local_exon_gene_counts, gt_string_get_string(hit->gene_id));
gt_gtf_count_(exon_counts, gt_string_get_string(hit->gene_id));
int64_t o = ((hit->end < end ? hit-> end : end) - (hit->start > start ? hit->start : start)) + 1;
float block_overlap = o <= 0 ? 0.0 : ((float)o)/((float)(map_length));
if(block_overlap > max_overlap) max_overlap = block_overlap;
if(block_overlap > 1.0){
gt_fatal_error_msg("Block overlap > 1.0\nMap : %"PRIu64" %"PRIu64" (%"PRIu64")\nExon :%"PRIu64" %"PRIu64" ", start, end, map_length, hit->start, hit->end);
}
}
}
*overlap += (max_overlap * ( (float)map_length / (float) total_map_length));
if(*overlap > 1.000001){
gt_output_map_fprint_map(stderr, map, NULL); fprintf(stderr, "\n");
gt_fatal_error_msg("Block overlap > 1.0 :: %.10f\nMap length : %"PRIu64" Total length: %"PRIu64" max overlap: %.10f", *overlap, map_length, total_map_length, max_overlap);
}
uint64_t num_gene_hit_exons = gt_shash_get_num_elements(local_exon_gene_counts);
// count types and merge them with the global
// counts. NOTE that the order matters here, so
// we:
// 1. check for NA hits where nothing is found
// 2. count exon hits
// 3. count intron hits
// 4. count unknown if the hit was neither an intron nor exon hit
// all counting steps are exclusive, that's why the order matters!
if(gt_vector_get_used(hits) == 0){
// count 'NA' type if we did not hit anything
gt_gtf_count_(type_counts, GT_GTF_TYPE_NA);
}else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON) > 0){
gt_gtf_count_(type_counts, GT_GTF_TYPE_EXON);
}else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON) > 0){
gt_gtf_count_(type_counts, GT_GTF_TYPE_INTRON);
}else{
gt_gtf_count_(type_counts, GT_GTF_TYPE_UNKNOWN);
}
// make gene counts based on exon hits if we found at least one
if(num_gene_hit_exons > 0){
GT_SHASH_BEGIN_KEY_ITERATE(local_exon_gene_counts, key){
gt_gtf_count_(gene_counts, key);
}GT_SHASH_END_ITERATE;
}else{
// add all gene counts
GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
gt_gtf_count_(gene_counts, key);
}GT_SHASH_END_ITERATE;
}
// if(params->single_transcript_coverage != NULL){
// gt_gtf_count_coverage_(gtf, map, NULL, params);
// }
gt_shash_delete(local_gene_counts, true);
gt_shash_delete(local_type_counts, true);
gt_shash_delete(local_exon_gene_counts, true);
gt_vector_delete(hits);
return num_gene_hit_exons;
}
GT_INLINE uint64_t gt_gtf_join_(gt_string* buf, char* base, bool multi_gene, uint64_t blocks){
if(blocks == 0) return 0;
uint64_t i = 0;
uint64_t len = strlen(base);
for(i=0; i<blocks; i++){
gt_string_right_append_string(buf, base, len);
if(multi_gene){
gt_string_right_append_string(buf, "_mg", 3);
}
if(i<blocks-1){
gt_string_append_char(buf, '^');
}
}
return blocks;
}
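/*
 * Example of the pattern strings assembled with gt_gtf_join_ (values illustrative): a two-block
 * split map hitting exons of a single gene yields "exon^exon", while a block whose exons hit
 * multiple genes gets the "_mg" suffix, e.g. "exon_mg^intron". In gt_gtf_count_map below, a '|'
 * separates the two ends of a paired-end map.
 */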
GT_INLINE double gt_gtf_count_get_sum_(gt_shash* table){
double v = 0;
GT_SHASH_BEGIN_ELEMENT_ITERATE(table, value, uint64_t){
v += *value;
}GT_SHASH_END_ITERATE;
return v;
}
GT_INLINE uint64_t gt_gtf_get_map_length(gt_map* const maps){
uint64_t map_length = 0;
GT_MAP_ITERATE(maps, map){
// get coordinates
uint64_t start = gt_gtf_get_map_begin(map);
uint64_t end = gt_gtf_get_map_end(map);
if(start > end){
continue; // happens for weird things like (1)>231*... where the map starts with a trim followed by a split
}
map_length += (end-start)+1;
}
return map_length;
}
/**
* Count a map. This respects split maps and unifies gene_ids based on
* the split. If both sides of the split match multiple gene_ids but there is
* a common gene_id on both sides, only that id is counted. Otherwise a count is set
* for all gene_ids.
* In addition to the counts, if a pattern string is given, it is filled with the type
* pattern with respect to split maps. For example:
*
* exon -> exon
* exon and intron (split map) -> exon^intron
* exon in multiple genes -> exon_mg
*
* The function returns the number of gene_ids hit by the map.
*
* The first map has to be specified, but the second one is optional. If it is set,
* the second map block is also checked and counted.
*
*
* @param gt_gtf* gtf the gtf reference
* @param gt_map* map1 the first map
* @param gt_map* map2 the second map
* @param gt_shash* type_counts the type counts
* @param gt_shash* gene_counts the gene counts
* @param gt_string pattern the pattern string filled based on the types
* @return uint64_t num_gene_hits the number of gene_ids hit by the map
*/
GT_INLINE uint64_t gt_gtf_count_map(const gt_gtf* const gtf, gt_map* const map1, gt_map* const map2,
gt_shash* const pattern_counts, gt_shash* const gene_counts,
gt_string* pattern, gt_gtf_count_parms* params){
// clear patterns
if(pattern != NULL)gt_string_clear(pattern);
// get number of blocks and ensure we have at least one
uint64_t blocks = gt_map_get_num_blocks(map1);
if(map2 != NULL){
blocks += gt_map_get_num_blocks(map2);
}
if(blocks == 0) return 0;
// local counts for all blocks
// and store the number of multi gene exon hits for each block
// in addition we create the base pattern per block here
gt_shash* const local_type_counts = gt_shash_new();
gt_shash* local_gene_counts = gt_shash_new();
gt_shash* local_gene_counts_1 = gt_shash_new();
gt_shash* local_gene_counts_2 = gt_shash_new();
gt_shash* local_junction_counts_1 = gt_shash_new();
gt_shash* local_junction_counts_2 = gt_shash_new();
gt_shash* local_exon_counts_1 = gt_shash_new();
gt_shash* local_exon_counts_2 = gt_shash_new();
uint64_t* const local_exon_gene_hits = malloc(blocks * sizeof(uint64_t));
gt_vector* const local_type_patterns = gt_vector_new(2, sizeof(char*));
uint64_t exons, introns, unknown, not_annotated, empty_blocks;
exons = introns = unknown = not_annotated = empty_blocks =0;
uint64_t i = 0;
float block_1_overlap = 0.0;
float block_2_overlap = 0.0;
uint64_t map_1_length = gt_gtf_get_map_length(map1);
GT_MAP_ITERATE(map1, map_block){
local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_1, local_exon_counts_1,local_junction_counts_1, &block_1_overlap, map_1_length, params);
uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON);
uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON);
uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN);
uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA);
uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
// add the pattern string based on the count value that changed
if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*);
if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*);
if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*);
if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*);
if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*);
exons = _exons;
introns = _introns;
unknown = _unknown;
not_annotated = _not_annotated;
empty_blocks = _empty_block;
}
// if we hit more than one gene,
// try to unify the gene by checking the other blocks for
// overlaps. If we find genes that are covered by all the
// blocks we count only them.
if(gt_shash_get_num_elements(local_gene_counts_1) > 1){
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks1 = gt_map_get_num_blocks(map1);
// search for the best junction hit
uint64_t hits_junctions = 0;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
uint64_t m = gt_gtf_get_count_(local_junction_counts_1,gene_id);
if(*count == blocks1 && m > 0){
if(m > hits_junctions) hits_junctions = m;
}
}GT_SHASH_END_ITERATE;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
if(*count == blocks1 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_1,gene_id) == hits_junctions)){
gt_gtf_count_sum_(merged_counts, gene_id, blocks1);
}
}GT_SHASH_END_ITERATE;
// if we found some unique ids that are covered by both
// we flip over to the merged counts
gt_shash_delete(local_gene_counts_1, true);
local_gene_counts_1 = merged_counts;
// we flipped so we reset the exon gene hit counts to ones as well
if(gt_shash_get_num_elements(merged_counts) > 0){
for(i=0;i<blocks1;i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
}
if(map2 != NULL){
uint64_t map_2_length = gt_gtf_get_map_length(map2);
GT_MAP_ITERATE(map2, map_block){
local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_2, local_exon_counts_2, local_junction_counts_2, &block_2_overlap, map_2_length, params);
uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON);
uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON);
uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN);
uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA);
uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
// add the pattern string based on the count value that changed
if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*);
if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*);
if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*);
if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*);
if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*);
exons = _exons;
introns = _introns;
unknown = _unknown;
not_annotated = _not_annotated;
empty_blocks = _empty_block;
}
// unify the gene counts based on the number of blocks.
// the gene_counts are reduced to either the ones that are found in
// all blocks or they are kept as they are
if(gt_shash_get_num_elements(local_gene_counts_2) > 1){
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks2 = gt_map_get_num_blocks(map2);
// search for the best junction hit
uint64_t hits_junctions = 0;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){
uint64_t m = gt_gtf_get_count_(local_junction_counts_2,gene_id);
if(*count == blocks2 && m > 0){
if(m > hits_junctions) hits_junctions = m;
}
}GT_SHASH_END_ITERATE;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){
if(*count == blocks2 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_2,gene_id) == hits_junctions)){
gt_gtf_count_sum_(merged_counts, gene_id, blocks2);
}
}GT_SHASH_END_ITERATE;
// if we found some unique ids that are covered by both
// we flip over to the merged counts
gt_shash_delete(local_gene_counts_2, true);
local_gene_counts_2 = merged_counts;
if(gt_shash_get_num_elements(merged_counts) > 0){
uint64_t blocks1 = gt_map_get_num_blocks(map1);
// we flipped so we reset the exon gene hit counts to ones as well
for(i=blocks1;i<(blocks1+blocks2);i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
}
}
/**
* Merge everything into a single merged map
*/
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks1 = gt_map_get_num_blocks(map1);
uint64_t blocks2 = 0;
if(map2 != NULL){
blocks2 = gt_map_get_num_blocks(map2);
}
float overlap = (block_1_overlap + block_2_overlap) / (float) (map2==NULL?1.0:2.0);
uint64_t map2_hits = map2 != NULL ? gt_shash_get_num_elements(local_gene_counts_2) : 0;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
if( (gt_shash_is_contained(local_gene_counts_2, gene_id) || map2_hits == 0) && (params == NULL || params->exon_overlap <= 0.0 || overlap >= params->exon_overlap)){
uint64_t nv =*count + gt_gtf_get_count_(local_gene_counts_2, gene_id);
gt_gtf_count_sum_(merged_counts, gene_id, nv);
if(overlap > 1.000001){
gt_fatal_error_msg("Exon Overlap %.10f > 1.0 from %.10f %.10f!", overlap, block_1_overlap, block_2_overlap);
}
}
}GT_SHASH_END_ITERATE;
uint64_t unique_genes_between_pairs = gt_shash_get_num_elements(merged_counts);
// we found unique genes through the pair, so we can use
// the merged map to do the final counts
if(unique_genes_between_pairs > 0){
// we flip the exon gene hit counts to ones in case the pair reduced the hits to a single gene
if(unique_genes_between_pairs == 1){
for(i=0;i<blocks;i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
// merge the gene counts weighted to a single map
GT_SHASH_BEGIN_KEY_ITERATE(merged_counts, gene_id){
double v = 0.0;
if(gt_shash_is_contained(local_exon_counts_1, gene_id) || ((params == NULL || params->exon_overlap <= 0.0) && gt_shash_is_contained(local_gene_counts_1, gene_id))){
v+= 1.0;
}
if(gt_shash_is_contained(local_exon_counts_2, gene_id) || ((params == NULL || params->exon_overlap <= 0.0 )&& gt_shash_is_contained(local_gene_counts_2, gene_id))){
v+=1.0;
}
if(v > 0.0) gt_gtf_count_weight_(local_gene_counts, gene_id, v);
}GT_SHASH_END_ITERATE;
}
// get the number of hits of this map
uint64_t num_gene_hits = gt_shash_get_num_elements(local_gene_counts);
if(pattern_counts != NULL){
// now iterate the blocks and construct final pattern
for(i=0; i<blocks; i++){
char* p = *(gt_vector_get_elm(local_type_patterns, i, char*));
if(strcmp(p, GT_GTF_TYPE_EMPTY_BLOCK) == 0) continue;
// for exons: if we have a single gene hit, keep the plain type; in case of a multi-gene hit, append _mg if
// the multi-gene hit comes from the current block
gt_gtf_join_(pattern, p, (strcmp("exon",p) == 0) ? ((num_gene_hits == 1) ? false : (local_exon_gene_hits[i] > 1)) : false, 1);
// add paired end spacer
if(map2 != NULL && i == (blocks1-1)){
gt_string_append_char(pattern, '|');
}else{
if(i<blocks-1){
gt_string_append_char(pattern, '^');
}
}
}
gt_string_append_eos(pattern);
// count global type based on the constructed pattern
gt_gtf_count_(pattern_counts, gt_string_get_string(pattern));
}
if(params != NULL && params->num_maps == 1){
// count junctions for single mapping reads
if(blocks1 > 1){
params->num_junctions += blocks1 - 1;
params->num_annotated_junctions += gt_gtf_count_junction(gtf, map1);
}
if(blocks2 > 1){
params->num_junctions += blocks2 - 1;
params->num_annotated_junctions += gt_gtf_count_junction(gtf, map2);
}
}
if(gene_counts != NULL){
// count the gene ids
GT_SHASH_BEGIN_ITERATE(local_gene_counts, key, e, double){
if(gt_shash_is_contained(gene_counts, key)){
double current = gt_gtf_get_count_weight(gene_counts, key);
if(current < *e){
// set to max count
gt_gtf_count_weight_(gene_counts, key, (*e)-current);
}
}else{
gt_gtf_count_weight_(gene_counts, key, *e);
}
}GT_SHASH_END_ITERATE;
}
if(params != NULL && params->single_transcript_coverage != NULL){
// do coverage counts for merged genes
GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
// count map1
GT_MAP_ITERATE(map1, map_block){
gt_gtf_count_coverage_(gtf, map_block, key, params);
}
if(map2 != NULL){
GT_MAP_ITERATE(map2, map_block){
gt_gtf_count_coverage_(gtf, map_block, key, params);
}
}
}GT_SHASH_END_ITERATE;
}
// cleanup
gt_vector_delete(local_type_patterns);
gt_shash_delete(local_gene_counts, true);
// cleanup
gt_shash_delete(local_gene_counts_1, true);
gt_shash_delete(local_gene_counts_2, true);
gt_shash_delete(local_exon_counts_1, true);
gt_shash_delete(local_exon_counts_2, true);
gt_shash_delete(local_junction_counts_1, true);
gt_shash_delete(local_junction_counts_2, true);
gt_shash_delete(local_type_counts, true);
gt_shash_delete(merged_counts, true);
free(local_exon_gene_hits);
return gt_shash_get_num_elements(gene_counts);
}
GT_INLINE uint64_t gt_gtf_count_alignment(gt_gtf* const gtf, gt_alignment* const alignment, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
uint64_t hits = 0;
gt_string* pattern = gt_string_new(16);
params->num_maps = gt_alignment_get_num_maps(alignment);
GT_ALIGNMENT_ITERATE(alignment,map) {
hits = gt_gtf_count_map(gtf, map, NULL, pattern_count, gene_counts, pattern, params);
gt_string_clear(pattern);
}
gt_string_delete(pattern);
return hits;
}
GT_INLINE uint64_t gt_gtf_count_template(gt_gtf* const gtf, gt_template* const template, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
uint64_t hits = 0;
gt_string* pattern = gt_string_new(16);
params->num_maps = gt_template_get_num_mmaps(template);
GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attr) {
hits = gt_gtf_count_map(gtf, mmap[0], mmap[1], pattern_count, gene_counts, pattern, params);
gt_string_clear(pattern);
}
gt_string_delete(pattern);
return hits;
}
GT_INLINE void gt_gtf_search_map(const gt_gtf* const gtf, gt_vector* const hits, gt_map* const map, const bool clean_target){
GT_MAP_ITERATE(map, block){
uint64_t start = gt_map_get_begin_mapping_position(map);
uint64_t end = gt_map_get_end_mapping_position(map);
gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, clean_target);
}
}
GT_INLINE void gt_gtf_search_alignment(const gt_gtf* const gtf, gt_vector* const hits, gt_alignment* const alignment){
GT_ALIGNMENT_ITERATE(alignment, map){
gt_gtf_search_map(gtf, hits, map, true);
}
}
GT_INLINE void gt_gtf_search_template(const gt_gtf* const gtf, gt_vector* const hits, gt_template* const template){
GT_TEMPLATE_IF_REDUCES_TO_ALINGMENT(template, alignment){
gt_gtf_search_alignment(gtf,hits, alignment);
}GT_TEMPLATE_END_REDUCTION__RETURN;
gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 0));
gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 1));
}
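/*
 * Minimal usage sketch (illustrative only, compiled out by default): load an annotation with
 * gt_gtf_read_from_file, query a region with gt_gtf_search and print the entries found.
 * The reference name and coordinates below are placeholders.
 */
#ifdef GT_GTF_USAGE_EXAMPLE
int gt_gtf_usage_example(char* const gtf_file){
  gt_gtf* const gtf = gt_gtf_read_from_file(gtf_file, 1); // single-threaded parse
  gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
  char ref[] = "chr1"; // placeholder reference name
  // collect all annotation entries overlapping chr1:10000-20000
  gt_gtf_search(gtf, hits, ref, 10000, 20000, true);
  GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
    gt_gtf_print_entry_(stdout, *e, NULL);
  }
  gt_vector_delete(hits);
  gt_gtf_delete(gtf);
  return 0;
}
#endif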
GB_binop__rminus_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint16)
// A*D function (colscale): GB (_AxD__rminus_uint16)
// D*A function (rowscale): GB (_DxB__rminus_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint16)
// C=scalar+B GB (_bind1st__rminus_uint16)
// C=scalar+B' GB (_bind1st_tran__rminus_uint16)
// C=A+scalar GB (_bind2nd__rminus_uint16)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
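// Example: for x = aij = 3 and y = bij = 10, rminus yields z = 10 - 3 = 7,
// i.e. the operands are applied in the reverse order of the MINUS operator.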
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
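// Example: binding x = 5 as the first operand of rminus computes
// Cx [p] = Bx [p] - 5 for every entry present in B (Bb marks presence).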
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mpifft.c
|
/* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; fill-column: 79; coding: iso-latin-1-unix -*- */
/* mpifft.c
*/
#include <hpcc.h>
#include "hpccfft.h"
#include "wrapmpifftw.h"
double *HPCC_fft_timings_forward, *HPCC_fft_timings_backward;
static void
MPIFFT0(HPCC_Params *params, int doIO, FILE *outFile, MPI_Comm comm, int locN,
double *UGflops, s64Int_t *Un, double *UmaxErr, int *Ufailure) {
int commRank, commSize, failure, flags;
s64Int_t i, n;
s64Int_t locn, loc0, alocn, aloc0, tls;
double maxErr, tmp1, tmp2, tmp3, t0, t1, t2, t3, Gflops;
double deps;
fftw_complex *inout, *work;
fftw_mpi_plan p;
hpcc_fftw_mpi_plan ip;
int sAbort, rAbort;
#ifdef USING_FFTW
int ilocn, iloc0, ialocn, ialoc0, itls;
#endif
failure = 1;
Gflops = -1.0;
deps = HPL_dlamch( HPL_MACH_EPS );
maxErr = 1.0 / deps;
MPI_Comm_size( comm, &commSize );
MPI_Comm_rank( comm, &commRank );
n = locN;
/* the number of processes has been factored out - need to put it back in */
n *= commSize;
n *= commSize; /* global vector size */
#ifdef USING_FFTW
/* FFTW ver. 2 only supports vector sizes that fit in 'int' */
if (n > (1<<30)-1+(1<<30)) {
#ifdef HPCC_FFTW_CHECK32
goto no_plan;
#else
if (doIO) {
fprintf( outFile, "Warning: problem size too large: %ld*%d*%d\n", (long)(n / commSize / commSize), commSize, commSize );
}
#endif
}
#endif
#ifdef HPCC_FFTW_ESTIMATE
flags = FFTW_ESTIMATE;
#else
flags = FFTW_MEASURE;
#endif
t1 = -MPI_Wtime();
p = fftw_mpi_create_plan( comm, n, FFTW_FORWARD, flags );
t1 += MPI_Wtime();
if (! p) goto no_plan;
#ifdef USING_FFTW
fftw_mpi_local_sizes( p, &ilocn, &iloc0, &ialocn, &ialoc0, &itls );
locn = ilocn;
loc0 = iloc0;
alocn = ialocn;
aloc0 = ialoc0;
tls = itls;
#else
fftw_mpi_local_sizes( p, &locn, &loc0, &alocn, &aloc0, &tls );
#endif
inout = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *inout) );
work = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *work) );
sAbort = 0;
if (! inout || ! work) sAbort = 1;
MPI_Allreduce( &sAbort, &rAbort, 1, MPI_INT, MPI_SUM, comm );
if (rAbort > 0) {
fftw_mpi_destroy_plan( p );
goto comp_end;
}
/* Make sure that `inout' and `work' are initialized in parallel if using
OpenMP: this will ensure better placement of pages if the first-touch policy
is used by a distributed shared memory machine. */
#ifdef _OPENMP
#pragma omp parallel for
for (i = 0; i < tls; ++i) {
c_re( inout[i] ) = c_re( work[i] ) = 0.0;
c_im( inout[i] ) = c_im( work[i] ) = 0.0;
}
#endif
t0 = -MPI_Wtime();
HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, inout );
t0 += MPI_Wtime();
t2 = -MPI_Wtime();
fftw_mpi( p, 1, inout, work );
t2 += MPI_Wtime();
fftw_mpi_destroy_plan( p );
ip = HPCC_fftw_mpi_create_plan( comm, n, FFTW_BACKWARD, FFTW_ESTIMATE );
if (ip) {
t3 = -MPI_Wtime();
HPCC_fftw_mpi( ip, 1, inout, work );
t3 += MPI_Wtime();
HPCC_fftw_mpi_destroy_plan( ip );
}
HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, work ); /* regenerate data */
maxErr = 0.0;
for (i = 0; i < locn; ++i) {
tmp1 = c_re( inout[i] ) - c_re( work[i] );
tmp2 = c_im( inout[i] ) - c_im( work[i] );
tmp3 = sqrt( tmp1*tmp1 + tmp2*tmp2 );
maxErr = maxErr >= tmp3 ? maxErr : tmp3;
}
MPI_Allreduce( &maxErr, UmaxErr, 1, MPI_DOUBLE, MPI_MAX, comm );
maxErr = *UmaxErr;
if (maxErr / log(n) / deps < params->test.thrsh) failure = 0;
if (t2 > 0.0) Gflops = 1e-9 * (5.0 * n * log(n) / log(2.0)) / t2;
if (doIO) {
fprintf( outFile, "Number of nodes: %d\n", commSize );
fprintf( outFile, "Vector size: %20.0f\n", tmp1 = (double)n );
fprintf( outFile, "Generation time: %9.3f\n", t0 );
fprintf( outFile, "Tuning: %9.3f\n", t1 );
fprintf( outFile, "Computing: %9.3f\n", t2 );
fprintf( outFile, "Inverse FFT: %9.3f\n", t3 );
fprintf( outFile, "max(|x-x0|): %9.3e\n", maxErr );
fprintf( outFile, "Gflop/s: %9.3f\n", Gflops );
}
comp_end:
if (work) HPCC_fftw_free( work );
if (inout) HPCC_fftw_free( inout );
no_plan:
*UGflops = Gflops;
*Un = n;
*UmaxErr = maxErr;
*Ufailure = failure;
}
int
HPCC_MPIFFT(HPCC_Params *params) {
int commRank, commSize;
int locN, procCnt, isComputing, doIO, failure = 0;
s64Int_t n;
double Gflops = -1.0, maxErr = -1.0;
MPI_Comm comm;
FILE *outFile;
MPI_Comm_size( MPI_COMM_WORLD, &commSize );
MPI_Comm_rank( MPI_COMM_WORLD, &commRank );
doIO = commRank == 0 ? 1 : 0;
if (doIO) {
outFile = fopen( params->outFname, "a" );
if (! outFile) outFile = stderr;
}
/*
There are two vectors of size 'n'/'commSize': inout, work,
and internal work: 2*'n'/'commSize'; it's 4 vectors then.
FFTE requires that the global vector size 'n' has to be at least
as big as square of number of processes. The square is calculated
in each factor independently. In other words, 'n' has to have
at least twice as many 2 factors as the process count, twice as many
3 factors and twice as many 5 factors.
*/
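/* Example: with commSize = 6 = 2*3 processes, the global size 'n' must be
divisible by 2^2 * 3^2 = 36, i.e. it must contain commSize^2 in every prime
factor (2, 3 and 5). */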
#ifdef HPCC_FFT_235
locN = 0; procCnt = commSize + 1;
do {
int f[3];
procCnt--;
for ( ; procCnt > 1 && HPCC_factor235( procCnt, f ); procCnt--)
; /* EMPTY */
/* Make sure the local vector size is greater than 0 */
locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 0 );
for ( ; locN >= 1 && HPCC_factor235( locN, f ); locN--)
; /* EMPTY */
} while (locN < 1);
#else
/* Find power of two that is smaller or equal to number of processes */
for (procCnt = 1; procCnt <= (commSize >> 1); procCnt <<= 1)
; /* EMPTY */
/* Make sure the local vector size is greater than 0 */
while (1) {
locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 1 );
if (locN) break;
procCnt >>= 1;
}
#endif
isComputing = commRank < procCnt ? 1 : 0;
HPCC_fft_timings_forward = params->MPIFFTtimingsForward;
HPCC_fft_timings_backward = params->MPIFFTtimingsBackward;
if (commSize == procCnt)
comm = MPI_COMM_WORLD;
else
MPI_Comm_split( MPI_COMM_WORLD, isComputing ? 0 : MPI_UNDEFINED, commRank, &comm );
if (isComputing)
MPIFFT0( params, doIO, outFile, comm, locN, &Gflops, &n, &maxErr, &failure );
if (commSize != procCnt && isComputing && comm != MPI_COMM_NULL)
MPI_Comm_free( &comm );
params->MPIFFT_N = n;
params->MPIFFT_Procs = procCnt;
params->MPIFFT_maxErr = maxErr;
MPI_Bcast( &Gflops, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
params->MPIFFTGflops = Gflops;
params->FFTEnblk = FFTE_NBLK;
params->FFTEnp = FFTE_NP;
params->FFTEl2size = FFTE_L2SIZE;
if (failure)
params->Failure = 1;
if (doIO) if (outFile != stderr) fclose( outFile );
return 0;
}
|
optimized_cluster_tree.h
|
#pragma once
#include "bct_kernel_type.h"
#include "optimized_bct_types.h"
namespace rsurfaces
{
struct BVHSettings
{
mint split_threshold = 8;
// bool use_old_prepost = false;
// TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Chunks;
// TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Tasks;
TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Sequential;
};
// a global instance to store default settings
extern BVHSettings BVHDefaultSettings;
struct Cluster2 // slim POD container to hold only the data relevant for the construction phase in the tree, before it is serialized
{
public:
Cluster2(){};
~Cluster2(){
// delete left;
// delete right;
};
Cluster2(mint begin_, mint end_, mint depth_);
mint begin = 0; // position of first triangle in cluster relative to array ordering
mint end = 0; // position behind last triangle in cluster relative to array ordering
mint depth = 0; // depth within the tree -- not absolutely necessary but nice to have for plotting images
mint max_depth = 0; // used to compute the maximal depth in the tree
mint descendant_count = 0;
mint descendant_leaf_count = 0;
Cluster2 *left = nullptr;
Cluster2 *right = nullptr;
}; //Cluster2
class OptimizedClusterTree // binary cluster tree; layout mostly in Structure-of-Arrays fashion in order to prepare for SIMDization. Not SIMDized yet, though.
{
public:
OptimizedClusterTree(){};
// Solving interface problems by using standard types
// This way, things are easier to port. For example, I can call this from Mathematica for faster debugging.
OptimizedClusterTree(
const mreal * restrict const P_coords_, // coordinates per primitive used for clustering; assumed to be of size primitive_count x dim
const mint primitive_count_,
const mint dim_,
const mreal * restrict const P_hull_coords_, // points that define the convex hulls of primitives; assumed to be array of size primitive_count x hull_count x dim
const mint hull_count_,
const mreal * restrict const P_near_, // data used for the actual interaction computation; assumed to be of size primitive_count x near_dim. For a triangle mesh in 3D, we want to feed each triangle's i) area, ii) barycenter, and iii) normal as a 1 + 3 + 3 = 7 vector
const mint near_dim_,
const mreal * restrict const P_far_, // data used for the actual interaction computation; assumed to be of size primitive_count x far_dim. For a triangle mesh in 3D, we want to feed each triangle's i) area, ii) barycenter, and iii) orthoprojector onto the normal space as a 1 + 3 + 6 = 10 vector
const mint far_dim_,
// const mreal * const restrict P_moments_, // Interface to deal with higher order multipole expansion. Not used, yet.
// const mint moment_count_,
const mint * restrict const ordering_, // A suggested preordering of primitives; this gets applied before the clustering begins in the hope that it may improve the sorting within a cluster --- at least in the top level(s). This could, e.g., be the ordering obtained by a tree for a similar data set.
MKLSparseMatrix &DiffOp,
MKLSparseMatrix &AvOp,
BVHSettings settings_ = BVHDefaultSettings
);
mint dim = 3;
mint near_dim = 7; // = 1 + 3 + 3 for weight, center, normal, stored consecutively
mint far_dim = 10; // = 1 + 3 + 3 * (3 + 1)/2 for weight, center, projector, stored consecutively
mint hull_count = 3;
mint tree_thread_count = 1;
mint thread_count = 1;
mint primitive_count = 0;
mint cluster_count = 0;
mint leaf_cluster_count = 0;
mint max_buffer_dim = 0;
mint buffer_dim = 0;
// mint moment_count = 22;
BVHSettings settings;
mint *restrict P_ext_pos = nullptr; // Reordering of primitives; crucial for communication with outside world
mint *restrict inverse_ordering = nullptr; // Inverse ordering of the above; crucial for communication with outside world
// A_Vector<mint> P_leaf; // Index of the leaf cluster to which the primitive belongs
// "C_" stands for "cluster", "P_" stands for "primitive"
mint *restrict C_begin = nullptr;
mint *restrict C_end = nullptr;
mint *restrict C_depth = nullptr;
mint *restrict C_next = nullptr;
mint *restrict C_left = nullptr; // list of index of left children; entry is -1 if no child is present
mint *restrict C_right = nullptr; // list of index of right children; entry is -1 if no child is present
bool *restrict C_is_chunk_root = nullptr;
// Primitive double data, stored in Structure of Arrays fashion
A_Vector<mreal *> P_near; //weight, center, normal, stored consecutively; assumed to be matrix of size near_dim x primitive_count!
A_Vector<mreal *> P_far; //weight, center, projector, stored consecutively; assumed to be matrix of size far_dim x primitive_count!
A_Vector<mreal *> P_coords; //clustering coordinates, stored as dim x primitive_count matrix
A_Vector<mreal *> P_min; //lower bounding box point, stored as dim x primitive_count matrix
A_Vector<mreal *> P_max; //upper bounding box point, stored as dim x n matrix
// A_Vector<mreal * restrict> P_moments;
mreal *restrict P_in = nullptr;
mreal *restrict P_out = nullptr;
// mreal * restrict P_moment_buffer = nullptr;
// Cluster double data, stored in Structure of Arrays fashion
A_Vector<mreal *> C_far; //weight, center, normal, stored consecutively; assumed to be matrix of size data_dim x n
A_Vector<mreal *> C_coords; //clustering coordinate
A_Vector<mreal *> C_min;
A_Vector<mreal *> C_max;
// A_Vector<mreal * restrict> C_moments;
mreal *restrict C_in = nullptr;
mreal *restrict C_out = nullptr;
// mreal * restrict C_moment_buffer = nullptr;
mreal *restrict C_squared_radius = nullptr;
mint *restrict leaf_clusters = nullptr;
mint *restrict leaf_cluster_lookup = nullptr;
mint *restrict leaf_cluster_ptr = nullptr; // points to the __end__ of each leaf cluster
A_Vector<A_Vector<mreal>> P_D_near;
A_Vector<A_Vector<mreal>> P_D_far;
A_Vector<A_Vector<mreal>> C_D_far;
// mint scratch_size = 12;
// A_Vector<A_Vector<mreal>> scratch;
MKLSparseMatrix hi_pre;
MKLSparseMatrix hi_post;
MKLSparseMatrix lo_pre;
MKLSparseMatrix lo_post;
MKLSparseMatrix P_to_C;
MKLSparseMatrix C_to_P;
A_Vector<A_Vector<mint>> chunk_roots;
mint tree_max_depth = 0;
bool chunks_prepared = false;
~OptimizedClusterTree()
{
ptic("~OptimizedClusterTree");
// pointer arrays come at the cost of manual deallocation...
#pragma omp parallel
{
#pragma omp single
{
// #pragma omp task
// {
// for( mint k = 0; k < moment_count; ++ k )
// {
// safe_free(P_moments[k]);
// }
// }
//
// #pragma omp task
// {
// for( mint k = 0; k < moment_count; ++ k )
// {
// safe_free(C_moments[k]);
// }
// }
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(P_coords.size()); ++k)
{
safe_free(P_coords[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(C_coords.size()); ++k)
{
safe_free(C_coords[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(P_near.size()); ++k)
{
safe_free(P_near[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(C_far.size()); ++k)
{
safe_free(C_far[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(P_min.size()); ++k)
{
safe_free(P_min[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(P_max.size()); ++k)
{
safe_free(P_max[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(C_min.size()); ++k)
{
safe_free(C_min[k]);
}
}
#pragma omp task
{
for (mint k = 0; k < static_cast<mint>(C_max.size()); ++k)
{
safe_free(C_max[k]);
}
}
#pragma omp task
{
safe_free(P_in);
}
#pragma omp task
{
safe_free(P_out);
}
#pragma omp task
{
safe_free(C_in);
}
#pragma omp task
{
safe_free(C_out);
}
#pragma omp task
{
safe_free(C_squared_radius);
}
#pragma omp task
{
safe_free(leaf_clusters);
}
#pragma omp task
{
safe_free(leaf_cluster_lookup);
}
#pragma omp task
{
safe_free(leaf_cluster_ptr);
}
#pragma omp task
{
safe_free(inverse_ordering);
}
#pragma omp task
{
safe_free(P_ext_pos);
}
#pragma omp task
{
safe_free(C_begin);
}
#pragma omp task
{
safe_free(C_end);
}
#pragma omp task
{
safe_free(C_depth);
}
#pragma omp task
{
safe_free(C_next);
}
#pragma omp task
{
safe_free(C_left);
}
#pragma omp task
{
safe_free(C_right);
}
#pragma omp task
{
safe_free(C_is_chunk_root);
}
}
}
ptoc("~OptimizedClusterTree");
};
void SplitCluster(Cluster2 * const C, const mint free_thread_count);
void Serialize(Cluster2 * const C, const mint ID, const mint leaf_before_count, const mint free_thread_count);
void ComputePrimitiveData(
const mreal * restrict const P_hull_coords_,
const mreal * restrict const P_near_,
const mreal * restrict const P_far_
// , const mreal * const restrict P_moments_
); // copy, reordering and computing bounding boxes
void ComputeClusterData();
void RequireBuffers(const mint cols);
void ComputePrePost(MKLSparseMatrix &DiffOp, MKLSparseMatrix &AvOp);
void CleanseBuffers();
void CleanseD();
void Pre(Eigen::MatrixXd &input, BCTKernelType type);
void Pre(mreal *input, const mint cols, BCTKernelType type);
void Post(Eigen::MatrixXd &output, BCTKernelType type, bool addToResult = false);
void Post(mreal *output, const mint cols, BCTKernelType type, bool addToResult = false);
void PercolateUp();
void PercolateDown();
void RequireChunks();
// some prototype
void PercolateUp_Chunks();
void percolateUp_Tip( const mint C);
// some prototype
void PercolateDown_Chunks();
void percolateDown_Tip( const mint C);
// TODO: Not nearly as fast as I'd like it to be; not scalable!
// recursive algorithm parallelized by OpenMP tasks
void PercolateUp_Tasks(const mint C, const mint free_thread_count);
// TODO: Not nearly as fast as I'd like it to be; not scalable!
// recursive algorithm parallelized by OpenMP tasks
void PercolateDown_Tasks(const mint C, const mint free_thread_count);
// TODO: use a stack for recursion instead of the program stack?
// sequential, recursive algorithm
void PercolateUp_Seq(const mint C);
// TODO: use a stack for recursion instead of the program stack?
// sequential, recursive algorithm
void PercolateDown_Seq(const mint C);
void CollectDerivatives( mreal * restrict const P_D_near_output ); // collect only near field data
void CollectDerivatives( mreal * restrict const P_D_near_output, mreal * restrict const P_D_far_output );
// Updates only the computational data (primitive/cluster areas, centers of mass and normals).
// All data related to clustering or multipole acceptance criteria remain unchanged, as do
// the preprocessor and postprocessor matrices (which are needed for the matrix-vector multiplies of the BCT).
void SemiStaticUpdate( const mreal * restrict const P_near_, const mreal * restrict const P_far_ );
void PrintToFile(std::string filename = "./OptimizedClusterTree.tsv");
private:
void computeClusterData(const mint C, const mint free_thread_count); // helper function for ComputeClusterData
bool requireChunks( mint C, mint last, mint thread);
}; //OptimizedClusterTree
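// Usage sketch (illustrative only; the mesh-derived buffers and the two sparse
// operators are assumed to be prepared by the caller, and the argument order
// follows the constructor declared above):
//
//   BVHSettings settings;
//   settings.split_threshold = 16;
//   OptimizedClusterTree tree( P_coords, primitive_count, 3,
//                              P_hull_coords, 3,
//                              P_near, 7, P_far, 10,
//                              ordering, DiffOp, AvOp, settings );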
} // namespace rsurfaces
|
GB_unaryop__identity_uint16_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_int64
// op(A') function: GB_tran__identity_uint16_int64
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint16_int64
(
uint16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_int64_fp32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_fp32
// op(A') function: GB_tran__identity_int64_fp32
// C type: int64_t
// A type: float
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z ; GB_CAST_SIGNED(z,aij,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int64_fp32
(
int64_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.c
|
/*
* Order-1, 3D 25-point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
axpbyMany.c
|
/*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C" void FUNC(axpbyMany)(const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat & alpha, const dfloat * __restrict__ cpu_a,
const dfloat & beta, dfloat * __restrict__ cpu_b){
#ifdef __NEKRS__OMP__
#pragma omp parallel for collapse(2)
#endif
for(int fld=0;fld<Nfields;fld++) {
for(dlong i=0;i<N;++i){
const dlong id = i + fld*offset;
const dfloat ai = cpu_a[id];
const dfloat bi = cpu_b[id];
cpu_b[id] = alpha*ai + beta*bi;
}
}
}
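// Example: with Nfields = 1 the offset is irrelevant and the loop reduces to the
// BLAS-like update cpu_b[i] = alpha*cpu_a[i] + beta*cpu_b[i] for i in [0, N).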
|
bml_allocate_ellpack_typed.c
|
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Clear a matrix.
*
* Numbers of non-zeroes, indices, and values are set to zero.
*
* \ingroup allocate_group
*
* \param A The matrix.
*/
void TYPED_FUNC(
bml_clear_ellpack) (
bml_matrix_ellpack_t * A)
{
memset(A->nnz, 0, A->N * sizeof(int));
memset(A->index, 0, A->N * A->M * sizeof(int));
memset(A->value, 0, A->N * A->M * sizeof(REAL_T));
}
/** Allocate a matrix with uninitialized values.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param matrix_dimension The matrix size.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_noinit_matrix_ellpack) (
bml_matrix_dimension_t matrix_dimension,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
bml_noinit_allocate_memory(sizeof(bml_matrix_ellpack_t));
A->matrix_type = ellpack;
A->matrix_precision = MATRIX_PRECISION;
A->N = matrix_dimension.N_rows;
A->M = matrix_dimension.N_nz_max;
A->distribution_mode = distrib_mode;
A->index = bml_noinit_allocate_memory(sizeof(int) * A->N * A->M);
A->nnz = bml_allocate_memory(sizeof(int) * A->N);
A->value = bml_noinit_allocate_memory(sizeof(REAL_T) * A->N * A->M);
A->domain = bml_default_domain(A->N, A->M, distrib_mode);
A->domain2 = bml_default_domain(A->N, A->M, distrib_mode);
return A;
}
/** Allocate the zero matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_zero_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
bml_allocate_memory(sizeof(bml_matrix_ellpack_t));
A->matrix_type = ellpack;
A->matrix_precision = MATRIX_PRECISION;
A->N = N;
A->M = M;
A->distribution_mode = distrib_mode;
A->index = bml_allocate_memory(sizeof(int) * N * M);
A->nnz = bml_allocate_memory(sizeof(int) * N);
A->value = bml_allocate_memory(sizeof(REAL_T) * N * M);
A->domain = bml_default_domain(N, M, distrib_mode);
A->domain2 = bml_default_domain(N, M, distrib_mode);
return A;
}
/** Allocate a banded random matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_banded_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
#pragma omp parallel for shared(A_value, A_index, A_nnz)
for (int i = 0; i < N; i++)
{
int jind = 0;
for (int j = (i - M / 2 >= 0 ? i - M / 2 : 0);
j < (i - M / 2 + M <= N ? i - M / 2 + M : N); j++)
{
A_value[ROWMAJOR(i, jind, N, M)] = rand() / (REAL_T) RAND_MAX;
A_index[ROWMAJOR(i, jind, N, M)] = j;
jind++;
}
A_nnz[i] = jind;
}
return A;
}
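/* Example: N = 5, M = 3 yields a tridiagonal band; row i stores the column
 * indices max(0, i-1) .. min(N-1, i+1), so nnz per row is {2, 3, 3, 3, 2}. */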
/** Allocate a random matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*
* Note: Do not use OpenMP when setting values for a random matrix,
* this makes the operation non-repeatable.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_random_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
for (int i = 0; i < N; i++)
{
int jind = 0;
for (int j = 0; j < M; j++)
{
A_value[ROWMAJOR(i, jind, N, M)] = rand() / (REAL_T) RAND_MAX;
A_index[ROWMAJOR(i, jind, N, M)] = j;
jind++;
}
A_nnz[i] = jind;
}
return A;
}
/** Allocate the identity matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_identity_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
#pragma omp parallel for shared(A_value, A_index, A_nnz)
for (int i = 0; i < N; i++)
{
A_value[ROWMAJOR(i, 0, N, M)] = (REAL_T) 1.0;
A_index[ROWMAJOR(i, 0, N, M)] = i;
A_nnz[i] = 1;
}
return A;
}
|
broadcast_reduce-inl.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file broadcast_reduce-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
template <int ndim, typename OP>
struct binary_broadcast_kernel {
/*! \brief Map function for binary_broadcast_kernel */
template <typename IType, typename DType>
MSHADOW_XINLINE static void Map(index_t base,
index_t length,
OpReqType req,
const Shape<ndim>& lstride,
const Shape<ndim>& rstride,
const Shape<ndim>& oshape,
IType* lhs,
IType* rhs,
DType* out) {
Shape<ndim> coord = unravel(base, oshape);
auto lidx = static_cast<index_t>(dot(coord, lstride));
auto ridx = static_cast<index_t>(dot(coord, rstride));
KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
// starts from 1 to avoid extra inc at end of loop
for (index_t i = 1; i < length; ++i) {
inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
// When tuning, don't actually run the op, since it's not going to be tuned against
// the actual op we'll eventually be using
KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
}
}
/*! \brief Map function for binary_broadcast_kernel */
template <typename LType, typename RType, typename OType>
MSHADOW_XINLINE static void Map(index_t base,
index_t length,
OpReqType req,
const Shape<ndim>& lstride,
const Shape<ndim>& rstride,
const Shape<ndim>& oshape,
LType* lhs,
RType* rhs,
OType* out) {
Shape<ndim> coord = unravel(base, oshape);
auto lidx = static_cast<index_t>(dot(coord, lstride));
auto ridx = static_cast<index_t>(dot(coord, rstride));
KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
// starts from 1 to avoid extra inc at end of loop
for (index_t i = 1; i < length; ++i) {
inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
// When tuning, don't actually run the op, since it's not going to be tuned against
// the actual op we'll eventually be using
KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
}
}
/*! \brief Map function for binary_broadcast_kernel */
template <typename IType, typename DType>
MSHADOW_XINLINE static void Map(index_t base,
index_t length,
OpReqType req,
const Shape<ndim>& lstride,
const Shape<ndim>& rstride,
const Shape<ndim>& oshape,
IType lhs,
IType* rhs,
DType* out) {
Shape<ndim> coord = unravel(base, oshape);
auto lidx = static_cast<index_t>(dot(coord, lstride));
auto ridx = static_cast<index_t>(dot(coord, rstride));
KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
// starts from 1 to avoid extra inc at end of loop
for (index_t i = 1; i < length; ++i) {
inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
// When tuning, don't actually run the op, since it's not going to be tuned against
// the actual op we'll eventually be using
KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
}
}
/*! \brief Map function for binary_broadcast_kernel */
/* used for mixed type binary ops */
template <typename IType,
typename DType,
typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t base,
index_t length,
OpReqType req,
const Shape<ndim>& lstride,
const Shape<ndim>& rstride,
const Shape<ndim>& oshape,
IType* lhs,
DType* rhs,
DType* out) {
Shape<ndim> coord = unravel(base, oshape);
auto lidx = static_cast<index_t>(dot(coord, lstride));
auto ridx = static_cast<index_t>(dot(coord, rstride));
KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
// starts from 1 to avoid extra inc at end of loop
for (index_t i = 1; i < length; ++i) {
inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
// When tuning, don't actually run the op, since it's not going to be tuned against
// the actual op we'll eventually be using
KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
}
}
/*! \brief Map function for binary_broadcast_kernel */
/* used for mixed type binary ops */
template <
typename IType,
typename DType,
typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value,
int>::type = 0>
MSHADOW_XINLINE static void Map(index_t base,
index_t length,
OpReqType req,
const Shape<ndim>& lstride,
const Shape<ndim>& rstride,
const Shape<ndim>& oshape,
IType lhs,
DType* rhs,
DType* out) {
Shape<ndim> coord = unravel(base, oshape);
auto lidx = static_cast<index_t>(dot(coord, lstride));
auto ridx = static_cast<index_t>(dot(coord, rstride));
KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
// starts from 1 to avoid extra inc at end of loop
for (index_t i = 1; i < length; ++i) {
inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
// When tuning, don't actually run the op, since it's not going to be tuned against
// the actual op we'll eventually be using
KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
}
}
};
template <int req, typename OP, bool col_vec>
struct csr_dns_csr_broadcast_kernel {
/*!
* \brief Map function for broadcast between csr and 1D vector
* \param row global thread id/assigned row id
* \param csr_data ptr to data buffer of csr matrix
* \param csr_indices ptr to indices buffer of csr matrix
* \param csr_indptr ptr to indptr buffer of csr matrix
* \param dns ptr to data buffer of the dense vector
* \param out ptr to the data buffer of the result csr matrix
*/
template <typename DType, typename CType, typename RType>
MSHADOW_XINLINE static void Map(index_t row,
const DType* csr_data,
const CType* csr_indices,
const RType* csr_indptr,
const DType* dns,
DType* out) {
const nnvm::dim_t curr_row_i = csr_indptr[row];
const nnvm::dim_t next_row_i = csr_indptr[row + 1];
for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
KERNEL_ASSIGN(
out[iter], req, OP::Map(csr_data[iter], (col_vec) ? dns[row] : dns[csr_indices[iter]]));
}
}
/*!
* \brief Map function for broadcast between csr and a scalar
* \param i global thread id
* \param csr_data ptr to data buffer of csr matrix
* \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used
* \param out ptr to the data buffer of output csr matrix
* \param nnz number of non-zero elements in input csr matrix
*/
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
const DType* csr_data,
const DType* scalar_ptr,
DType* out,
const nnvm::dim_t nnz) {
const DType scale = scalar_ptr[0];
if (i < nnz) {
KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale));
}
}
};
template <int req, typename OP, bool reverse = false>
struct csr_dns_map_kernel {
template <typename DType, typename CType, typename RType>
MSHADOW_XINLINE static void Map(index_t row,
const DType* csr_data,
const CType* csr_indices,
const RType* csr_indptr,
DType* out,
const nnvm::dim_t num_rows,
const nnvm::dim_t num_cols) {
if (row < num_rows) {
const nnvm::dim_t curr_row_i = csr_indptr[row];
const nnvm::dim_t next_row_i = csr_indptr[row + 1];
for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
const nnvm::dim_t target = row * num_cols + csr_indices[iter];
KERNEL_ASSIGN(
out[target],
req,
reverse ? OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target]));
}
}
}
};
} // namespace mxnet_op
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
template <int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx,
const Shape<ndim>& shape,
const Shape<ndim>& stridej,
const Shape<ndim>& stridek,
index_t* j,
index_t* k) {
*j = 0;
*k = 0;
#pragma unroll
for (index_t i = ndim - 1, idx_t = idx; i >= 0; --i) {
const auto tmp = idx_t / shape[i];
const auto coord = idx_t - tmp * shape[i];
*j += coord * stridej[i];
*k += coord * stridek[i];
idx_t = tmp;
}
}
template <int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small,
const Shape<ndim>& big,
Shape<ndim>* dims,
Shape<ndim>* stride) {
int mdim = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}
index_t s = 1;
#pragma unroll
for (int i = ndim - 1, j = mdim; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
(*dims)[j] = big[i];
}
s *= big[i];
}
return mdim;
}
template <typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
if (addto) {
*dst += src;
} else {
*dst = src;
}
}
template <int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx,
const bool addto,
const DType* __restrict lhs,
const DType* __restrict rhs,
DType* out,
const Shape<ndim>& lshape,
const Shape<ndim>& rshape,
const Shape<ndim>& oshape) {
const Shape<ndim> coord = mxnet_op::unravel(idx, oshape);
const index_t j = mxnet_op::ravel(coord, lshape);
const index_t k = mxnet_op::ravel(coord, rshape);
assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
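// Reduces the sub-range [start, start + len) of the M reduction elements that
// map onto output offset j. seq_reduce_assign uses it to split one long
// reduction across OpenMP threads and then merges the partial
// (value, residual) pairs with Reducer::Merge.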
template <typename Reducer,
int ndim,
typename AType,
typename DType,
typename OType,
typename OP,
typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
MSHADOW_XINLINE std::pair<AType, AType> seq_reduce_assign_block(size_t start,
size_t len,
size_t j,
const DType* __restrict big,
const Shape<ndim>& rshape,
const Shape<ndim>& rstride) {
Shape<ndim> coord;
AType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = start; k < start + len; ++k) {
coord = mxnet_op::unravel(k, rshape);
AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]);
if (IndexOP::do_op)
IndexOP::Op(&temp, k);
Reducer::Reduce(val, temp, residual);
}
return std::make_pair(val, residual);
}
template <typename Reducer,
int ndim,
typename AType,
typename DType,
typename OType,
typename OP,
typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx,
const size_t M,
const bool addto,
const DType* __restrict big,
OType* small,
const Shape<ndim>& bshape,
const Shape<ndim>& sshape,
const Shape<ndim>& rshape,
const Shape<ndim>& rstride,
const bool use_omp = false) {
Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
index_t j = mxnet_op::ravel(coord, bshape);
AType val, residual;
Reducer::SetInitValue(val, residual);
if (!use_omp) {
for (size_t k = 0; k < M; ++k) {
coord = mxnet_op::unravel(k, rshape);
AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]);
// argmin/max, set IndexedNum.idx
if (IndexOP::do_op)
IndexOP::Op(&temp, k);
Reducer::Reduce(val, temp, residual);
}
} else {
const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
auto pairs = std::make_unique<std::pair<AType, AType>[]>(thread_count);
#pragma omp parallel for num_threads(thread_count)
for (int i = 0; i < thread_count; ++i) {
pairs[i] = seq_reduce_assign_block<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
i * (M / thread_count),
i < (thread_count - 1) ? (M / thread_count) : (M / thread_count) + M % thread_count,
j,
big,
rshape,
rstride);
}
for (int i = 0; i < thread_count; ++i) {
Reducer::Merge(val, residual, pairs[i].first, pairs[i].second);
}
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, OType(val));
}
namespace {
// Returns the stride with which the fastest dimension is moving.
// Used to detect memory access scatter.
inline int fastest_stride(const TShape& small, const TShape& big, const TShape& big_stride) {
const int ndim = small.ndim();
for (int i = ndim - 1; i >= 0; --i) {
if (big[i] != 1) {
return (small[i] == big[i]) ? 1 : big_stride[i];
}
}
return 1;
}
} // namespace
template <int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu>* s,
const OpReqType req,
const TBlob& lhs,
const TBlob& rhs,
const TBlob& out) {
mshadow::Shape<ndim> oshape = out.shape_.get<ndim>();
mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>());
mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>::template LaunchEx(
s,
out.shape_.Size(),
req,
lstride,
rstride,
oshape,
lhs.dptr<DType>(),
rhs.dptr<DType>(),
out.dptr<DType>());
}
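// CPU reduction of `big` into `small`. With at least as many outputs (N) as
// OMP threads, the loop over outputs is parallelized; otherwise each reduction
// of M elements is itself split across threads (use_omp = true).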
template <typename Reducer,
int ndim,
typename AType,
typename DType,
typename OType,
typename OP,
typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
void seq_reduce_compute(const size_t N,
const size_t M,
const bool addto,
const DType* big,
OType* small,
const Shape<ndim> bshape,
const Shape<ndim> sshape,
const Shape<ndim> rshape,
const Shape<ndim> rstride) {
const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (N >= thread_count) {
#pragma omp parallel for num_threads(thread_count)
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
idx, M, addto, big, small, bshape, sshape, rshape, rstride, false);
}
} else {
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
idx, M, addto, big, small, bshape, sshape, rshape, rstride, true);
}
}
}
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N,
const size_t M,
const bool addto,
const DType* big,
DType* small,
const Shape<ndim> bshape,
const Shape<ndim> sshape,
const Shape<ndim> rshape,
const Shape<ndim> rstride,
const index_t* ws_dptr) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
index_t j = mxnet_op::ravel(coord, bshape);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
}
assign(&small[idx], addto, val);
}
}
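// Entry point for CPU reductions without extra workspace. With safe_acc the
// accumulation runs in a wider type selected by MXNET_ACC_TYPE_SWITCH and is
// cast back to the output type of `small` at the end.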
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void Reduce(Stream<cpu>* s,
const TBlob& small,
const OpReqType req,
const Tensor<cpu, 1, char>& workspace,
const TBlob& big) {
if (req == kNullOp)
return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size(), M = rshape.Size();
if (!safe_acc) {
seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(N,
M,
req == kAddTo,
big.dptr<DType>(),
small.dptr<DType>(),
big.shape_.get<ndim>(),
small.shape_.get<ndim>(),
rshape,
rstride);
} else {
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(N,
M,
req == kAddTo,
big.dptr<DataType>(),
small.dptr<OutType>(),
big.shape_.get<ndim>(),
small.shape_.get<ndim>(),
rshape,
rstride);
});
});
}
}
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceBool(Stream<cpu>* s,
const TBlob& small,
const OpReqType req,
const Tensor<cpu, 1, char>& workspace,
const TBlob& big) {
if (req == kNullOp)
return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size(), M = rshape.Size();
seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>(N,
M,
req == kAddTo,
big.dptr<DType>(),
small.dptr<bool>(),
big.shape_.get<ndim>(),
small.shape_.get<ndim>(),
rshape,
rstride);
}
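// Variant of Reduce() that precomputes the M reduction offsets
// dot(unravel(k, rshape), rstride) into the caller-provided workspace once,
// trading M * sizeof(index_t) bytes for cheaper per-element addressing.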
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s,
const TBlob& small,
const OpReqType req,
const Tensor<cpu, 1, char>& workspace,
const TBlob& big) {
using namespace mxnet_op;
if (req == kNullOp)
return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_);
size_t N = small.shape_.Size(), M = rshape.Size();
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t k = 0; k < static_cast<index_t>(M); k++) {
Shape<ndim> coord = mxnet_op::unravel(k, rshape);
ws_dptr[k] = mxnet_op::dot(coord, rstride);
}
seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(N,
M,
req == kAddTo,
big.dptr<DType>(),
small.dptr<DType>(),
big.shape_.get<ndim>(),
small.shape_.get<ndim>(),
rshape,
rstride,
ws_dptr);
}
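// The CPU path needs no auxiliary reduction workspace, hence both overloads
// below return 0.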
inline size_t ReduceWorkspaceSize(Stream<cpu>* s,
const mxnet::TShape& small,
const OpReqType req,
const mxnet::TShape& big) {
return 0;
}
inline size_t ReduceWorkspaceSize(Stream<cpu>* s,
const mxnet::TShape& small,
const OpReqType req,
const mxnet::TShape& big,
const mxnet::TShape& lhs,
const mxnet::TShape& rhs) {
return 0;
}
#if MXNET_USE_CUDA
namespace {
constexpr int warpSize = 32;
constexpr int unroll_reduce = 2;
// Returns a/b integer division rounded up
template <typename Type>
Type ceil_idiv(const Type a, const Type b) {
return (a + b - 1) / b;
}
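// Rough estimate of the number of warp-level loads issued when X threads each
// walk Y elements of up to three operands with the given fastest strides:
// stride 1 lets a full warp coalesce into ~1 load, while larger strides
// scatter it into up to warpSize loads.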
uint64_t calc_num_load(const int X, const int Y, const int* strides) {
// Number of full warps
uint64_t num_full_warp = X / warpSize;
// Length of the partial warp i.e. number of threads that are performing loads
uint64_t len_part_warp = X % warpSize;
uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) +
std::min(warpSize, strides[2])) *
num_full_warp;
uint64_t num_load_part =
(std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[0], warpSize)) +
std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[1], warpSize)) +
std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[2], warpSize))) *
(len_part_warp != 0);
uint64_t num_load = (num_load_full + num_load_part) * (uint64_t)Y;
return num_load;
}
inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) {
int ndim = small.ndim();
int mdim = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}
index_t s = 1;
#pragma unroll
for (int i = ndim - 1, j = mdim; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
(*dims)[j] = big[i];
}
s *= big[i];
}
return mdim;
}
constexpr int nthread_reduce = 512;
constexpr index_t kBaseGridNum = 1024;
} // namespace
// Configuration for ReduceImpl()
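// N: number of output elements; M: reduction length per output element;
// Mnext: number of chunks M is split into when one thread block cannot cover
// it (Mnext > 1 adds a second kernel pass and a workspace buffer).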
struct ReduceImplConfig {
index_t N;
index_t M;
index_t Mnext;
struct {
dim3 blockDim;
dim3 gridDim;
int shMemSize;
bool do_transpose;
} kernel_1;
struct {
int blockSize;
int gridSize;
} kernel_2;
size_t workspace_size;
TShape rshape, rstride;
TShape lhs_shape, lhs_stride;
TShape rhs_shape, rhs_stride;
inline ReduceImplConfig(const ::mxnet::TShape& small,
const ::mxnet::TShape& big,
const ::mxnet::TShape* lhs,
const ::mxnet::TShape* rhs)
: rshape(small.ndim(), 1),
rstride(small.ndim(), 1),
lhs_shape(small.ndim(), 1),
lhs_stride(small.ndim(), 1),
rhs_shape(small.ndim(), 1),
rhs_stride(small.ndim(), 1) {
// The largest reduction type currently is (index_t, double) struct
// aligned to 16B
constexpr size_t max_type_size = 2 * sizeof(double);
constexpr int maxLoopPerTB = 64;
int ndim = small.ndim();
diff(small, big, &rshape, &rstride);
N = small.Size();
M = rshape[0];
for (int i = 1; i < ndim; ++i) {
M *= rshape[i];
}
bool multiOp = false;
if (lhs != nullptr) {
CHECK_NOTNULL(rhs);
diff(small, *lhs, &lhs_shape, &lhs_stride);
diff(small, *rhs, &rhs_shape, &rhs_stride);
multiOp = true;
}
workspace_size = 0;
kernel_1.shMemSize = 0;
kernel_1.do_transpose = false;
if (M == 1) {
kernel_1.blockDim.x = nthread_reduce;
kernel_1.gridDim.x = std::min(
kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1) / kernel_1.blockDim.x));
} else {
int reduce_strides[3];
reduce_strides[0] = fastest_stride(small, big, big);
reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1;
reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1;
int reduce_strides_transp[3];
reduce_strides_transp[0] = fastest_stride(small, rshape, rstride);
reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1;
reduce_strides_transp[2] = (multiOp) ? fastest_stride(small, rhs_shape, rhs_stride) : 1;
uint64_t num_load = calc_num_load(N, M, reduce_strides);
uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp);
Mnext = 1;
kernel_1.do_transpose = (num_load > num_load_transp);
kernel_1.blockDim.x = 0;
kernel_1.blockDim.y = 0;
if (kernel_1.do_transpose) {
// Fastest thread ID goes through M
// Loop over N has step size kernel_1.blockDim.y
if (N < 8) {
kernel_1.blockDim.y = 1;
} else if (N < 256) {
kernel_1.blockDim.y = 4;
} else {
if (M < 8) {
kernel_1.blockDim.x = 1;
} else if (M < 256) {
kernel_1.blockDim.x = 4;
} else {
kernel_1.blockDim.x = warpSize;
}
}
} else {
// Fastest thread ID goes through N
// Loop over M has step size kernel_1.blockDim.y
if (M < 8) {
kernel_1.blockDim.y = 1;
} else if (M < 256) {
kernel_1.blockDim.y = 4;
} else {
if (N < 8) {
kernel_1.blockDim.x = 1;
} else if (N < 256) {
kernel_1.blockDim.x = 4;
} else {
kernel_1.blockDim.x = warpSize;
}
}
}
if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) {
LOG(FATAL) << "Unable to set blockDim";
} else if (kernel_1.blockDim.x == 0) {
kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y;
} else if (kernel_1.blockDim.y == 0) {
kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x;
}
if (kernel_1.do_transpose) {
// Fastest thread ID goes through M
kernel_1.gridDim.x =
std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y));
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
int by = kernel_1.blockDim.y;
if (kernel_1.blockDim.y % warpSize == 0) {
// Fix shared memory bank conflict
by++;
}
kernel_1.shMemSize =
(kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x * by * max_type_size * 2 : 0;
// Maximum number of times we want TB to loop in M
// Max size of M-block each TB can handle
int maxMblock = kernel_1.blockDim.x * maxLoopPerTB;
Mnext = (M + maxMblock - 1) / maxMblock;
} else {
// Fastest thread ID goes through N
kernel_1.gridDim.x =
std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x));
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
kernel_1.shMemSize = (kernel_1.blockDim.y > 1)
? kernel_1.blockDim.x * kernel_1.blockDim.y * max_type_size * 2
: 0;
// Maximum number of times we want TB to loop in M
// Max size of M-block each TB can handle
int maxMblock = kernel_1.blockDim.y * maxLoopPerTB;
Mnext = (M + maxMblock - 1) / maxMblock;
}
if (Mnext > 1) {
// small_dptr[] is N*Mnext*type_size bytes
workspace_size += N * Mnext * max_type_size;
// Set gridDim.y to Mnext
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
}
if (Mnext > 1) {
kernel_2.blockSize = nthread_reduce;
kernel_2.gridSize = std::min(
kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1) / kernel_2.blockSize));
}
}
}
};
inline size_t ReduceWorkspaceSize(Stream<gpu>* s,
const ::mxnet::TShape& small,
const OpReqType req,
const ::mxnet::TShape& big) {
if (req == kNullOp)
return 0;
ReduceImplConfig config(small, big, nullptr, nullptr);
return config.workspace_size;
}
inline size_t ReduceWorkspaceSize(Stream<gpu>* s,
const ::mxnet::TShape& small,
const OpReqType req,
const ::mxnet::TShape& big,
const ::mxnet::TShape& lhs,
const ::mxnet::TShape& rhs) {
if (req == kNullOp)
return 0;
ReduceImplConfig config(small, big, &lhs, &rhs);
return config.workspace_size;
}
#endif // MXNET_USE_CUDA
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx,
const size_t M,
const bool addto,
const DType* __restrict big,
const DType* __restrict lhs,
const DType* __restrict rhs,
DType* small,
const Shape<ndim>& big_shape,
const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape,
const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape,
const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride,
const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride) {
Shape<ndim> coord = mxnet_op::unravel(idx, small_shape);
const index_t idx_big0 = mxnet_op::ravel(coord, big_shape);
const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0);
const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
Shape<ndim> coord_big = mxnet_op::unravel(k, rshape);
index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride);
Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape);
index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape);
index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride);
Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, val);
}
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N,
const size_t M,
const bool addto,
const DType* big,
const DType* lhs,
const DType* rhs,
DType* small,
const Shape<ndim> big_shape,
const Shape<ndim> small_shape,
const Shape<ndim> rshape,
const Shape<ndim> rstride,
const Shape<ndim> lhs_shape,
const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape,
const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx,
M,
addto,
big,
lhs,
rhs,
small,
big_shape,
lhs_shape0,
rhs_shape0,
small_shape,
rshape,
lhs_shape,
rhs_shape,
rstride,
lhs_stride,
rhs_stride);
}
}
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu>* s,
const TBlob& small,
const OpReqType req,
const Tensor<cpu, 1, char>& workspace,
const TBlob& big,
const TBlob& lhs,
const TBlob& rhs) {
if (req == kNullOp)
return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size();
size_t M = rshape.Size();
Shape<ndim> lhs_shape, lhs_stride;
diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
Shape<ndim> rhs_shape, rhs_stride;
diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(N,
M,
req == kAddTo,
big.dptr<DType>(),
lhs.dptr<DType>(),
rhs.dptr<DType>(),
small.dptr<DType>(),
big.shape_.get<ndim>(),
small.shape_.get<ndim>(),
rshape,
rstride,
lhs_shape,
lhs_stride,
rhs_shape,
rhs_stride,
lhs.shape_.get<ndim>(),
rhs.shape_.get<ndim>());
}
#if MXNET_USE_CUDA
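// Runtime-compiled (RTC) GPU reductions: the reducer and element-wise op(s)
// are passed as source strings and compiled on demand for the given ndim.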
void RTCReduce(const OpContext& ctx,
const TBlob& small,
const OpReqType req,
const Tensor<gpu, 1, char>& workspace,
const TBlob& big,
const std::string& reducer,
int ndim,
const std::string& OP,
const bool use_index = false);
void RTCReduce(const OpContext& ctx,
const TBlob& small,
const OpReqType req,
const Tensor<gpu, 1, char>& workspace,
const TBlob& big,
const TBlob& lhs,
const TBlob& rhs,
const std::string& reducer,
int ndim,
const std::string& OP1,
const std::string& OP2);
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
BenchUtils.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "./AlignedVec.h"
namespace fbgemm {
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);
aligned_vector<float> getRandomSparseVector(
unsigned size,
float fractionNonZeros = 1.0);
void llc_flush(std::vector<char>& llc);
int fbgemm_get_num_threads();
int fbgemm_get_thread_num();
/**
 * Runs fn() warmupIterations times, then measures measuredIterations calls and
 * returns the average wall-clock time per measured call in seconds.
 * @param llc if not nullptr, flush llc before each iteration
 * @param useOpenMP if true, every OpenMP thread runs fn() concurrently and the
 *        timing of thread 0 is reported
 */
template <class Fn>
double measureWithWarmup(
Fn&& fn,
int warmupIterations,
int measuredIterations,
std::vector<char>* llc = nullptr,
bool useOpenMP = false) {
for (int i = 0; i < warmupIterations; ++i) {
if (llc) {
llc_flush(*llc);
}
fn();
}
double ttot = 0.0;
#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
#endif
for (int i = 0; i < measuredIterations; ++i) {
int thread_id = 0;
std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
#ifdef _OPENMP
if (useOpenMP) {
thread_id = omp_get_thread_num();
}
#endif
if (llc && thread_id == 0) {
llc_flush(*llc);
}
#ifdef _OPENMP
if (useOpenMP) {
#pragma omp barrier
}
#endif
start = std::chrono::high_resolution_clock::now();
fn();
end = std::chrono::high_resolution_clock::now();
auto dur =
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);
if (thread_id == 0) {
// TODO: measure load imbalance
ttot += dur.count();
}
}
return ttot / 1e9 / measuredIterations;
}
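// Minimal usage sketch (illustrative only; `run_kernel_under_test` and the
// buffer size are hypothetical, not part of this header):
//
//   std::vector<char> llc(128 * 1024 * 1024); // large enough to cover the LLC
//   double secs = measureWithWarmup(
//       [&] { run_kernel_under_test(); },
//       /*warmupIterations=*/10,
//       /*measuredIterations=*/100,
//       &llc,
//       /*useOpenMP=*/true);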
} // namespace fbgemm
|
Hola_mundo_paralelo.c
|
#include <stdio.h>
#include <omp.h>
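/* Build sketch (assumption, not from the original file):
 *   gcc -fopenmp Hola_mundo_paralelo.c -o hola_mundo_paralelo
 */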
int main()
{
  int X;
  #pragma omp parallel num_threads(4)
  //#pragma omp parallel
  {
    /* Thread id and team size are per-thread values, so keep them local to
     * the parallel region instead of sharing them across all threads. */
    int i;
    int tid = omp_get_thread_num();
    int nth = omp_get_num_threads();
    printf("Hola Mundo\n");            /* "Hello World" */
    X = omp_get_max_threads();         /* every thread writes the same value */
    printf("DISPONIBLES: %d \n", X);   /* "AVAILABLE: %d" */
    /* "Iteration %d from thread %d out of %d" */
    for (i = 0; i < 10; i++)
      printf("Iteracion: %d desde el hilo %d de un total de %d\n", i, tid, nth);
  }
  printf("Adios \n");                  /* "Goodbye" */
  return 0;
}
|
deconvolution_3x3.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
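    // Transposed convolution, 3x3 kernel, stride 1: every input pixel (i, j)
    // scatters val * k into the 3x3 output window whose top-left corner is
    // (i, j), written through three consecutive output rows outptr0/1/2.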
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*9 + q*9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.data + out.w * i;
float* outptr0 = outptr;
float* outptr1 = outptr + outw;
float* outptr2 = outptr + outw*2;
int j = 0;
#if __ARM_NEON
for (; j+3 < w; j+=4)
{
float32x4_t _v = vld1q_f32(r0);
#if 0 // bad compilers generate slow instructions for this version :(
// 0
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1);
// ext
float32x4_t _zero_out01 = vdupq_n_f32(0.f);
_zero_out01 = vextq_f32(_zero_out01, _out01, 3);
_out00 = vaddq_f32(_out00, _zero_out01);
//
float32x2_t _out00low = vget_low_f32(_out00);
float32x2_t _out00high = vget_high_f32(_out00);
_out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0);
_out00 = vcombine_f32(_out00low, _out00high);
vst1q_f32(outptr0 + 0, _out00);
//
float32x2_t _out02high = vld1_f32(outptr0 + 4);
float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1);
_out02high = vadd_f32(_out02high, _out01_zero);
_out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0);
vst1_f32(outptr0 + 4, _out02high);
// 1
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1);
// ext
float32x4_t _zero_out11 = vdupq_n_f32(0.f);
_zero_out11 = vextq_f32(_zero_out11, _out11, 3);
_out10 = vaddq_f32(_out10, _zero_out11);
//
float32x2_t _out10low = vget_low_f32(_out10);
float32x2_t _out10high = vget_high_f32(_out10);
_out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0);
_out10 = vcombine_f32(_out10low, _out10high);
vst1q_f32(outptr1 + 0, _out10);
//
float32x2_t _out12high = vld1_f32(outptr1 + 4);
float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1);
_out12high = vadd_f32(_out12high, _out11_zero);
_out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0);
vst1_f32(outptr1 + 4, _out12high);
// 2
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1);
// ext
float32x4_t _zero_out21 = vdupq_n_f32(0.f);
_zero_out21 = vextq_f32(_zero_out21, _out21, 3);
_out20 = vaddq_f32(_out20, _zero_out21);
//
float32x2_t _out20low = vget_low_f32(_out20);
float32x2_t _out20high = vget_high_f32(_out20);
_out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0);
_out20 = vcombine_f32(_out20low, _out20high);
vst1q_f32(outptr2 + 0, _out20);
//
float32x2_t _out22high = vld1_f32(outptr2 + 4);
float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1);
_out22high = vadd_f32(_out22high, _out21_zero);
_out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0);
vst1_f32(outptr2 + 4, _out22high);
#else
//
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
vst1q_f32(outptr0 + 0, _out00);
float32x4_t _out01 = vld1q_f32(outptr0 + 1);
_out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
vst1q_f32(outptr0 + 1, _out01);
float32x4_t _out02 = vld1q_f32(outptr0 + 2);
_out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
vst1q_f32(outptr0 + 2, _out02);
//
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
vst1q_f32(outptr1 + 0, _out10);
float32x4_t _out11 = vld1q_f32(outptr1 + 1);
_out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
vst1q_f32(outptr1 + 1, _out11);
float32x4_t _out12 = vld1q_f32(outptr1 + 2);
_out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
vst1q_f32(outptr1 + 2, _out12);
//
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
vst1q_f32(outptr2 + 0, _out20);
float32x4_t _out21 = vld1q_f32(outptr2 + 1);
_out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
vst1q_f32(outptr2 + 1, _out21);
float32x4_t _out22 = vld1q_f32(outptr2 + 2);
_out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
vst1q_f32(outptr2 + 2, _out22);
#endif
r0 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0++;
outptr1++;
outptr2++;
}
}
}
}
}
static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
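    // Transposed convolution, 3x3 kernel, stride 2: input pixel (i, j) scatters
    // into the 3x3 output window at (2*i, 2*j), so the output pointers advance
    // by 2 per input column; the NEON path uses vld2q/vst2q to update the
    // even/odd output columns separately.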
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*9 + q*9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.data + outw * i*2;
float* outptr0 = outptr;
float* outptr1 = outptr0 + outw;
float* outptr2 = outptr1 + outw;
int j = 0;
#if __ARM_NEON
for (; j+3 < w; j+=4)
{
float32x4_t _v = vld1q_f32(r0);
// out row 0
float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6
float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7
float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8
float32x4x2_t _out0 = vld2q_f32(outptr0);
_out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6
_out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7
vst2q_f32(outptr0, _out0);
_out0 = vld2q_f32(outptr0 + 2);
_out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8
vst2q_f32(outptr0 + 2, _out0);
// out row 1
float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6
float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7
float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8
float32x4x2_t _out1 = vld2q_f32(outptr1);
_out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6
_out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7
vst2q_f32(outptr1, _out1);
_out1 = vld2q_f32(outptr1 + 2);
_out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8
vst2q_f32(outptr1 + 2, _out1);
// out row 2
float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6
float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7
float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8
float32x4x2_t _out2 = vld2q_f32(outptr2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6
_out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7
vst2q_f32(outptr2, _out2);
_out2 = vld2q_f32(outptr2 + 2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8
vst2q_f32(outptr2 + 2, _out2);
r0 += 4;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
}
}
}
}
}
|
mandel-omp-taskloop-row.c
|
/*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 * windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
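/* Example invocation (illustrative):
 *   mandel -i 2000 -c -0.5 0 -s 1.5 -w 1024
 * computes a 1024x1024 image of the square of size 3 by 3 centered at
 * -0.5 + 0i, with at most 2000 iterations per point.
 */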
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define G 4 /* grainsize */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel
#pragma omp single
#pragma omp taskloop grainsize(G)
  /* row and col are declared inside the loops so that every task generated by
   * the taskloop gets its own copies; a shared col would be a data race. */
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
    fprintf(stderr, "Unable to initialize display\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
  if (fp != NULL)
  {
    for (int row = 0; row < height; ++row)
      if (fwrite(output[row], sizeof(int), width, fp) != (size_t) width) {
        fprintf(stderr, "Output file not written correctly\n");
      }
    fclose(fp);
  }
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
  return EXIT_SUCCESS;
}
|