gt_gtf.c
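/*
 * gt_gtf.c -- GTF annotation support for the GEMTools library.
 * Descriptive summary of the code below: it parses GTF files (optionally with
 * multiple threads), interns shared strings (types, gene/transcript ids,
 * gene types), builds a per-reference interval tree over all entries, and
 * answers overlap queries used for exon/intron/gene counting, annotated
 * junction checks, and transcript coverage statistics.
 */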
#include "gt_gtf.h" GT_INLINE gt_gtf_entry* gt_gtf_entry_new(const uint64_t start, const uint64_t end, const gt_strand strand, gt_string* const type){ gt_gtf_entry* entry = malloc(sizeof(gt_gtf_entry)); entry->uid = 0; entry->start = start; entry->end = end; entry->num_children = 0; entry->type = type; entry->strand = strand; entry->gene_type = NULL; entry->gene_id = NULL; entry->transcript_id = NULL; entry->length = 0; return entry; } GT_INLINE void gt_gtf_entry_delete(gt_gtf_entry* const entry){ free(entry); } GT_INLINE gt_gtf_ref* gt_gtf_ref_new(void){ gt_gtf_ref* ref = malloc(sizeof(gt_gtf_ref)); ref->entries = gt_vector_new(GTF_DEFAULT_ENTRIES, sizeof(gt_gtf_entry*)); return ref; } GT_INLINE void gt_gtf_ref_delete(gt_gtf_ref* const ref){ register uint64_t s = gt_vector_get_used(ref->entries); register uint64_t i = 0; for(i=0; i<s; i++){ gt_gtf_entry_delete( (gt_vector_get_elm(ref->entries, i, gt_gtf_entry))); } gt_vector_delete(ref->entries); free(ref); } GT_INLINE gt_gtf* gt_gtf_new(void){ gt_gtf* gtf = malloc(sizeof(gt_gtf)); gtf->refs = gt_shash_new(); gtf->types = gt_shash_new(); gtf->gene_ids = gt_shash_new(); gtf->transcript_ids = gt_shash_new(); gtf->gene_types = gt_shash_new(); gtf->genes = gt_shash_new(); gtf->transcripts = gt_shash_new(); return gtf; } GT_INLINE void gt_gtf_delete(gt_gtf* const gtf){ gt_shash_delete(gtf->refs, true); gt_shash_delete(gtf->types, true); gt_shash_delete(gtf->gene_ids, true); gt_shash_delete(gtf->transcript_ids, true); gt_shash_delete(gtf->gene_types, true); gt_shash_delete(gtf->genes, false); gt_shash_delete(gtf->transcripts, false); free(gtf); } GT_INLINE gt_gtf_hits* gt_gtf_hits_new(void){ gt_gtf_hits* hits = malloc(sizeof(gt_gtf_hits)); hits->exon_hits = gt_vector_new(16, sizeof(gt_gtf_hit*)); hits->num_genes = 0; hits->num_protein_coding =0; hits->num_paired_genes =0; return hits; } GT_INLINE void gt_gtf_hits_delete(gt_gtf_hits* const hits){ gt_gtf_hits_clear(hits); gt_vector_delete(hits->exon_hits); free(hits); } GT_INLINE void gt_gtf_hits_clear(gt_gtf_hits* const hits){ uint64_t i = 0; for(i=0; i<gt_vector_get_used(hits->exon_hits); i++){ gt_gtf_hit* hit = *gt_vector_get_elm(hits->exon_hits, i, gt_gtf_hit*); gt_gtf_hit_delete(hit); } hits->num_genes = 0; hits->num_protein_coding =0; hits->num_paired_genes =0; hits->junction_hit_ration = 0.0; gt_vector_clear(hits->exon_hits); } GT_INLINE gt_gtf_count_parms* gt_gtf_count_params_new(bool coverage){ gt_gtf_count_parms* p = gt_malloc_(1, sizeof(gt_gtf_count_parms), false, false); p->num_maps = 0; p->exon_overlap = 0; p->unweighted_counts = true; p->single_pair_counts = false; p->num_junctions = 0; p->count_bases = false; p->num_annotated_junctions = 0; if(coverage){ p->single_transcript_coverage = GT_GTF_INIT_COVERAGE(); p->gene_body_coverage = GT_GTF_INIT_COVERAGE(); }else{ p->single_transcript_coverage = NULL; p->gene_body_coverage = NULL; } return p; } GT_INLINE void gt_gtf_count_params_delete(gt_gtf_count_parms* params){ if(params->single_transcript_coverage != NULL){ free(params->single_transcript_coverage); } if(params->gene_body_coverage != NULL){ free(params->gene_body_coverage); } free(params); } GT_INLINE gt_string* gt_gtf_get_type(const gt_gtf* const gtf, char* const type){ if(!gt_gtf_contains_type(gtf, type)){ gt_string* s = gt_string_set_new(type); gt_shash_insert_string(gtf->types, type, s); } return gt_shash_get(gtf->types, type, gt_string); } GT_INLINE bool gt_gtf_contains_type(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->types, name); } 
GT_INLINE gt_gtf_ref* gt_gtf_get_ref(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_ref(gtf, name)){ gt_gtf_ref* rr = gt_gtf_ref_new(); gt_shash_insert(gtf->refs, name, rr, gt_gtf_ref*); } return gt_shash_get(gtf->refs, name, gt_gtf_ref); } GT_INLINE bool gt_gtf_contains_ref(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->refs, name); } GT_INLINE gt_string* gt_gtf_get_gene_id(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_gene_id(gtf, name)){ gt_string* const gene_id = gt_string_set_new(name); gt_shash_insert(gtf->gene_ids, name, gene_id, gt_string*); } return gt_shash_get(gtf->gene_ids, name, gt_string); } GT_INLINE bool gt_gtf_contains_gene_id(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->gene_ids, name); } GT_INLINE gt_string* gt_gtf_get_transcript_id(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_transcript_id(gtf, name)){ gt_string* const gene_id = gt_string_set_new(name); gt_shash_insert(gtf->transcript_ids, name, gene_id, gt_string*); } return gt_shash_get(gtf->transcript_ids, name, gt_string); } GT_INLINE bool gt_gtf_contains_transcript_id(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->transcript_ids, name); } GT_INLINE gt_string* gt_gtf_get_gene_type(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_gene_type(gtf, name)){ gt_string* const gene_type = gt_string_set_new(name); gt_shash_insert(gtf->gene_types, name, gene_type, gt_string*); } return gt_shash_get(gtf->gene_types, name, gt_string); } GT_INLINE bool gt_gtf_contains_gene_type(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->gene_types, name); } GT_INLINE gt_gtf_entry* gt_gtf_get_gene_by_id(const gt_gtf* const gtf, char* const key){ if(gt_shash_is_contained(gtf->genes, key)){ return gt_shash_get_element(gtf->genes, key); } return NULL; } GT_INLINE gt_gtf_entry* gt_gtf_get_transcript_by_id(const gt_gtf* const gtf, char* const key){ if(gt_shash_is_contained(gtf->transcripts, key)){ return gt_shash_get_element(gtf->transcripts, key); } return NULL; } /** * Comparator that compares two gtf_entries by starting position */ GT_INLINE int gt_gtf_sort_by_start_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){ uint64_t p1 = (*a)->start; uint64_t p2 = (*b)->start; return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type )); } /** * Comparator that compares two gtf_entries by ending position */ GT_INLINE int gt_gtf_sort_by_end_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){ uint64_t p1 = (*a)->end; uint64_t p2 = (*b)->end; return p1 < p2 ? -1 : (p1>p2 ? 
1 : gt_string_cmp( (*a)->type, (*b)->type )); } /** * Sort vector of gt_gtf_entries by starting position */ GT_INLINE void gt_gtf_sort_by_start(gt_vector* entries) { qsort(gt_vector_get_mem(entries, gt_gtf_entry*), gt_vector_get_used(entries), sizeof(gt_gtf_entry**), (int (*)(const void *,const void *))gt_gtf_sort_by_start_cmp_); } /** * Sort vector of gt_gtf_entries by ending position */ GT_INLINE void gt_gtf_sort_by_end( gt_vector* entries) { qsort(gt_vector_get_mem(entries, gt_gtf_entry*), gt_vector_get_used(entries), sizeof(gt_gtf_entry**), (int (*)(const void *,const void *))gt_gtf_sort_by_end_cmp_); } GT_INLINE gt_gtf_node* gt_gtf_create_node(gt_vector* entries){ const uint64_t len = gt_vector_get_used(entries); if(len == 0){ return NULL; } gt_gtf_node* const node = malloc(sizeof(gt_gtf_node)); const gt_gtf_entry* mid = *gt_vector_get_elm(entries, len/2, gt_gtf_entry*); node->midpoint = mid->start + ((mid->end - mid->start)/2); node->entries_by_end = gt_vector_new(16, sizeof(gt_gtf_entry*)); node->entries_by_start = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_vector* to_left = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_vector* to_right = gt_vector_new(16, sizeof(gt_gtf_entry*)); GT_VECTOR_ITERATE(entries, element, counter, gt_gtf_entry*){ if((*element)->end < node->midpoint){ gt_vector_insert(to_left, (*element), gt_gtf_entry*); }else if((*element)->start > node->midpoint){ gt_vector_insert(to_right, (*element), gt_gtf_entry*); }else{ gt_vector_insert(node->entries_by_end, (*element), gt_gtf_entry*); gt_vector_insert(node->entries_by_start, (*element), gt_gtf_entry*); } } // sort the start and end lists gt_gtf_sort_by_start(node->entries_by_start); gt_gtf_sort_by_end(node->entries_by_end); // delete incoming entry list gt_vector_delete(entries); if(gt_vector_get_used(to_left) > 0){ // create left node node->left = gt_gtf_create_node(to_left); }else{ node->left = NULL; gt_vector_delete(to_left); } if(gt_vector_get_used(to_right) > 0){ // create right node node->right = gt_gtf_create_node(to_right); }else{ node->right = NULL; gt_vector_delete(to_right); } return node; } /* * Read next tab separated field from line or return NULL if no such field exists */ GT_INLINE char* gt_gtf_read_gtf_field_(char** line){ char* current = *line; GT_READ_UNTIL(line, **line=='\t'); if(GT_IS_EOL(line)) return NULL; **line = EOS; GT_NEXT_CHAR(line); return current; } GT_INLINE gt_status gt_gtf_read_attributes_(char** line, gt_shash* attrs){ gt_shash_clear(attrs, false); while(!GT_IS_EOL(line)){ while(**line == ' ') GT_NEXT_CHAR(line); if(**line == EOL || **line == EOS) return GT_STATUS_OK; // get the attribute name char* name = *line; GT_READ_UNTIL(line, **line==' ') if(GT_IS_EOL(line)){ gt_error_msg("Error parsing GTF attributes. 
Expected space but found end of line");
      return GT_GTF_INVALID_LINE;
    }
    **line = EOS; GT_NEXT_CHAR(line);
    // skip to attribute start
    while(**line == ' ') GT_NEXT_CHAR(line);
    // remove starting quote
    if(**line == '"') GT_NEXT_CHAR(line);
    char* attr = *line;
    // skip until the closing ;
    while(**line != ';') GT_NEXT_CHAR(line);
    if(GT_IS_EOL(line)) return GT_GTF_INVALID_LINE;
    // remove trailing quotes and add EOS
    if(*(*line-1) == '"') *(*line-1) = EOS;
    else **line = EOS;
    GT_NEXT_CHAR(line);
    // add attribute
    if(gt_shash_is_contained(attrs, name)){
      gt_shash_remove(attrs, name, false);
    }
    gt_shash_insert(attrs, name, attr, char*);
    if(gt_shash_is_contained(attrs, "gene_id") && gt_shash_is_contained(attrs, "gene_type") && gt_shash_is_contained(attrs, "transcript_id")){
      return GT_STATUS_OK;
    }
  }
  return GT_STATUS_OK;
}
/**
 * Parse a single GTF line
 */
GT_INLINE gt_status gt_gtf_read_line(char* line, gt_gtf* const gtf, uint64_t counter, gt_shash* attrs){
  // skip comments
  if(line[0] == '#'){ return GT_STATUS_OK; }
  char* ref = NULL;
  char* type = NULL;
  uint64_t start = 0;
  uint64_t end = 0;
  gt_strand strand = UNKNOWN;
  char* current = line;
  ref = gt_gtf_read_gtf_field_(&line);
  if(ref == NULL){ gt_error_msg("Unable to parse name: '%s'", line); return GT_GTF_INVALID_LINE; }
  // SKIP source
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse source: '%s'", line); return GT_GTF_INVALID_LINE; }
  // type
  type = gt_gtf_read_gtf_field_(&line);
  if(type == NULL){ gt_error_msg("Unable to parse type: '%s'", line); return GT_GTF_INVALID_LINE; }
  // start
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse start: '%s'", line); return GT_GTF_INVALID_LINE; }
  start = atol(current);
  // end
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse end: '%s'", line); return GT_GTF_INVALID_LINE; }
  end = atol(current);
  // SKIP score
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse score: '%s'", line); return GT_GTF_INVALID_LINE; }
  // strand
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse strand: '%s'", line); return GT_GTF_INVALID_LINE; }
  if(*current == '+'){ strand = FORWARD; }
  else if(*current == '-'){ strand = REVERSE; }
  // SKIP frame (the last field before the attributes)
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){ gt_error_msg("Unable to parse frame: '%s'", line); return GT_GTF_INVALID_LINE; }
  // WARNING >>> the attribute parser stops after
  // the currently used fields are found. 
If you want // to add a field, also update the attribute parser if(gt_gtf_read_attributes_(&line, attrs) != GT_STATUS_OK){ gt_error_msg("Unable to parse attributes: '%s'", line); return GT_GTF_INVALID_ATTRIBUTES; } // get the type or create it gt_string* tp = gt_gtf_get_type(gtf, type); gt_gtf_entry* e = gt_gtf_entry_new(start, end, strand, tp); e->uid = counter; if(gt_shash_is_contained(attrs, "gene_id")){ e->gene_id = gt_gtf_get_gene_id(gtf, gt_shash_get(attrs, "gene_id", char)); } if(gt_shash_is_contained(attrs, "gene_type")){ e->gene_type = gt_gtf_get_gene_type(gtf, gt_shash_get(attrs, "gene_type", char)); } if(gt_shash_is_contained(attrs, "transcript_id")){ e->transcript_id = gt_gtf_get_transcript_id(gtf, gt_shash_get(attrs, "transcript_id", char)); } // get the ref or create it gt_gtf_ref* gtref = gt_gtf_get_ref(gtf, ref); gt_vector_insert(gtref->entries, e, gt_gtf_entry*); if(strcmp(e->type->buffer, "gene") == 0){ gt_shash_insert(gtf->genes, e->gene_id->buffer, e, gt_gtf_entry*); } if(strcmp(e->type->buffer, "transcript") == 0){ gt_shash_insert(gtf->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*); } return GT_STATUS_OK; } bool gt_gtf_hits_junction(gt_map* map, gt_gtf_entry* e){ uint64_t rs = gt_map_get_begin_mapping_position(map); uint64_t re = gt_map_get_end_mapping_position(map); bool hit = (rs==e->start) || (rs==e->end) || (re == e->end) || (re == e->start); return hit; } GT_INLINE uint64_t gt_gtf_get_map_begin(gt_map* const map){ return gt_map_get_begin_mapping_position(map) + gt_map_get_left_trim_length(map); } GT_INLINE uint64_t gt_gtf_get_map_end(gt_map* const map){ return gt_map_get_end_mapping_position(map); } /** * Iterate over the map blocks and count exon-exon junctions that are annotated */ GT_INLINE uint64_t gt_gtf_count_junction(const gt_gtf* const gtf, gt_map* const map){ uint64_t blocks = gt_map_get_num_blocks(map); if(blocks <= 1) return 0; // single block map uint64_t num_junctions = 0; char* seq_name = gt_map_get_seq_name(map); gt_vector* hits = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_shash* last_hits = NULL; GT_MAP_ITERATE(map, block){ uint64_t start = gt_map_get_begin_mapping_position(block); uint64_t end = gt_map_get_end_mapping_position(block); if(last_hits != NULL){ // there was a block before, check if we found an annotated junction gt_gtf_search(gtf, hits, seq_name, start, start, true); GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){ if(gt_shash_is_contained(last_hits, hit->transcript_id->buffer)){ num_junctions++; break; } } } } if(last_hits == NULL) last_hits = gt_shash_new(); else gt_shash_clear(last_hits, true); // search for the overlaps with the end of the block gt_gtf_search(gtf, hits, seq_name, end, end, true); GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){ gt_gtf_count_(last_hits, hit->transcript_id->buffer); } } } gt_vector_delete(hits); gt_shash_delete(last_hits, true); return num_junctions; } void gt_gtf_print_entry_(FILE* target, gt_gtf_entry* e, gt_map* map){ if(map != NULL){ gt_output_map_fprint_map(target, map, NULL); fprintf(target, " ==> "); } if(e->type != NULL){ fprintf(target, "%s : %"PRIu64" - %"PRIu64" (%c)", e->type->buffer, e->start, e->end, (e->strand==FORWARD?'+':'-') ); } if(e->gene_id != NULL){ fprintf(target, " GID:%s", e->gene_id->buffer); } if(e->transcript_id != NULL){ 
fprintf(target, " TID:%s", e->transcript_id->buffer); } if(e->type != NULL){ fprintf(target, " [%s]", e->type->buffer); } if(e->gene_type != NULL){ fprintf(target, " [%s]", e->gene_type->buffer); } fprintf(target, " [#transcripts: %"PRIu64"]", e->num_children); if(map != NULL && gt_gtf_hits_junction(map, e)){ fprintf(target, " [Hits JS]"); } fprintf(target, "\n"); } GT_INLINE gt_gtf_hit* gt_gtf_hit_new(void){ gt_gtf_hit* hit = malloc(sizeof(gt_gtf_hit)); hit->exon_overlap = 0.0; hit->intron_length = 0.0; hit->is_protein_coding = false; hit->junction_hits = 0.0; hit->map = NULL; hit->num_junctions = 0; hit->pairs_transcript = false; hit->pairs_splits = false; hit->pairs_gene = false; hit->num_junctions_hits =0; hit->num_template_blocks = 0; hit->transcripts = NULL; hit->genes = NULL; hit->hits_exon = false; return hit; } GT_INLINE void gt_gtf_hit_delete(gt_gtf_hit* hit){ if(hit->transcripts != NULL){ gt_shash_delete(hit->transcripts, true); } if(hit->genes != NULL){ gt_shash_delete(hit->genes, true); } free(hit); } GT_INLINE gt_status gt_gtf_reload_buffer(gt_buffered_input_file* const buffered_fasta_input) { GT_BUFFERED_INPUT_FILE_CHECK(buffered_fasta_input); // Dump buffer if BOF it attached to input, and get new out block (always FIRST) gt_buffered_input_file_dump_attached_buffers(buffered_fasta_input->attached_buffered_output_file); // Read new input block const uint64_t read_lines = gt_buffered_input_file_get_block(buffered_fasta_input, GT_NUM_LINES_50K); if (gt_expect_false(read_lines==0)) return GT_INPUT_FILE_EOF; // Assign block ID gt_buffered_input_file_set_id_attached_buffers(buffered_fasta_input->attached_buffered_output_file,buffered_fasta_input->block_id); return GT_STATUS_OK; } GT_INLINE gt_status gt_gtf_get_line(gt_buffered_input_file* const buffered_input, gt_string* const line) { GT_BUFFERED_INPUT_FILE_CHECK(buffered_input); GT_STRING_CHECK(line); gt_status error_code; // Check the end_of_block. 
Reload buffer if needed if (gt_buffered_input_file_eob(buffered_input)) { if ((error_code=gt_gtf_reload_buffer(buffered_input))!=GT_IMP_OK) return error_code; } // Prepare the template char* const line_start = buffered_input->cursor; gt_string_clear(line); GT_INPUT_FILE_SKIP_LINE(buffered_input); gt_string_set_nstring_static(line, line_start, (buffered_input->cursor - line_start)); return GT_IMP_OK; } GT_INLINE uint64_t gt_gtf_merge_(const gt_gtf* const target, gt_gtf* source, uint64_t counter){ // get the type or create it GT_SHASH_BEGIN_KEY_ITERATE(source->refs, key){ gt_gtf_ref* source_ref = gt_gtf_get_ref(source, key); gt_gtf_ref* target_ref = gt_gtf_get_ref(target, key); GT_VECTOR_ITERATE(source_ref->entries, value, c, gt_gtf_entry*){ gt_gtf_entry* e = *value; e->uid = counter++; if(e->gene_id != NULL){ e->gene_id = gt_gtf_get_gene_id(target, gt_string_get_string(e->gene_id)); } if(e->transcript_id != NULL){ e->transcript_id = gt_gtf_get_transcript_id(target, gt_string_get_string(e->transcript_id)); } if(e->type != NULL)e->type = gt_gtf_get_type(target, gt_string_get_string(e->type)); if(e->gene_type != NULL)e->gene_type = gt_gtf_get_gene_type(target, gt_string_get_string(e->gene_type)); gt_vector_insert(target_ref->entries, e, gt_gtf_entry*); if(strcmp(e->type->buffer, GT_GTF_TYPE_GENE) == 0 && !gt_shash_is_contained(target->genes, e->gene_id->buffer)){ gt_shash_insert(target->genes, e->gene_id->buffer, e, gt_gtf_entry*); } if(strcmp(e->type->buffer, GT_GTF_TYPE_TRANSCRIPT) == 0 && !gt_shash_is_contained(target->transcripts, e->transcript_id->buffer)){ gt_shash_insert(target->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*); } } }GT_SHASH_END_ITERATE; return counter; } GT_INLINE gt_gtf* gt_gtf_read_from_stream(FILE* input, uint64_t threads){ gt_input_file* input_file = gt_input_stream_open(input); return gt_gtf_read(input_file, threads); } GT_INLINE gt_gtf* gt_gtf_read_from_file(char* input, uint64_t threads){ gt_input_file* input_file = gt_input_file_open(input, false); return gt_gtf_read(input_file, threads); } GT_INLINE gt_gtf* gt_gtf_read(gt_input_file* input_file, const uint64_t threads){ GT_NULL_CHECK(input_file); GT_ZERO_CHECK(threads); uint64_t counter = 0; uint64_t i = 0; gt_gtf* const gtf = gt_gtf_new(); gt_gtf** gtfs = gt_calloc(threads-1, gt_gtf*, true); for(i=0; i<threads-1; i++){ gtfs[i] = gt_gtf_new(); } #pragma omp parallel num_threads(threads) { uint64_t tid = omp_get_thread_num(); gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file); gt_string* buffered_line = gt_string_new(GTF_MAX_LINE_LENGTH); gt_gtf* thread_gtf; if(tid == 0){ thread_gtf = gtf; }else{ thread_gtf = gtfs[tid-1]; } gt_shash* attrs = gt_shash_new(); while(gt_gtf_get_line(buffered_input, buffered_line)){ if(gt_gtf_read_line(buffered_line->buffer, thread_gtf, buffered_input->current_line_num, attrs) != GT_STATUS_OK){ // raise error gt_fatal_error_msg("Failed to parse GTF line '%s'", buffered_line->buffer); } counter++; } gt_shash_delete(attrs, false); gt_buffered_input_file_close(buffered_input); gt_string_delete(buffered_line); } gt_input_file_close(input_file); counter = 0; // merge all the thread gtfs into a single one for(i=0; i<threads-1; i++){ counter = gt_gtf_merge_(gtf, gtfs[i], counter); gt_gtf_delete(gtfs[i]); } free(gtfs); gt_string* const exon_t = gt_string_set_new("exon"); gt_string* const transcript_t = gt_string_set_new("transcript"); gt_string* const intron_t = gt_string_set_new("intron"); // sort the refs 
GT_SHASH_BEGIN_ELEMENT_ITERATE(gtf->refs,shash_element,gt_gtf_ref) { // sort by start position gt_gtf_sort_by_start(shash_element->entries); uint64_t size = gt_vector_get_used(shash_element->entries); uint64_t i = 0; gt_shash* last_exons = gt_shash_new(); gt_shash* exons_counts = gt_shash_new(); for(i=0; i<size; i++){ gt_gtf_entry* entry = *gt_vector_get_elm(shash_element->entries, i, gt_gtf_entry*); if(entry->type != NULL && gt_string_equals(exon_t, entry->type)){ gt_string* transcript_id = entry->transcript_id; if(transcript_id != NULL){ // set exon id and count the exon for the transcript entry->num_children = gt_gtf_get_count_(exons_counts, transcript_id->buffer); gt_gtf_count_(exons_counts, transcript_id->buffer); if(!gt_shash_is_contained(last_exons, gt_string_get_string(transcript_id))){ gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*); }else{ gt_gtf_entry* prev_exon = gt_shash_get_element(last_exons, gt_string_get_string(transcript_id)); gt_gtf_entry* intron = gt_gtf_entry_new(prev_exon->end+1, entry->start-1, prev_exon->strand, intron_t); intron->transcript_id = transcript_id; intron->gene_id = prev_exon->gene_id; intron->uid = counter++; gt_vector_insert(shash_element->entries, intron, gt_gtf_entry*); gt_shash_remove(last_exons, gt_string_get_string(transcript_id),false); gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*); } // add exon counts gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, gt_string_get_string(entry->transcript_id)); if(transcript != NULL){ transcript->num_children++; entry->length = transcript->length; transcript->length += (entry->end - entry->start) + 1; } } }else if(entry->type != NULL && gt_string_equals(transcript_t, entry->type)){ // sum transcript counts for gene id if(entry->gene_id != NULL){ gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf, gt_string_get_string(entry->gene_id)); gene->num_children++; } } } gt_shash_delete(last_exons, false); gt_shash_delete(exons_counts, true); // create a interval tree node for each ref shash_element->node = gt_gtf_create_node(shash_element->entries); } GT_SHASH_END_ITERATE return gtf; } /* * Binary search for start position */ GT_INLINE uint64_t gt_gtf_bin_search(gt_vector* const entries, const uint64_t t, const uint64_t end){ uint64_t used = gt_vector_get_used(entries); uint64_t l = 0; uint64_t h = used - 1; uint64_t m = 0; register gt_gtf_entry* e = *gt_vector_get_elm(entries, h, gt_gtf_entry*); while(l < h ){ m = (l + h) / 2; e = *gt_vector_get_elm(entries, m, gt_gtf_entry*); if(e->start < t){ l = m + 1; }else{ h = m; } } e = *gt_vector_get_elm(entries, l, gt_gtf_entry*); if (h == l){ return l; }else{ return m; } } GT_INLINE void gt_gtf_search_node_(gt_gtf_node* node, const uint64_t start, const uint64_t end, gt_vector* const target){ if(node == NULL) return; // add overlapping intervals from this node GT_VECTOR_ITERATE(node->entries_by_start, element, counter, gt_gtf_entry*){ if((*element)->start > end){ break; } gt_gtf_entry* e = *element; //if((*element)->start <= start && (*element)->end >= end){ if((start < e->end && end > e->start) || (start >= e->start && end <=e->end) || (start < e->end && end >= e->end) || (start < e->start && end > e->end)){ gt_vector_insert(target, (*element), gt_gtf_entry*); } } if(end < node->midpoint || start < node->midpoint){ // search left tree gt_gtf_search_node_(node->left, start, end, target); } if (start > node->midpoint || end > node->midpoint){ gt_gtf_search_node_(node->right, start, end, 
target);
  }
}
GT_INLINE uint64_t gt_gtf_search(const gt_gtf* const gtf, gt_vector* const target, char* const ref, const uint64_t start, const uint64_t end, const bool clear_target){
  if(clear_target) gt_vector_clear(target);
  // make sure the target ref is contained
  if (! gt_shash_is_contained(gtf->refs, ref)){ return 0; }
  const gt_gtf_ref* const source_ref = gt_gtf_get_ref(gtf, ref);
  gt_gtf_search_node_(source_ref->node, start, end, target);
  return gt_vector_get_used(target);
}
GT_INLINE void gt_gtf_count_(gt_shash* const table, char* const element){
  if(!gt_shash_is_contained(table, element)){
    uint64_t* v = gt_malloc_uint64();
    *v = 1;
    gt_shash_insert(table, element, v, uint64_t);
  }else{
    uint64_t* v = gt_shash_get(table,element,uint64_t);
    ++(*v);
  }
}
GT_INLINE void gt_gtf_count_custom_(gt_shash* const table, char* const element, uint64_t c){
  if(!gt_shash_is_contained(table, element)){
    uint64_t* v = gt_malloc_uint64();
    *v = c;
    gt_shash_insert(table, element, v, uint64_t);
  }else{
    uint64_t* v = gt_shash_get(table,element,uint64_t);
    *v += c;
  }
}
GT_INLINE void gt_gtf_count_sum_(gt_shash* const table, char* const element, uint64_t value){
  if(!gt_shash_is_contained(table, element)){
    uint64_t* v = gt_malloc_uint64();
    *v = value;
    gt_shash_insert(table, element, v, uint64_t);
  }else{
    uint64_t* v = gt_shash_get(table,element,uint64_t);
    *v += value;
  }
}
GT_INLINE void gt_gtf_count_weight_(gt_shash* const table, char* const element, double weight){
  if(!gt_shash_is_contained(table, element)){
    double* v = malloc(sizeof(double)); // allocate the value itself, not a pointer
    *v = weight;
    gt_shash_insert(table, element, v, double);
  }else{
    double* v = gt_shash_get(table,element,double);
    *v += weight;
  }
}
GT_INLINE uint64_t gt_gtf_get_count_(gt_shash* const table, char* const element){
  if(!gt_shash_is_contained(table, element)){ return 0; }
  uint64_t* v = gt_shash_get(table,element,uint64_t);
  return *v;
}
GT_INLINE float gt_gtf_get_count_weight(gt_shash* const table, char* const element){
  if(!gt_shash_is_contained(table, element)){ return 0.0; }
  double* v = gt_shash_get(table,element,double);
  return *v;
}
GT_INLINE void gt_gtf_create_hit(gt_vector* search_hits, gt_shash* all_genes, gt_gtf_hits* hits, gt_gtf_hit* template_hit){
  template_hit->transcripts = gt_shash_new();
  template_hit->genes = gt_shash_new();
  template_hit->is_protein_coding = false;
  template_hit->hits_exon = false;
  bool counted_protein = false;
  // set gene count
  GT_SHASH_BEGIN_ITERATE(all_genes, gene_id, c, uint64_t){
    gt_gtf_count_sum_(template_hit->genes, gene_id, *c);
  }GT_SHASH_END_ITERATE;
  GT_VECTOR_ITERATE(search_hits, v, c, gt_gtf_entry*){
    gt_gtf_entry* e = *v;
    // count transcript
    if(e->transcript_id != NULL){
      gt_gtf_count_(template_hit->transcripts, gt_string_get_string(e->transcript_id));
    }
    if(!template_hit->hits_exon && e->type != NULL && strcmp(e->type->buffer, "exon") == 0){
      template_hit->hits_exon = true;
    }
    if(!counted_protein && e->gene_type != NULL){
      template_hit->is_protein_coding |= (strcmp(e->gene_type->buffer, "protein_coding") == 0);
      hits->num_protein_coding++;
      counted_protein = true;
    }
  }
  template_hit->pairs_gene = (gt_shash_get_num_elements(all_genes) > 1); // more than one gene hit
  template_hit->pairs_transcript = (gt_shash_get_num_elements(template_hit->transcripts) == 1); // single transcript hit
  hits->num_paired_genes += (template_hit->pairs_gene ? 
1 : 0); gt_vector_insert(hits->exon_hits, template_hit, gt_gtf_hit*); } GT_INLINE void gt_gtf_search_template_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_template* const template_src){ gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); // reset the hits gt_gtf_hits_clear(hits); gt_shash* all_genes = gt_shash_new(); // process paired alignment GT_TEMPLATE_ITERATE_MMAP__ATTR_(template_src,mmap,mmap_attr) { gt_gtf_hit* template_hit = gt_gtf_hit_new(); template_hit->num_template_blocks = gt_template_get_num_blocks(template_src); template_hit->mmap = mmap; template_hit->map = NULL; template_hit->map_attributes = mmap_attr; template_hit->num_junctions = (gt_map_get_num_blocks(mmap[0]) + gt_map_get_num_blocks(mmap[1])) - 2; template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, mmap[0]) + gt_gtf_count_junction(gtf, mmap[1]); double junction_ratio = template_hit->num_junctions == 0 ? -1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions; if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio; gt_shash_clear(all_genes, true); gt_gtf_count_map(gtf, mmap[0], mmap[1], NULL, all_genes, NULL, NULL); gt_gtf_search_map(gtf, search_hits, mmap[0], true); gt_gtf_search_map(gtf, search_hits, mmap[1], false); gt_gtf_create_hit(search_hits, all_genes, hits, template_hit); hits->num_genes += gt_shash_get_num_elements(all_genes); } gt_shash_delete(all_genes, true); gt_vector_delete(search_hits); } GT_INLINE void gt_gtf_search_alignment_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_alignment* const alignment){ gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); // reset the hits gt_gtf_hits_clear(hits); gt_shash* all_genes = gt_shash_new(); // process paired alignment GT_ALIGNMENT_ITERATE(alignment, map){ gt_gtf_hit* template_hit = gt_gtf_hit_new(); template_hit->map = map; template_hit->mmap = NULL; template_hit->num_junctions = gt_map_get_num_blocks(map) - 1; template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, map); template_hit->num_template_blocks = 1; double junction_ratio = template_hit->num_junctions == 0 ? 
-1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions; if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio; gt_shash_clear(all_genes, false); gt_gtf_count_map(gtf, map, NULL, NULL, all_genes, NULL, NULL); gt_gtf_search_map(gtf, search_hits, map, true); gt_gtf_create_hit(search_hits, all_genes, hits, template_hit); hits->num_genes += gt_shash_get_num_elements(all_genes); } gt_shash_delete(all_genes, false); gt_vector_delete(search_hits); } GT_INLINE void gt_gtf_count_add_(gt_shash* const source, gt_shash* const target){ GT_SHASH_BEGIN_ITERATE(source, key, value, uint64_t){ if(!gt_shash_is_contained(target, key)){ uint64_t* v = gt_malloc_uint64(); *v = *value; gt_shash_insert(target, key, v, uint64_t); }else{ uint64_t* v = gt_shash_get(target,key,uint64_t); *v += (*value); } }GT_SHASH_END_ITERATE; } GT_INLINE void gt_gtf_add_coverage(uint64_t* store, const uint64_t transcript_length, const uint64_t bucket){ // add to all store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_ALL, bucket)] += 1; if(transcript_length <= 150){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_150, bucket)] += 1; } if(transcript_length > 150 && transcript_length <= 250){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_250, bucket)] += 1; } if(transcript_length > 250 && transcript_length <= 500){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_500, bucket)] += 1; } if(transcript_length > 500 && transcript_length <= 1000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_1000, bucket)] += 1; } if(transcript_length > 1000 && transcript_length <= 2500){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_2500, bucket)] += 1; } if(transcript_length > 2500 && transcript_length <= 5000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_5000, bucket)] += 1; } if(transcript_length > 5000 && transcript_length <= 7500){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_7500, bucket)] += 1; } if(transcript_length > 7500 && transcript_length <= 10000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_10000, bucket)] += 1; } if(transcript_length > 10000 && transcript_length <= 15000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_15000, bucket)] += 1; } if(transcript_length > 15000 && transcript_length <= 20000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_20000, bucket)] += 1; } } GT_INLINE void gt_gtf_count_coverage_(const gt_gtf* const gtf, gt_map* const map, char* gene_id, gt_gtf_count_parms* params){ // get coordinates uint64_t start = gt_gtf_get_map_begin(map); uint64_t end = gt_gtf_get_map_end(map); if(start > end){ return; // happens for (1)>123*... 
trim followed by split } uint64_t map_length = (end-start)+1; if(map_length <= 1){ // count only maps with at least 2 bases in length return; } // store the search hits and search gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true); GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id == NULL) continue; // no transcript id if(hit->type == NULL || strcmp("exon", hit->type->buffer) != 0) continue; // no exon or no type if(gene_id != NULL && (hit->gene_id == NULL || strcmp(hit->gene_id->buffer, gene_id) != 0)) continue; // we are looking for a specific gene_id gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, hit->transcript_id->buffer); if(transcript == NULL || transcript->length <= 100){ continue; } if(hit->gene_id == NULL) continue; // no gene id on the hit gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf,hit->gene_id->buffer); if(gene == NULL) continue; // no gene found if(gene_id != NULL && strcmp(gene_id, gene->gene_id->buffer) != 0) continue; // we are looking for a specific hit uint64_t exon_length = (hit->end - hit->start) + 1; int64_t rel_start = start - hit->start; int64_t rel_end = (rel_start + map_length) - 1; if(rel_start < 0){ rel_start = 0; } if(rel_end > exon_length){ rel_end = exon_length; } if(rel_start >= 0 && rel_end <= exon_length){ // contained in range // count for exon count uint64_t start_bucket = (((rel_start/(double)exon_length) * 100.0) + 0.5) - 1; uint64_t end_bucket = (((rel_end/(double)exon_length) * 100.0) + 0.5) - 1; uint64_t s = 0; if(start_bucket >= 0 && start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){ // handle reverse strand and flip coordinates if(hit->strand == REVERSE){ uint64_t tmp = start_bucket; start_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - end_bucket; end_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - tmp; } // scale up // count for global count and make exon coordinates relative to transcript // coordinate range uint64_t hit_start_on_transcript = hit->length; if(hit->strand == REVERSE){ // flip the bucket start if this is a gene on reverse strand // the exon start/end is already flipped // so we just flip the order of the exons here hit_start_on_transcript = (transcript->length - hit_start_on_transcript) - exon_length; } uint64_t trans_start_bucket = ((((double)hit_start_on_transcript / (double)transcript->length) * 100.0) + 0.5) - 1; double scale = (double)exon_length / (double) transcript->length; start_bucket = (scale * (double)start_bucket) + trans_start_bucket; end_bucket = (scale * (double)end_bucket) + trans_start_bucket; if(start_bucket >= 0 && start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){ for(s=start_bucket;s<=end_bucket; s++){ //fprintf(stderr, ">>>GLOBAL COUNT %s : %"PRIu64" S/E: %"PRIu64" %"PRIu64" (%"PRIu64") Exon: %"PRIu64" %"PRIu64"\n", transcript->transcript_id->buffer, s, start, end, map_length, hit->start, hit->end); // count gene body coverage gt_gtf_add_coverage(params->gene_body_coverage, transcript->length, s); // count single transcript if( gene->num_children == 1){ gt_gtf_add_coverage(params->single_transcript_coverage, transcript->length, s); } } } }else{ gt_fatal_error_msg("Coverage overlap out of range %"PRIu64" %"PRIu64, start_bucket, end_bucket); } } } gt_vector_delete(hits); } /** * This counts a single continuous block and takes the. 
Note that we do not perform any checks on
 * splits/pairs here and simply count for this single continuous map
 *
 * @param gt_gtf* gtf the gtf reference
 * @param gt_map* map continuous map block
 * @param gt_shash* type_counts the type counts, i.e. exon/intron etc.
 * @param gt_shash* gene_counts the gene counts with the gene_id's hit by the map.
 * @param gt_shash* exon_counts the exon counts with the gene_id's hit by the map.
 * @param gt_shash* junction_counts the number of annotated junctions that are hit per gene
 * @param float* overlap float pointer that is set to the maximum exon overlap of this block
 * @return uint64_t num_gene_exons number of unique gene_ids hit by exons
 */
GT_INLINE uint64_t gt_gtf_count_map_(const gt_gtf* const gtf, gt_map* const map, gt_shash* const type_counts, gt_shash* const gene_counts, gt_shash* const exon_counts, gt_shash* const junction_counts, float* overlap, uint64_t total_map_length, gt_gtf_count_parms* params){
  // get coordinates
  uint64_t start = gt_gtf_get_map_begin(map);
  uint64_t end = gt_gtf_get_map_end(map);
  if(start > end){
    gt_gtf_count_(type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
    return 0; // happens for (1)>123*... where map starts with trim followed by split
  }
  uint64_t map_length = (end-start)+1;
  // store the search hits and search
  gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
  gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true);
  // we do a complete local count for this block
  // and then merge the local count with the global count
  // to be able to resolve genes/gene_types through either
  // the pair information or split information,
  // assuming that the counts for the other pair and/or the other split
  // are already contained in the globally presented count maps
  gt_shash* const local_type_counts = gt_shash_new();
  gt_shash* local_gene_counts = gt_shash_new();
  gt_shash* local_exon_gene_counts = gt_shash_new();
  float max_overlap = 0.0;
  GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
    gt_gtf_entry* hit = *e;
    // count type
    gt_gtf_count_(local_type_counts, gt_string_get_string(hit->type));
    // count gene id
    if(hit->gene_id != NULL){
      gt_gtf_count_(local_gene_counts, gt_string_get_string(hit->gene_id));
    }
    // count gene_id from exons
    if(hit->type != NULL && hit->gene_id != NULL && strcmp("exon", hit->type->buffer) == 0){
      if(gt_gtf_hits_junction(map, hit)){
        gt_gtf_count_(junction_counts, gt_string_get_string(hit->gene_id));
      }
      gt_gtf_count_(local_exon_gene_counts, gt_string_get_string(hit->gene_id));
      gt_gtf_count_(exon_counts, gt_string_get_string(hit->gene_id));
      int64_t o = ((hit->end < end ? hit->end : end) - (hit->start > start ? hit->start : start)) + 1;
      float block_overlap = o <= 0 ? 0.0 : ((float)o)/((float)(map_length));
      if(block_overlap > max_overlap) max_overlap = block_overlap;
      if(block_overlap > 1.0){
        gt_fatal_error_msg("Block overlap > 1.0\nMap : %"PRIu64" %"PRIu64" (%"PRIu64")\nExon :%"PRIu64" %"PRIu64" ", start, end, map_length, hit->start, hit->end);
      }
    }
  }
  *overlap += (max_overlap * ( (float)map_length / (float) total_map_length));
  if(*overlap > 1.000001){
    gt_output_map_fprint_map(stderr, map, NULL);
    fprintf(stderr, "\n");
    gt_fatal_error_msg("Block overlap > 1.0 :: %.10f\nMap length : %"PRIu64" Total length: %"PRIu64" max overlap: %.10f", *overlap, map_length, total_map_length, max_overlap);
  }
  uint64_t num_gene_hit_exons = gt_shash_get_num_elements(local_exon_gene_counts);
  // count types and merge them with the global
  // counts. NOTE that the order matters here, so
  // we:
  // 1. check for NA hits where nothing is found
  // 2. count exon hits
  // 3. count intron hits
  // 4. count unknown if the hit was neither an intron nor exon hit
  // all counting steps are exclusive, that's why the order matters!
  if(gt_vector_get_used(hits) == 0){
    // count 'NA' type if we did not hit anything
    gt_gtf_count_(type_counts, GT_GTF_TYPE_NA);
  }else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON) > 0){
    gt_gtf_count_(type_counts, GT_GTF_TYPE_EXON);
  }else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON) > 0){
    gt_gtf_count_(type_counts, GT_GTF_TYPE_INTRON);
  }else{
    gt_gtf_count_(type_counts, GT_GTF_TYPE_UNKNOWN);
  }
  // make gene counts based on exon hits if we found at least one
  if(num_gene_hit_exons > 0){
    GT_SHASH_BEGIN_KEY_ITERATE(local_exon_gene_counts, key){
      gt_gtf_count_(gene_counts, key);
    }GT_SHASH_END_ITERATE;
  }else{
    // add all gene counts
    GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
      gt_gtf_count_(gene_counts, key);
    }GT_SHASH_END_ITERATE;
  }
  // if(params->single_transcript_coverage != NULL){
  //   gt_gtf_count_coverage_(gtf, map, NULL, params);
  // }
  gt_shash_delete(local_gene_counts, true);
  gt_shash_delete(local_type_counts, true);
  gt_shash_delete(local_exon_gene_counts, true);
  gt_vector_delete(hits);
  return num_gene_hit_exons;
}
GT_INLINE uint64_t gt_gtf_join_(gt_string* buf, char* base, bool multi_gene, uint64_t blocks){
  if(blocks == 0) return 0;
  uint64_t i = 0;
  uint64_t len = strlen(base);
  for(i=0; i<blocks; i++){
    gt_string_right_append_string(buf, base, len);
    if(multi_gene){
      gt_string_right_append_string(buf, "_mg", 3);
    }
    if(i<blocks-1){
      gt_string_append_char(buf, '^');
    }
  }
  return blocks;
}
GT_INLINE double gt_gtf_count_get_sum_(gt_shash* table){
  double v = 0;
  GT_SHASH_BEGIN_ELEMENT_ITERATE(table, value, uint64_t){
    v += *value;
  }GT_SHASH_END_ITERATE;
  return v;
}
GT_INLINE uint64_t gt_gtf_get_map_length(gt_map* const maps){
  uint64_t map_length = 0;
  GT_MAP_ITERATE(maps, map){
    // get coordinates
    uint64_t start = gt_gtf_get_map_begin(map);
    uint64_t end = gt_gtf_get_map_end(map);
    if(start > end){
      continue; // happens for weird things like (1)>231*... where the map starts with a trim followed by a split
    }
    map_length += (end-start)+1;
  }
  return map_length;
}
/**
 * Count a map. This respects split maps and unifies gene_id's based on
 * the split. If both sides of the split match multiple gene_ids but there is
 * a common gene_id on both sides, only that id is counted. Otherwise a count is set
 * for all gene_ids.
 * In addition to the counts, if a pattern string is given, it is filled with the type
 * pattern with respect to split maps. For example:
 *
 *   exon                        -> exon
 *   exon and intron (split map) -> exon^intron
 *   exon in multiple genes      -> exon_mg
 *
 * The function returns the number of gene_ids hit by the map.
 *
 * The first map has to be specified, but the second one is optional. If it is set,
 * the second map block is also checked and counted. 
* * * @param gt_gtf* gtf the gtf reference * @param gt_map* map1 the first map * @param gt_map* map2 the scond map * @param gt_shash* type_counts the type counts * @param gt_shash* gene_counts the gene counts * @param gt_string pattern the pattern string filled based on the types * @return uint64_t num_gene_hits the number of gene_ids hit by the map */ GT_INLINE uint64_t gt_gtf_count_map(const gt_gtf* const gtf, gt_map* const map1, gt_map* const map2, gt_shash* const pattern_counts, gt_shash* const gene_counts, gt_string* pattern, gt_gtf_count_parms* params){ // clear patterns if(pattern != NULL)gt_string_clear(pattern); // get number of blocks and ensure we have at least one uint64_t blocks = gt_map_get_num_blocks(map1); if(map2 != NULL){ blocks += gt_map_get_num_blocks(map2); } if(blocks == 0) return 0; // local counts for all blocks // and store the number of multi gene exon hits for each block // in addition we create the base pattern per block here gt_shash* const local_type_counts = gt_shash_new(); gt_shash* local_gene_counts = gt_shash_new(); gt_shash* local_gene_counts_1 = gt_shash_new(); gt_shash* local_gene_counts_2 = gt_shash_new(); gt_shash* local_junction_counts_1 = gt_shash_new(); gt_shash* local_junction_counts_2 = gt_shash_new(); gt_shash* local_exon_counts_1 = gt_shash_new(); gt_shash* local_exon_counts_2 = gt_shash_new(); uint64_t* const local_exon_gene_hits = malloc(blocks * sizeof(uint64_t)); gt_vector* const local_type_patterns = gt_vector_new(2, sizeof(char*)); uint64_t exons, introns, unknown, not_annotated, empty_blocks; exons = introns = unknown = not_annotated = empty_blocks =0; uint64_t i = 0; float block_1_overlap = 0.0; float block_2_overlap = 0.0; uint64_t map_1_length = gt_gtf_get_map_length(map1); GT_MAP_ITERATE(map1, map_block){ local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_1, local_exon_counts_1,local_junction_counts_1, &block_1_overlap, map_1_length, params); uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON); uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON); uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN); uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA); uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK); // add the pattern string based in the count value that changed if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*); if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*); if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*); if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*); if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*); exons = _exons; introns = _introns; unknown = _unknown; not_annotated = _not_annotated; empty_blocks = _empty_block; } // if we hit more than one gene, // try to unify the gene by checking the other blocks for // overlaps. If we find genes that are covered by all the // blocks we count only them. 
if(gt_shash_get_num_elements(local_gene_counts_1) > 1){ gt_shash* merged_counts = gt_shash_new(); uint64_t blocks1 = gt_map_get_num_blocks(map1); // search for the best junction hit uint64_t hits_junctions = 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ uint64_t m = gt_gtf_get_count_(local_junction_counts_1,gene_id); if(*count == blocks1 && m > 0){ if(m > hits_junctions) hits_junctions = m; } }GT_SHASH_END_ITERATE; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ if(*count == blocks1 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_1,gene_id) == hits_junctions)){ gt_gtf_count_sum_(merged_counts, gene_id, blocks1); } }GT_SHASH_END_ITERATE; // if we found some unique ids that are covered by both // we flip over to the merged counts gt_shash_delete(local_gene_counts_1, true); local_gene_counts_1 = merged_counts; // we fliped so we reset the exon gene hit counts to ones as well if(gt_shash_get_num_elements(merged_counts) > 0){ for(i=0;i<blocks1;i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } } if(map2 != NULL){ uint64_t map_2_length = gt_gtf_get_map_length(map2); GT_MAP_ITERATE(map2, map_block){ local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_2, local_exon_counts_2, local_junction_counts_2, &block_2_overlap, map_2_length, params); uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON); uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON); uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN); uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA); uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK); // add the pattern string based in the count value that changed if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*); if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*); if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*); if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*); if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*); exons = _exons; introns = _introns; unknown = _unknown; not_annotated = _not_annotated; empty_blocks = _empty_block; } // unify the gene counts based on the number of blocks. 
// the gene_counts are reduced to either the ones that are found in // all blocks or they are kept as they are if(gt_shash_get_num_elements(local_gene_counts_2) > 1){ gt_shash* merged_counts = gt_shash_new(); uint64_t blocks2 = gt_map_get_num_blocks(map2); // search for the best junction hit uint64_t hits_junctions = 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){ uint64_t m = gt_gtf_get_count_(local_junction_counts_2,gene_id); if(*count == blocks2 && m > 0){ if(m > hits_junctions) hits_junctions = m; } }GT_SHASH_END_ITERATE; GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){ if(*count == blocks2 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_2,gene_id) == hits_junctions)){ gt_gtf_count_sum_(merged_counts, gene_id, blocks2); } }GT_SHASH_END_ITERATE; // if we found some unique ids that are covered by both // we flip over to the merged counts gt_shash_delete(local_gene_counts_2, true); local_gene_counts_2 = merged_counts; if(gt_shash_get_num_elements(merged_counts) > 0){ uint64_t blocks1 = gt_map_get_num_blocks(map1); // we flipped so we reset the exon gene hit counts to ones as well for(i=blocks1;i<(blocks1+blocks2);i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } } } /** * Merge everything into a single merged map */ gt_shash* merged_counts = gt_shash_new(); uint64_t blocks1 = gt_map_get_num_blocks(map1); uint64_t blocks2 = 0; if(map2 != NULL){ blocks2 = gt_map_get_num_blocks(map2); } float overlap = (block_1_overlap + block_2_overlap) / (float) (map2==NULL?1.0:2.0); uint64_t map2_hits = map2 != NULL ? gt_shash_get_num_elements(local_gene_counts_2) : 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ if( (gt_shash_is_contained(local_gene_counts_2, gene_id) || map2_hits == 0) && (params == NULL || params->exon_overlap <= 0.0 || overlap >= params->exon_overlap)){ uint64_t nv =*count + gt_gtf_get_count_(local_gene_counts_2, gene_id); gt_gtf_count_sum_(merged_counts, gene_id, nv); if(overlap > 1.000001){ gt_fatal_error_msg("Exon Overlap %.10f > 1.0 from %.10f %.10f!", overlap, block_1_overlap, block_2_overlap); } } }GT_SHASH_END_ITERATE; uint64_t unique_genes_between_pairs = gt_shash_get_num_elements(merged_counts); // we found unique genes through the pair, so we can use // the merged map to do the final counts if(unique_genes_between_pairs > 0){ // we flip the exon gene hit counts in case if(unique_genes_between_pairs == 1){ for(i=0;i<blocks;i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } // merge the gene counts weighted to a single map GT_SHASH_BEGIN_KEY_ITERATE(merged_counts, gene_id){ double v = 0.0; if(gt_shash_is_contained(local_exon_counts_1, gene_id) || ((params == NULL || params->exon_overlap <= 0.0) && gt_shash_is_contained(local_gene_counts_1, gene_id))){ v+= 1.0; } if(gt_shash_is_contained(local_exon_counts_2, gene_id) || ((params == NULL || params->exon_overlap <= 0.0 )&& gt_shash_is_contained(local_gene_counts_2, gene_id))){ v+=1.0; } if(v > 0.0) gt_gtf_count_weight_(local_gene_counts, gene_id, v); }GT_SHASH_END_ITERATE; } // get the number of hits of this map uint64_t num_gene_hits = gt_shash_get_num_elements(local_gene_counts); if(pattern_counts != NULL){ // now iterate the blocks and construct final pattern for(i=0; i<blocks; i++){ char* p = *(gt_vector_get_elm(local_type_patterns, i, char*)); if(strcmp(p, GT_GTF_TYPE_EMPTY_BLOCK) == 0) continue; // for exons check that in case we have a single gene hit, its exons, in case of a multi-gene hit, 
append _mg if
      // the multi gene hit comes from the current block
      gt_gtf_join_(pattern, p, (strcmp("exon",p) == 0) ? ((num_gene_hits == 1) ? false : (local_exon_gene_hits[i] > 1)) : false, 1);
      // add paired end spacer
      if(map2 != NULL && i == (blocks1-1)){
        gt_string_append_char(pattern, '|');
      }else{
        if(i<blocks-1){
          gt_string_append_char(pattern, '^');
        }
      }
    }
    gt_string_append_eos(pattern);
    // count global type based on the constructed pattern
    gt_gtf_count_(pattern_counts, gt_string_get_string(pattern));
  }
  if(params != NULL && params->num_maps == 1){
    // count junctions for single mapping reads
    if(blocks1 > 1){
      params->num_junctions += blocks1 - 1;
      params->num_annotated_junctions += gt_gtf_count_junction(gtf, map1);
    }
    if(blocks2 > 1){
      params->num_junctions += blocks2 - 1;
      params->num_annotated_junctions += gt_gtf_count_junction(gtf, map2);
    }
  }
  if(gene_counts != NULL){
    // count the gene ids
    GT_SHASH_BEGIN_ITERATE(local_gene_counts, key, e, double){
      if(gt_shash_is_contained(gene_counts, key)){
        double current = gt_gtf_get_count_weight(gene_counts, key);
        if(current < *e){
          // set to max count
          gt_gtf_count_weight_(gene_counts, key, (*e)-current);
        }
      }else{
        gt_gtf_count_weight_(gene_counts, key, *e);
      }
    }GT_SHASH_END_ITERATE;
  }
  if(params != NULL && params->single_transcript_coverage != NULL){ // params may be NULL
    // do coverage counts for merged genes
    GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
      // count map1
      GT_MAP_ITERATE(map1, map_block){
        gt_gtf_count_coverage_(gtf, map_block, key, params);
      }
      if(map2 != NULL){
        GT_MAP_ITERATE(map2, map_block){
          gt_gtf_count_coverage_(gtf, map_block, key, params);
        }
      }
    }GT_SHASH_END_ITERATE;
  }
  // cleanup
  gt_vector_delete(local_type_patterns);
  gt_shash_delete(local_gene_counts, true);
  gt_shash_delete(local_gene_counts_1, true);
  gt_shash_delete(local_gene_counts_2, true);
  gt_shash_delete(local_exon_counts_1, true);
  gt_shash_delete(local_exon_counts_2, true);
  gt_shash_delete(local_junction_counts_1, true);
  gt_shash_delete(local_junction_counts_2, true);
  gt_shash_delete(local_type_counts, true);
  gt_shash_delete(merged_counts, true);
  free(local_exon_gene_hits);
  return gene_counts != NULL ? gt_shash_get_num_elements(gene_counts) : 0; // gene_counts may be NULL
}
GT_INLINE uint64_t gt_gtf_count_alignment(gt_gtf* const gtf, gt_alignment* const alignment, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
  uint64_t hits = 0;
  gt_string* pattern = gt_string_new(16);
  params->num_maps = gt_alignment_get_num_maps(alignment);
  GT_ALIGNMENT_ITERATE(alignment,map) {
    hits = gt_gtf_count_map(gtf, map, NULL, pattern_count, gene_counts, pattern, params);
    gt_string_clear(pattern);
  }
  gt_string_delete(pattern);
  return hits;
}
GT_INLINE uint64_t gt_gtf_count_template(gt_gtf* const gtf, gt_template* const template, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
  uint64_t hits = 0;
  gt_string* pattern = gt_string_new(16);
  params->num_maps = gt_template_get_num_mmaps(template);
  GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attr) {
    hits = gt_gtf_count_map(gtf, mmap[0], mmap[1], pattern_count, gene_counts, pattern, params);
    gt_string_clear(pattern);
  }
  gt_string_delete(pattern);
  return hits;
}
GT_INLINE void gt_gtf_search_map(const gt_gtf* const gtf, gt_vector* const hits, gt_map* const map, const bool clean_target){
  // clear the target once up front, then collect hits for the coordinates of
  // every block of the map
  if(clean_target) gt_vector_clear(hits);
  GT_MAP_ITERATE(map, block){
    uint64_t start = gt_map_get_begin_mapping_position(block);
    uint64_t end = gt_map_get_end_mapping_position(block);
    gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, false);
  }
}
GT_INLINE void gt_gtf_search_alignment(const gt_gtf* const gtf, 
gt_vector* const hits, gt_alignment* const alignment){ GT_ALIGNMENT_ITERATE(alignment, map){ gt_gtf_search_map(gtf, hits, map, true); } } GT_INLINE void gt_gtf_search_template(const gt_gtf* const gtf, gt_vector* const hits, gt_template* const template){ GT_TEMPLATE_IF_REDUCES_TO_ALINGMENT(template, alignment){ gt_gtf_search_alignment(gtf,hits, alignment); }GT_TEMPLATE_END_REDUCTION__RETURN; gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 0)); gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 1)); }
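For orientation, the pieces above compose as follows. This is a minimal usage sketch, not part of the original file: it assumes gt_gtf.h pulls in the gemtools commons used above (gt_vector, gt_string, PRIu64), and the annotation path and query region are hypothetical; only functions defined in this file are called.

#include "gt_gtf.h"
#include <stdio.h>

int main(void) {
  // Parse the annotation with 4 threads; the per-reference interval
  // trees are built at load time.
  gt_gtf* gtf = gt_gtf_read_from_file("annotation.gtf", 4); // hypothetical path
  // Collect all entries overlapping chr1:10000-20000 (clear_target=true).
  gt_vector* hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
  uint64_t n = gt_gtf_search(gtf, hits, "chr1", 10000, 20000, true);
  printf("%" PRIu64 " overlapping entries\n", n);
  GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*) {
    gt_gtf_print_entry_(stdout, *e, NULL); // type, range, strand, gene/transcript ids
  }
  gt_vector_delete(hits);
  gt_gtf_delete(gtf);
  return 0;
}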
GB_binop__rminus_uint16.c
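The generated kernel below hard-codes the RMINUS ("reverse minus") operator for uint16_t; per the GB_BINOP macro in the file, the scalar semantics are z = (y - x). As a standalone illustration of just that operator (not part of the generated file):

#include <stdint.h>
#include <assert.h>

// Mirrors GB_BINOP(z,x,y,i,j): z = (y - x) for uint16_t values.
static inline uint16_t rminus_uint16(uint16_t x, uint16_t y) {
  return (uint16_t)(y - x); // unsigned arithmetic wraps modulo 2^16
}

int main(void) {
  assert(rminus_uint16(3, 10) == 7);               // 10 - 3
  assert(rminus_uint16(10, 3) == (uint16_t)65529); // 3 - 10 wraps around
  return 0;
}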
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint16) // A*D function (colscale): GB (_AxD__rminus_uint16) // D*A function (rowscale): GB (_DxB__rminus_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint16) // C=scalar+B GB (_bind1st__rminus_uint16) // C=scalar+B' GB (_bind1st_tran__rminus_uint16) // C=A+scalar GB (_bind2nd__rminus_uint16) // C=A'+scalar GB (_bind2nd_tran__rminus_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar 
= (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, 
int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mpifft.c
/* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; fill-column: 79; coding: iso-latin-1-unix -*- */
/* mpifft.c */

#include <hpcc.h>
#include "hpccfft.h"
#include "wrapmpifftw.h"

double *HPCC_fft_timings_forward, *HPCC_fft_timings_backward;

static void
MPIFFT0(HPCC_Params *params, int doIO, FILE *outFile, MPI_Comm comm, int locN,
  double *UGflops, s64Int_t *Un, double *UmaxErr, int *Ufailure) {
  int commRank, commSize, failure, flags;
  s64Int_t i, n;
  s64Int_t locn, loc0, alocn, aloc0, tls;
  double maxErr, tmp1, tmp2, tmp3, t0, t1, t2, t3, Gflops;
  double deps;
  fftw_complex *inout, *work;
  fftw_mpi_plan p;
  hpcc_fftw_mpi_plan ip;
  int sAbort, rAbort;
#ifdef USING_FFTW
  int ilocn, iloc0, ialocn, ialoc0, itls;
#endif

  failure = 1;
  Gflops = -1.0;
  deps = HPL_dlamch( HPL_MACH_EPS );
  maxErr = 1.0 / deps;

  MPI_Comm_size( comm, &commSize );
  MPI_Comm_rank( comm, &commRank );

  n = locN; /* number of processes have been factored out - need to put it back in */
  n *= commSize;
  n *= commSize; /* global vector size */

#ifdef USING_FFTW
  /* FFTW ver. 2 only supports vector sizes that fit in 'int' */
  if (n > (1<<30)-1+(1<<30)) {
#ifdef HPCC_FFTW_CHECK32
    goto no_plan;
#else
    if (doIO) {
      fprintf( outFile, "Warning: problem size too large: %ld*%d*%d\n", (long)(n / commSize / commSize), commSize, commSize );
    }
#endif
  }
#endif

#ifdef HPCC_FFTW_ESTIMATE
  flags = FFTW_ESTIMATE;
#else
  flags = FFTW_MEASURE;
#endif

  t1 = -MPI_Wtime();
  p = fftw_mpi_create_plan( comm, n, FFTW_FORWARD, flags );
  t1 += MPI_Wtime();

  if (! p) goto no_plan;

#ifdef USING_FFTW
  fftw_mpi_local_sizes( p, &ilocn, &iloc0, &ialocn, &ialoc0, &itls );
  locn = ilocn;
  loc0 = iloc0;
  alocn = ialocn;
  aloc0 = ialoc0;
  tls = itls;
#else
  fftw_mpi_local_sizes( p, &locn, &loc0, &alocn, &aloc0, &tls );
#endif

  inout = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *inout) );
  work = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *work) );

  sAbort = 0;
  if (! inout || ! work) sAbort = 1;
  MPI_Allreduce( &sAbort, &rAbort, 1, MPI_INT, MPI_SUM, comm );
  if (rAbort > 0) {
    fftw_mpi_destroy_plan( p );
    goto comp_end;
  }

  /* Make sure that `inout' and `work' are initialized in parallel if using
     Open MP: this will ensure better placement of pages if first-touch policy
     is used by a distributed shared memory machine. */
#ifdef _OPENMP
#pragma omp parallel for
  for (i = 0; i < tls; ++i) {
    c_re( inout[i] ) = c_re( work[i] ) = 0.0;
    c_im( inout[i] ) = c_im( work[i] ) = 0.0;
  }
#endif

  t0 = -MPI_Wtime();
  HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, inout );
  t0 += MPI_Wtime();

  t2 = -MPI_Wtime();
  fftw_mpi( p, 1, inout, work );
  t2 += MPI_Wtime();

  fftw_mpi_destroy_plan( p );

  ip = HPCC_fftw_mpi_create_plan( comm, n, FFTW_BACKWARD, FFTW_ESTIMATE );

  if (ip) {
    t3 = -MPI_Wtime();
    HPCC_fftw_mpi( ip, 1, inout, work );
    t3 += MPI_Wtime();

    HPCC_fftw_mpi_destroy_plan( ip );
  }

  HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, work ); /* regenerate data */

  maxErr = 0.0;
  for (i = 0; i < locn; ++i) {
    tmp1 = c_re( inout[i] ) - c_re( work[i] );
    tmp2 = c_im( inout[i] ) - c_im( work[i] );
    tmp3 = sqrt( tmp1*tmp1 + tmp2*tmp2 );
    maxErr = maxErr >= tmp3 ? maxErr : tmp3;
  }
  MPI_Allreduce( &maxErr, UmaxErr, 1, MPI_DOUBLE, MPI_MAX, comm );
  maxErr = *UmaxErr;
  if (maxErr / log(n) / deps < params->test.thrsh) failure = 0;

  if (t2 > 0.0) Gflops = 1e-9 * (5.0 * n * log(n) / log(2.0)) / t2;

  if (doIO) {
    fprintf( outFile, "Number of nodes: %d\n", commSize );
    fprintf( outFile, "Vector size: %20.0f\n", tmp1 = (double)n );
    fprintf( outFile, "Generation time: %9.3f\n", t0 );
    fprintf( outFile, "Tuning: %9.3f\n", t1 );
    fprintf( outFile, "Computing: %9.3f\n", t2 );
    fprintf( outFile, "Inverse FFT: %9.3f\n", t3 );
    fprintf( outFile, "max(|x-x0|): %9.3e\n", maxErr );
    fprintf( outFile, "Gflop/s: %9.3f\n", Gflops );
  }

  comp_end:

  if (work) HPCC_fftw_free( work );
  if (inout) HPCC_fftw_free( inout );

  no_plan:

  *UGflops = Gflops;
  *Un = n;
  *UmaxErr = maxErr;
  *Ufailure = failure;
}

int
HPCC_MPIFFT(HPCC_Params *params) {
  int commRank, commSize;
  int locN, procCnt, isComputing, doIO, failure = 0;
  s64Int_t n;
  double Gflops = -1.0, maxErr = -1.0;
  MPI_Comm comm;
  FILE *outFile;

  MPI_Comm_size( MPI_COMM_WORLD, &commSize );
  MPI_Comm_rank( MPI_COMM_WORLD, &commRank );

  doIO = commRank == 0 ? 1 : 0;

  if (doIO) {
    outFile = fopen( params->outFname, "a" );
    if (! outFile) outFile = stderr;
  }

  /*
  There are two vectors of size 'n'/'commSize': inout, work,
  and internal work: 2*'n'/'commSize'; it's 4 vectors then.

  FFTE requires the global vector size 'n' to be at least as big as the
  square of the number of processes. The square is calculated in each
  factor independently. In other words, 'n' has to have at least twice as
  many 2 factors as the process count, twice as many 3 factors and twice
  as many 5 factors.
  */

#ifdef HPCC_FFT_235
  locN = 0;
  procCnt = commSize + 1;
  do {
    int f[3];

    procCnt--;

    for ( ; procCnt > 1 && HPCC_factor235( procCnt, f ); procCnt--)
      ; /* EMPTY */

    /* Make sure the local vector size is greater than 0 */
    locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 0 );
    for ( ; locN >= 1 && HPCC_factor235( locN, f ); locN--)
      ; /* EMPTY */
  } while (locN < 1);
#else
  /* Find power of two that is smaller or equal to number of processes */
  for (procCnt = 1; procCnt <= (commSize >> 1); procCnt <<= 1)
    ; /* EMPTY */

  /* Make sure the local vector size is greater than 0 */
  while (1) {
    locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 1 );
    if (locN) break;
    procCnt >>= 1;
  }
#endif

  isComputing = commRank < procCnt ? 1 : 0;

  HPCC_fft_timings_forward = params->MPIFFTtimingsForward;
  HPCC_fft_timings_backward = params->MPIFFTtimingsBackward;

  if (commSize == procCnt)
    comm = MPI_COMM_WORLD;
  else
    MPI_Comm_split( MPI_COMM_WORLD, isComputing ? 0 : MPI_UNDEFINED, commRank, &comm );

  if (isComputing)
    MPIFFT0( params, doIO, outFile, comm, locN, &Gflops, &n, &maxErr, &failure );

  if (commSize != procCnt && isComputing && comm != MPI_COMM_NULL)
    MPI_Comm_free( &comm );

  params->MPIFFT_N = n;
  params->MPIFFT_Procs = procCnt;
  params->MPIFFT_maxErr = maxErr;

  MPI_Bcast( &Gflops, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );

  params->MPIFFTGflops = Gflops;

  params->FFTEnblk = FFTE_NBLK;
  params->FFTEnp = FFTE_NP;
  params->FFTEl2size = FFTE_L2SIZE;

  if (failure)
    params->Failure = 1;

  if (doIO) if (outFile != stderr) fclose( outFile );

  return 0;
}
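/*
 * Worked example (illustrative, not part of the benchmark): with
 * commSize = 12 = 2^2 * 3 processes, n = locN * commSize * commSize, so every
 * prime factor of the process count already appears squared in n. A local
 * size of locN = 10 then gives n = 10 * 12 * 12 = 1440 = 2^5 * 3^2 * 5, which
 * satisfies the requirement above: at least twice as many 2 factors and
 * 3 factors as the process count, with the extra factor of 5 also allowed
 * by the 2-3-5 factorization.
 */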
optimized_cluster_tree.h
#pragma once

#include "bct_kernel_type.h"
#include "optimized_bct_types.h"

namespace rsurfaces
{
    struct BVHSettings
    {
        mint split_threshold = 8;

        // bool use_old_prepost = false;

        // TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Chunks;
        // TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Tasks;
        TreePercolationAlgorithm tree_perc_alg = TreePercolationAlgorithm::Sequential;
    };

    // a global instance to store default settings
    extern BVHSettings BVHDefaultSettings;

    struct Cluster2 // slim POD container to hold only the data relevant for the construction phase in the tree, before it is serialized
    {
    public:
        Cluster2(){};

        ~Cluster2(){
            // delete left;
            // delete right;
        };

        Cluster2(mint begin_, mint end_, mint depth_);

        mint begin = 0;     // position of first triangle in cluster relative to array ordering
        mint end = 0;       // position behind last triangle in cluster relative to array ordering
        mint depth = 0;     // depth within the tree -- not absolutely necessary but nice to have for plotting images
        mint max_depth = 0; // used to compute the maximal depth in the tree
        mint descendant_count = 0;
        mint descendant_leaf_count = 0;
        Cluster2 *left = nullptr;
        Cluster2 *right = nullptr;
    }; //Cluster2

    class OptimizedClusterTree // binary cluster tree; layout mostly in Struct of Array fashion in order to prepare SIMDization. Not SIMDized, yet, though.
    {
    public:
        OptimizedClusterTree(){};

        // Solving interface problems by using standard types
        // This way, things are easier to port. For example, I can call this from Mathematica for faster debugging.
        OptimizedClusterTree(
            const mreal * restrict const P_coords_, // coordinates per primitive used for clustering; assumed to be of size primitive_count x dim
            const mint primitive_count_,
            const mint dim_,
            const mreal * restrict const P_hull_coords_, // points that define the convex hulls of primitives; assumed to be array of size primitive_count x hull_count x dim
            const mint hull_count_,
            const mreal * restrict const P_near_, // data used for the actual interaction computation; assumed to be of size primitive_count x near_dim. For a triangle mesh in 3D, we want to feed each triangle's i) area, ii) barycenter, and iii) normal as a 1 + 3 + 3 = 7 vector
            const mint near_dim_,
            const mreal * restrict const P_far_, // data used for the actual interaction computation; assumed to be of size primitive_count x far_dim. For a triangle mesh in 3D, we want to feed each triangle's i) area, ii) barycenter, and iii) orthoprojector onto the normal space as a 1 + 3 + 6 = 10 vector
            const mint far_dim_,
            // const mreal * const restrict P_moments_, // Interface to deal with higher order multipole expansion. Not used, yet.
            // const mint moment_count_,
            const mint * restrict const ordering_, // A suggested preordering of primitives; this gets applied before the clustering begins in the hope that this may improve the sorting within a cluster --- at least in the top level(s). This could, e.g., be the ordering obtained by a tree for a similar data set.
MKLSparseMatrix &DiffOp, MKLSparseMatrix &AvOp, BVHSettings settings_ = BVHDefaultSettings ); mint dim = 3; mint near_dim = 7; // = 1 + 3 + 3 for weight, center, normal, stored consecutively mint far_dim = 10; // = 1 + 3 + 3 * (3 + 1)/2 for weight, center, projector, stored consecutively mint hull_count = 3; mint tree_thread_count = 1; mint thread_count = 1; mint primitive_count = 0; mint cluster_count = 0; mint leaf_cluster_count = 0; mint max_buffer_dim = 0; mint buffer_dim = 0; // mint moment_count = 22; BVHSettings settings; mint *restrict P_ext_pos = nullptr; // Reordering of primitives; crucial for communication with outside world mint *restrict inverse_ordering = nullptr; // Inverse ordering of the above; crucial for communication with outside world // A_Vector<mint> P_leaf; // Index of the leaf cluster to which the primitive belongs // "C_" stands for "cluster", "P_" stands for "primitive" mint *restrict C_begin = nullptr; mint *restrict C_end = nullptr; mint *restrict C_depth = nullptr; mint *restrict C_next = nullptr; mint *restrict C_left = nullptr; // list of index of left children; entry is -1 if no child is present mint *restrict C_right = nullptr; // list of index of right children; entry is -1 if no child is present bool *restrict C_is_chunk_root = nullptr; // Primitive double data, stored in Structure of Arrays fashion A_Vector<mreal *> P_near; //weight, center, normal, stored consecutively; assumed to be matrix of size near_dim x primitive_count! A_Vector<mreal *> P_far; //weight, center, projector, stored consecutively; assumed to be matrix of size far_dim x primitive_count! A_Vector<mreal *> P_coords; //clustering coordinates, stored as dim x primitive_count matrix A_Vector<mreal *> P_min; //lower bounding box point, stored as dim x primitive_count matrix A_Vector<mreal *> P_max; //upper bounding box point, stored as dim x n matrix // A_Vector<mreal * restrict> P_moments; mreal *restrict P_in = nullptr; mreal *restrict P_out = nullptr; // mreal * restrict P_moment_buffer = nullptr; // Cluster double data, stored in Structure of Arrays fashion A_Vector<mreal *> C_far; //weight, center, normal, stored consecutively; assumed to be matrix of size data_dim x n A_Vector<mreal *> C_coords; //clustering coordinate A_Vector<mreal *> C_min; A_Vector<mreal *> C_max; // A_Vector<mreal * restrict> C_moments; mreal *restrict C_in = nullptr; mreal *restrict C_out = nullptr; // mreal * restrict C_moment_buffer = nullptr; mreal *restrict C_squared_radius = nullptr; mint *restrict leaf_clusters = nullptr; mint *restrict leaf_cluster_lookup = nullptr; mint *restrict leaf_cluster_ptr = nullptr; // point to __end__ of each leaf cluster A_Vector<A_Vector<mreal>> P_D_near; A_Vector<A_Vector<mreal>> P_D_far; A_Vector<A_Vector<mreal>> C_D_far; // mint scratch_size = 12; // A_Vector<A_Vector<mreal>> scratch; MKLSparseMatrix hi_pre; MKLSparseMatrix hi_post; MKLSparseMatrix lo_pre; MKLSparseMatrix lo_post; MKLSparseMatrix P_to_C; MKLSparseMatrix C_to_P; A_Vector<A_Vector<mint>> chunk_roots; mint tree_max_depth = 0; bool chunks_prepared = false; ~OptimizedClusterTree() {; ptic("~OptimizedClusterTree"); // pointer arrays come at the cost of manual deallocation... 
        #pragma omp parallel
        {
            #pragma omp single
            {
                // #pragma omp task
                // {
                //     for( mint k = 0; k < moment_count; ++ k )
                //     {
                //         safe_free(P_moments[k]);
                //     }
                // }
                //
                // #pragma omp task
                // {
                //     for( mint k = 0; k < moment_count; ++ k )
                //     {
                //         safe_free(C_moments[k]);
                //     }
                // }

                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(P_coords.size()); ++k)
                    {
                        safe_free(P_coords[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(C_coords.size()); ++k)
                    {
                        safe_free(C_coords[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(P_near.size()); ++k)
                    {
                        safe_free(P_near[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(C_far.size()); ++k)
                    {
                        safe_free(C_far[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(P_min.size()); ++k)
                    {
                        safe_free(P_min[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(P_max.size()); ++k)
                    {
                        safe_free(P_max[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(C_min.size()); ++k)
                    {
                        safe_free(C_min[k]);
                    }
                }
                #pragma omp task
                {
                    for (mint k = 0; k < static_cast<mint>(C_max.size()); ++k)
                    {
                        safe_free(C_max[k]);
                    }
                }
                #pragma omp task
                { safe_free(P_in); }
                #pragma omp task
                { safe_free(P_out); }
                #pragma omp task
                { safe_free(C_in); }
                #pragma omp task
                { safe_free(C_out); }
                #pragma omp task
                { safe_free(C_squared_radius); }
                #pragma omp task
                { safe_free(leaf_clusters); }
                #pragma omp task
                { safe_free(leaf_cluster_lookup); }
                #pragma omp task
                { safe_free(leaf_cluster_ptr); }
                #pragma omp task
                { safe_free(inverse_ordering); }
                #pragma omp task
                { safe_free(P_ext_pos); }
                #pragma omp task
                { safe_free(C_begin); }
                #pragma omp task
                { safe_free(C_end); }
                #pragma omp task
                { safe_free(C_depth); }
                #pragma omp task
                { safe_free(C_next); }
                #pragma omp task
                { safe_free(C_left); }
                #pragma omp task
                { safe_free(C_right); }
                #pragma omp task
                { safe_free(C_is_chunk_root); }
            }
        }
        ptoc("~OptimizedClusterTree");
        };

        void SplitCluster(Cluster2 * const C, const mint free_thread_count);

        void Serialize(Cluster2 * const C, const mint ID, const mint leaf_before_count, const mint free_thread_count);

        void ComputePrimitiveData(
            const mreal * restrict const P_hull_coords_,
            const mreal * restrict const P_near_,
            const mreal * restrict const P_far_
            // , const mreal * const restrict P_moments_
        ); // copy, reordering and computing bounding boxes

        void ComputeClusterData();

        void RequireBuffers(const mint cols);

        void ComputePrePost(MKLSparseMatrix &DiffOp, MKLSparseMatrix &AvOp);

        void CleanseBuffers();

        void CleanseD();

        void Pre(Eigen::MatrixXd &input, BCTKernelType type);

        void Pre(mreal *input, const mint cols, BCTKernelType type);

        void Post(Eigen::MatrixXd &output, BCTKernelType type, bool addToResult = false);

        void Post(mreal *output, const mint cols, BCTKernelType type, bool addToResult = false);

        void PercolateUp();

        void PercolateDown();

        void RequireChunks();

        // some prototype
        void PercolateUp_Chunks();
        void percolateUp_Tip( const mint C);

        // some prototype
        void PercolateDown_Chunks();
        void percolateDown_Tip( const mint C);

        // TODO: Not nearly as fast as I'd like it to be; not scalable!
        // recursive algorithm parallelized by OpenMP tasks
        void PercolateUp_Tasks(const mint C, const mint free_thread_count);

        // TODO: Not nearly as fast as I'd like it to be; not scalable!
        // recursive algorithm parallelized by OpenMP tasks
        void PercolateDown_Tasks(const mint C, const mint free_thread_count);

        // TODO: use a stack for recursion instead of the program stack?
        // sequential, recursive algorithm
        void PercolateUp_Seq(const mint C);

        // TODO: use a stack for recursion instead of the program stack?
        // sequential, recursive algorithm
        void PercolateDown_Seq(const mint C);

        void CollectDerivatives( mreal * restrict const P_D_near_output ); // collect only near field data

        void CollectDerivatives( mreal * restrict const P_D_near_output, mreal * restrict const P_D_far_output );

        // Updates only the computational data (primitive/cluster areas, centers of mass and normals).
        // All data related to clustering or multipole acceptance criteria remain unchanged, as well
        // as the preprocessor and postprocessor matrices (that are needed for matrix-vector multiplies of the BCT).
        void SemiStaticUpdate( const mreal * restrict const P_near_, const mreal * restrict const P_far_ );

        void PrintToFile(std::string filename = "./OptimizedClusterTree.tsv");

    private:
        void computeClusterData(const mint C, const mint free_thread_count); // helper function for ComputeClusterData

        bool requireChunks( mint C, mint last, mint thread);

    }; //OptimizedClusterTree

} // namespace rsurfaces
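// Illustrative usage sketch (not part of the header): the intended call
// sequence around one matrix-vector multiply, inferred from the interface
// above. The input/output buffers, `cols`, and the kernel `type` value are
// assumptions.
//
//     OptimizedClusterTree bvh( P_coords, primitive_count, 3,
//                               P_hull_coords, 3, P_near, 7, P_far, 10,
//                               ordering, DiffOp, AvOp );
//     bvh.RequireBuffers( cols );     // size the per-column work buffers
//     bvh.Pre( input, cols, type );   // scatter the input onto primitives
//     bvh.PercolateUp();              // accumulate primitive data up the tree
//     /* ... evaluate near/far field interactions ... */
//     bvh.PercolateDown();            // push cluster results back to primitives
//     bvh.Post( output, cols, type ); // gather the result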
GB_unaryop__identity_uint16_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_int64 // op(A') function: GB_tran__identity_uint16_int64 // C type: uint16_t // A type: int64_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_int64 ( uint16_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
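// Illustrative note (not generated code): the identity "operator" above is
// really just the typecast. The int64_t -> uint16_t conversion keeps the low
// 16 bits, e.g. aij = 70000 yields cij = 70000 mod 65536 = 4464, and a
// negative input such as aij = -1 wraps to cij = 65535.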
GB_unaryop__identity_int64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_fp32 // op(A') function: GB_tran__identity_int64_fp32 // C type: int64_t // A type: float // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int64_t z ; GB_CAST_SIGNED(z,aij,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_fp32 ( int64_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
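/*
 * Illustrative note (not part of the benchmark): the (t)%2 / (t+1)%2
 * indexing implements double buffering -- time step t reads the grid
 * A[t%2] and writes A[(t+1)%2], so only two full Nz x Ny x Nx grids are
 * kept in memory regardless of the number of time steps Nt.
 */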
axpbyMany.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(axpbyMany)(const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat & alpha, const dfloat * __restrict__ cpu_a, const dfloat & beta, dfloat * __restrict__ cpu_b){ #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) #endif for(int fld=0;fld<Nfields;fld++) { for(dlong i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat bi = cpu_b[id]; cpu_b[id] = alpha*ai + beta*bi; } } }
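// Illustrative usage (names are hypothetical): the kernel above computes
// b <- alpha*a + beta*b independently for each of the Nfields slices, where
// field `fld` occupies indices [fld*offset, fld*offset + N). E.g. an explicit
// Euler update of a 3-component field stored with stride `offset`:
//
//     FUNC(axpbyMany)(N, 3, offset, dt, rhs, 1.0, u);   // u += dt * rhs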
bml_allocate_ellpack_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_allocate.h" #include "../bml_types.h" #include "bml_allocate_ellpack.h" #include "bml_types_ellpack.h" #include <complex.h> #include <math.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /** Clear a matrix. * * Numbers of non-zeroes, indeces, and values are set to zero. * * \ingroup allocate_group * * \param A The matrix. */ void TYPED_FUNC( bml_clear_ellpack) ( bml_matrix_ellpack_t * A) { memset(A->nnz, 0, A->N * sizeof(int)); memset(A->index, 0, A->N * A->M * sizeof(int)); memset(A->value, 0.0, A->N * A->M * sizeof(REAL_T)); } /** Allocate a matrix with uninitialized values. * * Note that the matrix \f$ a \f$ will be newly allocated. If it is * already allocated then the matrix will be deallocated in the * process. * * \ingroup allocate_group * * \param matrix_precision The precision of the matrix. The default * is double precision. * \param matrix_dimension The matrix size. * \param distrib_mode The distribution mode. * \return The matrix. */ bml_matrix_ellpack_t *TYPED_FUNC( bml_noinit_matrix_ellpack) ( bml_matrix_dimension_t matrix_dimension, bml_distribution_mode_t distrib_mode) { bml_matrix_ellpack_t *A = bml_noinit_allocate_memory(sizeof(bml_matrix_ellpack_t)); A->matrix_type = ellpack; A->matrix_precision = MATRIX_PRECISION; A->N = matrix_dimension.N_rows; A->M = matrix_dimension.N_nz_max; A->distribution_mode = distrib_mode; A->index = bml_noinit_allocate_memory(sizeof(int) * A->N * A->M); A->nnz = bml_allocate_memory(sizeof(int) * A->N); A->value = bml_noinit_allocate_memory(sizeof(REAL_T) * A->N * A->M); A->domain = bml_default_domain(A->N, A->M, distrib_mode); A->domain2 = bml_default_domain(A->N, A->M, distrib_mode); return A; } /** Allocate the zero matrix. * * Note that the matrix \f$ a \f$ will be newly allocated. If it is * already allocated then the matrix will be deallocated in the * process. * * \ingroup allocate_group * * \param matrix_precision The precision of the matrix. The default * is double precision. * \param N The matrix size. * \param M The number of non-zeroes per row. * \param distrib_mode The distribution mode. * \return The matrix. */ bml_matrix_ellpack_t *TYPED_FUNC( bml_zero_matrix_ellpack) ( int N, int M, bml_distribution_mode_t distrib_mode) { bml_matrix_ellpack_t *A = bml_allocate_memory(sizeof(bml_matrix_ellpack_t)); A->matrix_type = ellpack; A->matrix_precision = MATRIX_PRECISION; A->N = N; A->M = M; A->distribution_mode = distrib_mode; A->index = bml_allocate_memory(sizeof(int) * N * M); A->nnz = bml_allocate_memory(sizeof(int) * N); A->value = bml_allocate_memory(sizeof(REAL_T) * N * M); A->domain = bml_default_domain(N, M, distrib_mode); A->domain2 = bml_default_domain(N, M, distrib_mode); return A; } /** Allocate a banded random matrix. * * Note that the matrix \f$ a \f$ will be newly allocated. If it is * already allocated then the matrix will be deallocated in the * process. * * \ingroup allocate_group * * \param matrix_precision The precision of the matrix. The default * is double precision. * \param N The matrix size. * \param M The number of non-zeroes per row. * \param distrib_mode The distribution mode. * \return The matrix. 
*/ bml_matrix_ellpack_t *TYPED_FUNC( bml_banded_matrix_ellpack) ( int N, int M, bml_distribution_mode_t distrib_mode) { bml_matrix_ellpack_t *A = TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode); REAL_T *A_value = A->value; int *A_index = A->index; int *A_nnz = A->nnz; #pragma omp parallel for shared(A_value, A_index, A_nnz) for (int i = 0; i < N; i++) { int jind = 0; for (int j = (i - M / 2 >= 0 ? i - M / 2 : 0); j < (i - M / 2 + M <= N ? i - M / 2 + M : N); j++) { A_value[ROWMAJOR(i, jind, N, M)] = rand() / (REAL_T) RAND_MAX; A_index[ROWMAJOR(i, jind, N, M)] = j; jind++; } A_nnz[i] = jind; } return A; } /** Allocate a random matrix. * * Note that the matrix \f$ a \f$ will be newly allocated. If it is * already allocated then the matrix will be deallocated in the * process. * * \ingroup allocate_group * * \param matrix_precision The precision of the matrix. The default * is double precision. * \param N The matrix size. * \param M The number of non-zeroes per row. * \param distrib_mode The distribution mode. * \return The matrix. * * Note: Do not use OpenMP when setting values for a random matrix, * this makes the operation non-repeatable. */ bml_matrix_ellpack_t *TYPED_FUNC( bml_random_matrix_ellpack) ( int N, int M, bml_distribution_mode_t distrib_mode) { bml_matrix_ellpack_t *A = TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode); REAL_T *A_value = A->value; int *A_index = A->index; int *A_nnz = A->nnz; for (int i = 0; i < N; i++) { int jind = 0; for (int j = 0; j < M; j++) { A_value[ROWMAJOR(i, jind, N, M)] = rand() / (REAL_T) RAND_MAX; A_index[ROWMAJOR(i, jind, N, M)] = j; jind++; } A_nnz[i] = jind; } return A; } /** Allocate the identity matrix. * * Note that the matrix \f$ a \f$ will be newly allocated. If it is * already allocated then the matrix will be deallocated in the * process. * * \ingroup allocate_group * * \param matrix_precision The precision of the matrix. The default * is double precision. * \param N The matrix size. * \param M The number of non-zeroes per row. * \param distrib_mode The distribution mode. * \return The matrix. */ bml_matrix_ellpack_t *TYPED_FUNC( bml_identity_matrix_ellpack) ( int N, int M, bml_distribution_mode_t distrib_mode) { bml_matrix_ellpack_t *A = TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode); REAL_T *A_value = A->value; int *A_index = A->index; int *A_nnz = A->nnz; #pragma omp parallel for shared(A_value, A_index, A_nnz) for (int i = 0; i < N; i++) { A_value[ROWMAJOR(i, 0, N, M)] = (REAL_T) 1.0; A_index[ROWMAJOR(i, 0, N, M)] = i; A_nnz[i] = 1; } return A; }
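/*
 * Illustrative note (not part of the library): in the ELLPACK layout used
 * above, each of the N rows stores up to M entries, with ROWMAJOR(i, j, N, M)
 * assumed to address slot j of row i. The identity matrix built by
 * bml_identity_matrix_ellpack with N = 3, M = 2 then looks like
 *
 *     value = [ 1 . | 1 . | 1 . ]    index = [ 0 . | 1 . | 2 . ]
 *     nnz   = [ 1 1 1 ]
 *
 * where "." marks unused slots (left zero by the zero-matrix allocation).
 */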
broadcast_reduce-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace mxnet_op { template <int ndim, typename OP> struct binary_broadcast_kernel { /*! \brief Map function for binary_broadcast_kernel */ template <typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType* lhs, IType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template <typename LType, typename RType, typename OType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, LType* lhs, RType* rhs, OType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! 
\brief Map function for binary_broadcast_kernel */ template <typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType lhs, IType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template <typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType* lhs, DType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template < typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType lhs, DType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } }; template <int req, typename OP, bool col_vec> struct csr_dns_csr_broadcast_kernel { /*! 
* \brief Map function for broadcast between csr and 1D vector * \param row global thread id/assigned row id * \param csr_data ptr to data buffer of csr matrix * \param csr_indices ptr to indices buffer of csr matrix * \param csr_indptr ptr to indptr buffer of csr matrix * \param dns ptr to data buffer of the dense vector * \param out ptr to the data buffer of the result csr matrix */ template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType* csr_data, const CType* csr_indices, const RType* csr_indptr, const DType* dns, DType* out) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { KERNEL_ASSIGN( out[iter], req, OP::Map(csr_data[iter], (col_vec) ? dns[row] : dns[csr_indices[iter]])); } } /*! * \brief Map function for broadcast between csr and a scalar * \param i global thread id * \param csr_data ptr to data buffer of csr matrix * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used * \param out ptr to the data buffer of output csr matrix * \param nnz number of non-zero elements in input csr matrix */ template <typename DType> MSHADOW_XINLINE static void Map(index_t i, const DType* csr_data, const DType* scalar_ptr, DType* out, const nnvm::dim_t nnz) { const DType scale = scalar_ptr[0]; if (i < nnz) { KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale)); } } }; template <int req, typename OP, bool reverse = false> struct csr_dns_map_kernel { template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType* csr_data, const CType* csr_indices, const RType* csr_indptr, DType* out, const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { if (row < num_rows) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { const nnvm::dim_t target = row * num_cols + csr_indices[iter]; KERNEL_ASSIGN( out[target], req, reverse ? 
OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target])); } } } }; } // namespace mxnet_op namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template <int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim - 1, idx_t = idx; i >= 0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp * shape[i]; *j += coord * stridej[i]; *k += coord * stridek[i]; idx_t = tmp; } } template <int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template <typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { *dst = src; } } template <int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = mxnet_op::unravel(idx, oshape); const index_t j = mxnet_op::ravel(coord, lshape); const index_t k = mxnet_op::ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> MSHADOW_XINLINE std::pair<AType, AType> seq_reduce_assign_block(size_t start, size_t len, size_t j, const DType* __restrict big, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord; AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = start; k < start + len; ++k) { coord = mxnet_op::unravel(k, rshape); AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]); if (IndexOP::do_op) IndexOP::Op(&temp, k); Reducer::Reduce(val, temp, residual); } return std::make_pair(val, residual); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType* small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride, const bool use_omp = false) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); if (!use_omp) { for (size_t k = 0; k < M; ++k) { coord = mxnet_op::unravel(k, rshape); AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]); // argmin/max, set IndexedNum.idx if (IndexOP::do_op) IndexOP::Op(&temp, k); Reducer::Reduce(val, temp, residual); } } else { const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); auto pairs = std::make_unique<std::pair<AType, AType>[]>(thread_count); #pragma omp parallel for num_threads(thread_count) for (int i = 0; i < thread_count; ++i) { pairs[i] = 
seq_reduce_assign_block<Reducer, ndim, AType, DType, OType, OP, IndexOP>( i * (M / thread_count), i < (thread_count - 1) ? (M / thread_count) : (M / thread_count) + M % thread_count, j, big, rshape, rstride); } for (int i = 0; i < thread_count; ++i) { Reducer::Merge(val, residual, pairs[i].first, pairs[i].second); } } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } namespace { // Returns the stride with which the fastest dimension is moving. // Used to detect memory access scatter. inline int fastest_stride(const TShape& small, const TShape& big, const TShape& big_stride) { const int ndim = small.ndim(); for (int i = ndim - 1; i >= 0; --i) { if (big[i] != 1) { return (small[i] == big[i]) ? 1 : big_stride[i]; } } return 1; } } // namespace template <int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu>* s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { mshadow::Shape<ndim> oshape = out.shape_.get<ndim>(); mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>()); mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>::template LaunchEx( s, out.shape_.Size(), req, lstride, rstride, oshape, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>()); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType* big, OType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (N >= thread_count) { #pragma omp parallel for num_threads(thread_count) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>( idx, M, addto, big, small, bshape, sshape, rshape, rstride, false); } } else { for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>( idx, M, addto, big, small, bshape, sshape, rshape, rstride, true); } } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(N, 
M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // Returns a/b integer division rounded up template <typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1) / b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. 
number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2])) * num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[2], warpSize))) * (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part) * (uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1), rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) { // The largest reduction type currently is (index_t, double) struct // aligned to 16B constexpr size_t max_type_size = 2 * sizeof(double); constexpr int maxLoopPerTB = 64; int ndim = small.ndim(); diff(small, big, &rshape, &rstride); N = small.Size(); M = rshape[0]; for (int i = 1; i < ndim; ++i) { M *= rshape[i]; } bool multiOp = false; if (lhs != nullptr) { CHECK_NOTNULL(rhs); diff(small, *lhs, &lhs_shape, &lhs_stride); diff(small, *rhs, &rhs_shape, &rhs_stride); multiOp = true; } workspace_size = 0; kernel_1.shMemSize = 0; kernel_1.do_transpose = false; if (M == 1) { kernel_1.blockDim.x = nthread_reduce; kernel_1.gridDim.x = std::min( kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1) / kernel_1.blockDim.x)); } else { int reduce_strides[3]; reduce_strides[0] = fastest_stride(small, big, big); reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1; reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1; int reduce_strides_transp[3]; reduce_strides_transp[0] = fastest_stride(small, rshape, rstride); reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1; reduce_strides_transp[2] = (multiOp) ? 
fastest_stride(small, rhs_shape, rhs_stride) : 1; uint64_t num_load = calc_num_load(N, M, reduce_strides); uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp); Mnext = 1; kernel_1.do_transpose = (num_load > num_load_transp); kernel_1.blockDim.x = 0; kernel_1.blockDim.y = 0; if (kernel_1.do_transpose) { // Fastest thread ID goes through M // Loop over N has step size kernel_1.blockDim.y if (N < 8) { kernel_1.blockDim.y = 1; } else if (N < 256) { kernel_1.blockDim.y = 4; } else { if (M < 8) { kernel_1.blockDim.x = 1; } else if (M < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } else { // Fastest thread ID goes through N // Loop over M has step size kernel_1.blockDim.y if (M < 8) { kernel_1.blockDim.y = 1; } else if (M < 256) { kernel_1.blockDim.y = 4; } else { if (N < 8) { kernel_1.blockDim.x = 1; } else if (N < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) { LOG(FATAL) << "Unable to set blockDim"; } else if (kernel_1.blockDim.x == 0) { kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y; } else if (kernel_1.blockDim.y == 0) { kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x; } if (kernel_1.do_transpose) { // Fastest thread ID goes through M kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); int by = kernel_1.blockDim.y; if (kernel_1.blockDim.y % warpSize == 0) { // Fix shared memory bank conflict by++; } kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x * by * max_type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.x * maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } else { // Fastest thread ID goes through N kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ? 
kernel_1.blockDim.x * kernel_1.blockDim.y * max_type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.y * maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } if (Mnext > 1) { // small_dptr[] is N*Mnext*type_size bytes workspace_size += N * Mnext * max_type_size; // Set gridDim.y to Mnext kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); } if (Mnext > 1) { kernel_2.blockSize = nthread_reduce; kernel_2.gridSize = std::min( kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1) / kernel_2.blockSize)); } } } }; inline size_t ReduceWorkspaceSize(Stream<gpu>* s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, nullptr, nullptr); return config.workspace_size; } inline size_t ReduceWorkspaceSize(Stream<gpu>* s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const ::mxnet::TShape& lhs, const ::mxnet::TShape& rhs) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, &lhs, &rhs); return config.workspace_size; } #endif // MXNET_USE_CUDA template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType* small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = mxnet_op::unravel(idx, small_shape); const index_t idx_big0 = mxnet_op::ravel(coord, big_shape); const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0); const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = mxnet_op::unravel(k, rshape); index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride); Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType* big, const DType* lhs, const DType* rhs, DType* small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template <typename Reducer, int ndim, typename 
DType, typename OP1, typename OP2> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #if MXNET_USE_CUDA void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const std::string& reducer, int ndim, const std::string& OP, const bool use_index = false); void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs, const std::string& reducer, int ndim, const std::string& OP1, const std::string& OP2); #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
BenchUtils.h
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <chrono> #include <vector> #ifdef _OPENMP #include <omp.h> #endif #include "./AlignedVec.h" namespace fbgemm { template <typename T> void randFill(aligned_vector<T>& vec, T low, T high); aligned_vector<float> getRandomSparseVector( unsigned size, float fractionNonZeros = 1.0); void llc_flush(std::vector<char>& llc); int fbgemm_get_num_threads(); int fbgemm_get_thread_num(); /** * @param llc if not nullptr, flush llc */ template <class Fn> double measureWithWarmup( Fn&& fn, int warmupIterations, int measuredIterations, std::vector<char>* llc = nullptr, bool useOpenMP = false) { for (int i = 0; i < warmupIterations; ++i) { if (llc) { llc_flush(*llc); } fn(); } double ttot = 0.0; #ifdef _OPENMP #pragma omp parallel if (useOpenMP) #endif for (int i = 0; i < measuredIterations; ++i) { int thread_id = 0; std::chrono::time_point<std::chrono::high_resolution_clock> start, end; #ifdef _OPENMP if (useOpenMP) { thread_id = omp_get_thread_num(); } #endif if (llc && thread_id == 0) { llc_flush(*llc); } #ifdef _OPENMP if (useOpenMP) { #pragma omp barrier } #endif start = std::chrono::high_resolution_clock::now(); fn(); end = std::chrono::high_resolution_clock::now(); auto dur = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start); if (thread_id == 0) { // TODO: measure load imbalance ttot += dur.count(); } } return ttot / 1e9 / measuredIterations; } } // namespace fbgemm
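// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of this header): timing a kernel with
// measureWithWarmup(). `my_kernel` is a placeholder, and the 64 MB flush
// buffer size is an assumption rather than an FBGEMM-mandated value.
//
//   #include "BenchUtils.h"
//
//   void my_kernel();  // code under test (assumed to exist elsewhere)
//
//   double bench_seconds() {
//     std::vector<char> llc(64 * 1024 * 1024);  // flushed before each run
//     // 10 warmup calls, then 100 measured calls; returns mean seconds/call.
//     return fbgemm::measureWithWarmup(
//         []() { my_kernel(); }, /*warmupIterations=*/10,
//         /*measuredIterations=*/100, &llc, /*useOpenMP=*/false);
//   }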
Hola_mundo_paralelo.c
#include <stdio.h>
#include <omp.h>  /* required for omp_get_thread_num() and friends */

int main()
{
    #pragma omp parallel num_threads(4)
    //#pragma omp parallel
    {
        /* tid, nth and X must be private to each thread (declared inside
           the parallel region); sharing them would be a data race */
        int tid = omp_get_thread_num();
        int nth = omp_get_num_threads();
        int X = omp_get_max_threads();
        printf("Hello World\n");
        printf("AVAILABLE: %d \n", X);
        for (int i = 0; i < 10; i++)
            printf("Iteration: %d from thread %d out of %d\n", i, tid, nth);
    }
    printf("Goodbye \n");
    return 0;
}
deconvolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.data + out.w * i; float* outptr0 = outptr; float* outptr1 = outptr + outw; float* outptr2 = outptr + outw*2; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); #if 0 // bad compiler generate slow instructions :( // 0 float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // ext float32x4_t _zero_out01 = vdupq_n_f32(0.f); _zero_out01 = vextq_f32(_zero_out01, _out01, 3); _out00 = vaddq_f32(_out00, _zero_out01); // float32x2_t _out00low = vget_low_f32(_out00); float32x2_t _out00high = vget_high_f32(_out00); _out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0); _out00 = vcombine_f32(_out00low, _out00high); vst1q_f32(outptr0 + 0, _out00); // float32x2_t _out02high = vld1_f32(outptr0 + 4); float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1); _out02high = vadd_f32(_out02high, _out01_zero); _out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0); vst1_f32(outptr0 + 4, _out02high); // 1 float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // ext float32x4_t _zero_out11 = vdupq_n_f32(0.f); _zero_out11 = vextq_f32(_zero_out11, _out11, 3); _out10 = vaddq_f32(_out10, _zero_out11); // float32x2_t _out10low = vget_low_f32(_out10); float32x2_t _out10high = vget_high_f32(_out10); _out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0); _out10 = vcombine_f32(_out10low, _out10high); vst1q_f32(outptr1 + 0, _out10); // float32x2_t _out12high = vld1_f32(outptr1 + 4); float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1); _out12high = vadd_f32(_out12high, _out11_zero); _out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), 
vget_high_f32(_k1), 0); vst1_f32(outptr1 + 4, _out12high); // 2 float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // ext float32x4_t _zero_out21 = vdupq_n_f32(0.f); _zero_out21 = vextq_f32(_zero_out21, _out21, 3); _out20 = vaddq_f32(_out20, _zero_out21); // float32x2_t _out20low = vget_low_f32(_out20); float32x2_t _out20high = vget_high_f32(_out20); _out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0); _out20 = vcombine_f32(_out20low, _out20high); vst1q_f32(outptr2 + 0, _out20); // float32x2_t _out22high = vld1_f32(outptr2 + 4); float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1); _out22high = vadd_f32(_out22high, _out21_zero); _out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0); vst1_f32(outptr2 + 4, _out22high); #else // float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); vst1q_f32(outptr0 + 0, _out00); float32x4_t _out01 = vld1q_f32(outptr0 + 1); _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1); vst1q_f32(outptr0 + 1, _out01); float32x4_t _out02 = vld1q_f32(outptr0 + 2); _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0); vst1q_f32(outptr0 + 2, _out02); // float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); vst1q_f32(outptr1 + 0, _out10); float32x4_t _out11 = vld1q_f32(outptr1 + 1); _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1); vst1q_f32(outptr1 + 1, _out11); float32x4_t _out12 = vld1q_f32(outptr1 + 2); _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0); vst1q_f32(outptr1 + 2, _out12); // float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); vst1q_f32(outptr2 + 0, _out20); float32x4_t _out21 = vld1q_f32(outptr2 + 1); _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1); vst1q_f32(outptr2 + 1, _out21); float32x4_t _out22 = vld1q_f32(outptr2 + 2); _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0); vst1q_f32(outptr2 + 2, _out22); #endif r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0++; outptr1++; outptr2++; } } } } } static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.data + outw * i*2; float* outptr0 = outptr; float* outptr1 = outptr0 + outw; float* outptr2 = outptr1 + outw; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); // out row 0 float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6 float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7 float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8 float32x4x2_t _out0 = vld2q_f32(outptr0); _out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6 _out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7 vst2q_f32(outptr0, _out0); _out0 = vld2q_f32(outptr0 + 2); _out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8 vst2q_f32(outptr0 + 2, _out0); // out row 1 float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6 float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7 float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8 float32x4x2_t _out1 = vld2q_f32(outptr1); _out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6 _out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7 vst2q_f32(outptr1, _out1); _out1 = vld2q_f32(outptr1 + 2); _out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8 vst2q_f32(outptr1 + 2, _out1); // out row 2 float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6 float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7 float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8 float32x4x2_t _out2 = vld2q_f32(outptr2); _out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6 _out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7 vst2q_f32(outptr2, _out2); _out2 = vld2q_f32(outptr2 + 2); _out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8 vst2q_f32(outptr2 + 2, _out2); r0 += 4; outptr0 += 8; outptr1 += 8; outptr2 += 8; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0 += 2; outptr1 += 2; outptr2 += 2; } } } } }
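// ---------------------------------------------------------------------------
// Illustrative scalar reference (not part of the original file): what the
// NEON paths above compute for stride 1. Each input pixel scatters its value,
// weighted by the 3x3 kernel, into a 3x3 window of the output; with no
// padding the output is (w+2) x (h+2). Plain C++ with hypothetical names,
// single input/output channel, `out` assumed pre-filled with the bias (as
// out.fill(bias0) does above).
static void deconv3x3s1_ref(const float* in, int w, int h,
                            const float* k,  // 9 kernel weights, row-major
                            float* out)      // (w+2) x (h+2), bias-filled
{
    const int outw = w + 2;
    for (int i = 0; i < h; i++)
    {
        for (int j = 0; j < w; j++)
        {
            const float v = in[i * w + j];
            for (int ki = 0; ki < 3; ki++)      // 3 output rows touched
                for (int kj = 0; kj < 3; kj++)  // 3 output columns touched
                    out[(i + ki) * outw + (j + kj)] += v * k[ki * 3 + kj];
        }
    }
}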
mandel-omp-taskloop-row.c
/*
 * Mandelbrot program, OpenMP taskloop version (one task per block of rows)
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (display window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from the Web site for Wilkinson and Allen's
 * text on parallel programming:
 *   http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>

#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif

#include <sys/time.h>

double getusec_() {
    struct timeval time;
    gettimeofday(&time, NULL);
    return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
        stamp = stamp/1e6;\
        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define G 4           /* grainsize */
#define N 2           /* size of problem space (x, y from -N to N) */
#define NPIXELS 800   /* size of display window in pixels */

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif

void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return, Display *display, Window win, GC gc,
                double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display.
       row and col are declared inside the loops so that each task gets
       private copies; with file-scope variables the inner col loop would
       be a data race between taskloop tasks. */
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskloop grainsize(G)
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            complex z, c;

            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                /* height-1-row so y axis displays
                                 * with larger values at top */

            /* Calculate z0, z1, ....
until divergence or maximum iterations */ int k = 0; double lengthsq, temp; do { temp = z.real*z.real - z.imag*z.imag + c.real; z.imag = 2*z.real*z.imag + c.imag; z.real = temp; lengthsq = z.real*z.real + z.imag*z.imag; ++k; } while (lengthsq < (N*N) && k < maxiter); #if _DISPLAY_ /* Scale color and display point */ long color = (long) ((k-1) * scale_color) + min_color; if (setup_return == EXIT_SUCCESS) { #pragma omp critical { XSetForeground (display, gc, color); XDrawPoint (display, win, gc, col, row); } } #else output[row][col]=k; #endif } } } int main(int argc, char *argv[]) { int maxiter = 1000; double real_min; double real_max; double imag_min; double imag_max; int width = NPIXELS; /* dimensions of display window */ int height = NPIXELS; double size=N, x0 = 0, y0 = 0; #if _DISPLAY_ Display *display; Window win; GC gc; int setup_return; long min_color = 0, max_color = 0; double scale_color; #else int ** output; FILE *fp = NULL; #endif double scale_real, scale_imag; /* Process command-line arguments */ for (int i=1; i<argc; i++) { if (strcmp(argv[i], "-i")==0) { maxiter = atoi(argv[++i]); } else if (strcmp(argv[i], "-w")==0) { width = atoi(argv[++i]); height = width; } else if (strcmp(argv[i], "-s")==0) { size = atof(argv[++i]); } #if !_DISPLAY_ else if (strcmp(argv[i], "-o")==0) { if((fp=fopen("mandel.out", "wb"))==NULL) { fprintf(stderr, "Unable to open file\n"); return EXIT_FAILURE; } } #endif else if (strcmp(argv[i], "-c")==0) { x0 = atof(argv[++i]); y0 = atof(argv[++i]); } else { #if _DISPLAY_ fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); #else fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); fprintf(stderr, " -o to write computed image to disk (default no file generated)\n"); #endif fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n"); #if _DISPLAY_ fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n"); #else fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n"); #endif fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n"); fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. 
size 4 by 4)\n"); return EXIT_FAILURE; } } real_min = x0 - size; real_max = x0 + size; imag_min = y0 - size; imag_max = y0 + size; /* Produce text output */ fprintf(stdout, "\n"); fprintf(stdout, "Mandelbrot program\n"); fprintf(stdout, "center = (%g, %g), size = %g\n", (real_max + real_min)/2, (imag_max + imag_min)/2, (real_max - real_min)/2); fprintf(stdout, "maximum iterations = %d\n", maxiter); fprintf(stdout, "\n"); #if _DISPLAY_ /* Initialize for graphical display */ setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color); if (setup_return != EXIT_SUCCESS) { fprintf(stderr, "Unable to initialize display, continuing\n"); return EXIT_FAILURE; } #else output = malloc(height*sizeof(int *)); for (int row = 0; row < height; ++row) output[row] = malloc(width*sizeof(int)); #endif /* Compute factors to scale computational region to window */ scale_real = (double) (real_max - real_min) / (double) width; scale_imag = (double) (imag_max - imag_min) / (double) height; #if _DISPLAY_ /* Compute factor for color scaling */ scale_color = (double) (max_color - min_color) / (double) (maxiter - 1); #endif /* Start timing */ double stamp; START_COUNT_TIME; #if _DISPLAY_ mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, setup_return, display, win, gc, scale_color, min_color); #else mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output); #endif /* End timing */ STOP_COUNT_TIME("Total execution time"); /* Be sure all output is written */ #if _DISPLAY_ if (setup_return == EXIT_SUCCESS) { XFlush (display); } #else if (fp != NULL) { for (int row = 0; row < height; ++row) if(fwrite(output[row], sizeof(int), width, fp) != width) { fprintf(stderr, "Output file not written correctly\n"); } } #endif #if _DISPLAY_ /* Wait for user response, then exit program */ if (setup_return == EXIT_SUCCESS) { interact(display, &win, width, height, real_min, real_max, imag_min, imag_max); } return EXIT_SUCCESS; #endif }
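/* ---------------------------------------------------------------------------
 * Minimal standalone sketch (not part of the program above) of the taskloop
 * pattern mandelbrot() relies on: one thread creates the tasks, and
 * grainsize(G) assigns at least G and fewer than 2*G consecutive iterations
 * per task (OpenMP 4.5 semantics, when enough iterations remain). Names here
 * are illustrative only.
 */
#include <omp.h>
#include <stdio.h>

static void rows_demo(int height)
{
    #pragma omp parallel
    #pragma omp single          /* a single thread generates the tasks */
    #pragma omp taskloop grainsize(4)
    for (int row = 0; row < height; ++row) {
        /* each task processes a contiguous block of rows */
        printf("row %d on thread %d\n", row, omp_get_thread_num());
    }
}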
par_cheby.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Chebyshev setup and solve
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "_hypre_parcsr_mv.h"
#include "float.h"

/******************************************************************************
   Chebyshev relaxation

   Can specify order 1-4 (this is the order of the resid polynomial)- here we
   explicitly code the coefficients (instead of iteratively determining)

   variant 0: standard chebyshev
   this is rlx 11 if scale = 0, and 16 if scale == 1

   variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
   this is rlx 15 if scale = 0, and 17 if scale == 1

   ratio indicates the percentage of the whole spectrum to use (so .5
   means half, and .1 means 10 percent)
*******************************************************************************/

/**
 * @brief Sets up coefficients (and optional diagonal scaling elements) for
 * Chebyshev relaxation
 *
 * Will calculate ds_ptr on device/host depending on where A is located
 *
 * @param[in] A Matrix for which to set up
 * @param[in] max_eig Maximum eigenvalue
 * @param[in] min_eig Minimum eigenvalue
 * @param[in] fraction Fraction used to calculate lower bound
 * @param[in] order Polynomial order to use [1,4]
 * @param[in] scale Whether or not to scale by the diagonal
 * @param[in] variant Whether or not to use a variant of Chebyshev (0 standard, 1 variant)
 * @param[out] coefs_ptr *coefs_ptr will be allocated to contain coefficients of the polynomial
 * @param[out] ds_ptr *ds_ptr will be allocated to allow scaling by the diagonal
 */
HYPRE_Int
hypre_ParCSRRelax_Cheby_Setup(hypre_ParCSRMatrix *A,         /* matrix to relax with */
                              HYPRE_Real          max_eig,
                              HYPRE_Real          min_eig,
                              HYPRE_Real          fraction,
                              HYPRE_Int           order,     /* polynomial order */
                              HYPRE_Int           scale,     /* scale by diagonal?*/
                              HYPRE_Int           variant,
                              HYPRE_Real        **coefs_ptr,
                              HYPRE_Real        **ds_ptr)    /* diagonal scaling elements */
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real       theta, delta;
   HYPRE_Real       den;
   HYPRE_Real       upper_bound = 0.0, lower_bound = 0.0;
   HYPRE_Int        num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real      *coefs = NULL;
   HYPRE_Int        cheby_order;
   HYPRE_Real      *ds_data = NULL;

   /* u = u + p(A)r */
   if (order > 4) { order = 4; }
   if (order < 1) { order = 1; }

   coefs = hypre_CTAlloc(HYPRE_Real, order + 1, HYPRE_MEMORY_HOST);

   /* we are using the order of p(A) */
   cheby_order = order - 1;

   if (min_eig >= 0.0)
   {
      /* make sure we are large enough - Adams et al. 2003 */
      upper_bound = max_eig * 1.1;
      /* lower_bound = max_eig/fraction; */
      lower_bound = (upper_bound - min_eig) * fraction + min_eig;
   }
   else if (max_eig <= 0.0)
   {
      upper_bound = min_eig * 1.1;
      lower_bound = max_eig - (max_eig - upper_bound) * fraction;
   }

   /* theta and delta */
   theta = (upper_bound + lower_bound) / 2;
   delta = (upper_bound - lower_bound) / 2;

   if (variant == 1)
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials:
                                u = u_o + s(A)r_0  - so order is one less than the
                                resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0 / theta;
            break;

         case 1:  /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta * theta + delta * theta);
            coefs[0] = (delta + 2 * theta) / den;
            coefs[1] = -1.0 / den;
            break;

         case 2:  /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2 * delta * theta * theta - delta * delta * theta - pow(delta, 3) + 2 * pow(theta, 3);
            coefs[0] = (4 * delta * theta - pow(delta, 2) + 6 * pow(theta, 2)) / den;
            coefs[1] = -(2 * delta + 6 * theta) / den;
            coefs[2] = 2 / den;
            break;

         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4 * delta * pow(theta, 3) - 3 * pow(delta, 2) * pow(theta, 2) -
                     3 * pow(delta, 3) * theta + 4 * pow(theta, 4) );
            coefs[0] = (6 * pow(delta, 2) * theta - 12 * delta * pow(theta, 2) + 3 * pow(delta, 3) -
                        16 * pow(theta, 3) ) / den;
            coefs[1] = (12 * delta * theta - 3 * pow(delta, 2) + 24 * pow(theta, 2)) / den;
            coefs[2] = -( 4 * delta + 16 * theta) / den;
            coefs[3] = 4 / den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials:
                                u = u_o + s(A)r_0  - so order is one less than the
                                resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0 / theta;
            break;

         case 1:  /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta * delta - 2 * theta * theta;
            coefs[0] = -4 * theta / den;
            coefs[1] = 2 / den;
            break;

         case 2:  /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3 * (delta * delta) * theta - 4 * (theta * theta * theta);
            coefs[0] = (3 * delta * delta - 12 * theta * theta) / den;
            coefs[1] = 12 * theta / den;
            coefs[2] = -4 / den;
            break;

         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta, 4) - 8 * delta * delta * theta * theta + 8 * pow(theta, 4);
            coefs[0] = (32 * pow(theta, 3) - 16 * delta * delta * theta) / den;
            coefs[1] = (8 * delta * delta - 48 * theta * theta) / den;
            coefs[2] = 32 * theta / den;
            coefs[3] = -8 / den;
            break;
      }
   }
   *coefs_ptr = coefs;

   if (scale)
   {
      /*grab 1/sqrt(abs(diagonal)) */
      ds_data = hypre_CTAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
      hypre_CSRMatrixExtractDiagonal(hypre_ParCSRMatrixDiag(A), ds_data, 4);
   } /* end of scaling code */

   *ds_ptr = ds_data;

   return hypre_error_flag;
}

/**
 * @brief Solve using a chebyshev polynomial on the host
 *
 * @param[in] A Matrix to relax with
 * @param[in] f right-hand side
 * @param[in] ds_data Diagonal information
 * @param[in] coefs Polynomial coefficients
 * @param[in] order Order of the polynomial
 * @param[in] scale Whether or not to scale by diagonal
 * @param[in] variant Whether or not to use a variant
 * @param[in,out] u Initial/updated approximation
 * @param[in] v Temp vector
 * @param[in] r Temp Vector
 * @param[in] orig_u_vec Temp Vector
 * @param[in] tmp Temp Vector
 */
HYPRE_Int
hypre_ParCSRRelax_Cheby_SolveHost(hypre_ParCSRMatrix *A, /* matrix to
relax with */ hypre_ParVector *f, /* right-hand side */ HYPRE_Real *ds_data, HYPRE_Real *coefs, HYPRE_Int order, /* polynomial order */ HYPRE_Int scale, /* scale by diagonal?*/ HYPRE_Int variant, hypre_ParVector *u, /* initial/updated approximation */ hypre_ParVector *v, /* temporary vector */ hypre_ParVector *r, /* another vector */ hypre_ParVector *orig_u_vec, /*another temp vector */ hypre_ParVector *tmp_vec) /*a potential temp vector */ { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u)); HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f)); HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v)); HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r)); HYPRE_Int i, j; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Real mult; HYPRE_Real *orig_u; HYPRE_Int cheby_order; HYPRE_Real *tmp_data; /* u = u + p(A)r */ if (order > 4) { order = 4; } if (order < 1) { order = 1; } /* we are using the order of p(A) */ cheby_order = order - 1; hypre_assert(hypre_VectorSize(hypre_ParVectorLocalVector(orig_u_vec)) >= num_rows); orig_u = hypre_VectorData(hypre_ParVectorLocalVector(orig_u_vec)); if (!scale) { /* get residual: r = f - A*u */ hypre_ParVectorCopy(f, r); hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r); /* o = u; u = r .* coef */ for ( i = 0; i < num_rows; i++ ) { orig_u[i] = u_data[i]; u_data[i] = r_data[i] * coefs[cheby_order]; } for (i = cheby_order - 1; i >= 0; i-- ) { hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v); mult = coefs[i]; /* u = mult * r + v */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for ( j = 0; j < num_rows; j++ ) { u_data[j] = mult * r_data[j] + v_data[j]; } } /* u = o + u */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for ( i = 0; i < num_rows; i++ ) { u_data[i] = orig_u[i] + u_data[i]; } } else /* scaling! 
*/
   {
      /*grab 1/sqrt(diagonal) */
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));

      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u */
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
      /* r = ds .* (f + tmp) */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);
      }

      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/

      /* o = u;  u = r * coef */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }

      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
         /* tmp = ds .* u */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);

         /* u_new = coef*r + v*/
         mult = coefs[i];

         /* u = coef * r + ds .* v */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + ds_data[j] * v_data[j];
         }
      } /* end of cheby_order loop */

      /* now we have to scale u_data before adding it to u_orig*/

      /* u = orig_u + ds .* u */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j] * u_data[j];
      }
   }/* end of scaling code */

   return hypre_error_flag;
}

/**
 * @brief Solve using a chebyshev polynomial
 *
 * Determines whether to solve on host or device
 *
 * @param[in] A Matrix to relax with
 * @param[in] f right-hand side
 * @param[in] ds_data Diagonal information
 * @param[in] coefs Polynomial coefficients
 * @param[in] order Order of the polynomial
 * @param[in] scale Whether or not to scale by diagonal
 * @param[in] variant Whether or not to use a variant
 * @param[in,out] u Initial/updated approximation
 * @param[out] v Temp vector
 * @param[out] r Temp Vector
 * @param[out] orig_u_vec Temp Vector
 * @param[out] tmp_vec Temp Vector
 */
HYPRE_Int
hypre_ParCSRRelax_Cheby_Solve(hypre_ParCSRMatrix *A, /* matrix to relax with */
                              hypre_ParVector    *f, /* right-hand side */
                              HYPRE_Real         *ds_data,
                              HYPRE_Real         *coefs,
                              HYPRE_Int           order, /* polynomial order */
                              HYPRE_Int           scale, /* scale by diagonal?*/
                              HYPRE_Int           variant,
                              hypre_ParVector    *u, /* initial/updated approximation */
                              hypre_ParVector    *v, /* temporary vector */
                              hypre_ParVector    *r, /*another temp vector */
                              hypre_ParVector    *orig_u_vec, /*another temp vector */
                              hypre_ParVector    *tmp_vec) /*another temp vector */
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ParCSRRelaxChebySolve");
#endif

   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int             ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_ParCSRRelax_Cheby_SolveHost(A, f, ds_data, coefs, order, scale, variant,
                                               u, v, r, orig_u_vec, tmp_vec);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_ParCSRRelax_Cheby_SolveDevice(A, f, ds_data, coefs, order, scale, variant,
                                                 u, v, r, orig_u_vec, tmp_vec);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
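/* ---------------------------------------------------------------------------
 * Standalone sketch (not part of hypre): the Horner-style recurrence used in
 * hypre_ParCSRRelax_Cheby_SolveHost above. With u = coefs[m]*r and then
 * u = coefs[i]*r + A*u for i = m-1..0, the result is u = p(A) r where
 * p(t) = coefs[0] + coefs[1]*t + ... + coefs[m]*t^m. A 2x2 diagonal A keeps
 * the demo self-contained; all names are illustrative.
 */
#include <stdio.h>

int main(void)
{
   const double diagA[2] = {2.0, 3.0};   /* A = diag(2, 3)          */
   const double coefs[2] = {5.0, 1.0};   /* p(t) = 5 + t, so m = 1  */
   const double r[2]     = {1.0, 1.0};
   double u[2], v[2];
   int i, j;

   for (j = 0; j < 2; j++) { u[j] = r[j] * coefs[1]; }      /* u = c_m * r */
   for (i = 0; i >= 0; i--)                                 /* i = m-1..0  */
   {
      for (j = 0; j < 2; j++) { v[j] = diagA[j] * u[j]; }   /* v = A u     */
      for (j = 0; j < 2; j++) { u[j] = coefs[i] * r[j] + v[j]; }
   }
   /* p(A) r = (5 I + A) r: expect u = (7, 8) */
   printf("%g %g\n", u[0], u[1]);
   return 0;
}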
tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//
///////////////////////////////////////////////////////////////////////////

// End of OpenEXR license -------------------------------------------------

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
    defined(__i386) || defined(__i486__) || defined(__i486) ||  \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif

#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif

// Use miniz or not to decode ZIP format pixel. Linking with zlib is
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0)  // No threaded loading.
#endif

#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;  // this must be 2
  // tile format image;
  // not zero for only a single-part "normal" tiled file (according to spec.)
  int tiled;
  int long_name;  // long name attribute
  // deep image(EXR 2.0);
  // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
  int non_image;
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRBox2i {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} EXRBox2i;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  EXRBox2i data_window;
  EXRBox2i display_window;
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  // for a single-part file, agree with the version field bit 11
  // for a multi-part file, it is consistent with the type of part
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)

  // name attribute required for multipart files;
  // must be unique and non empty (according to spec.);
  // use EXRSetNameAttr for setting value;
  // max 255 character allowed - excluding terminating zero
  char name[256];
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  struct _EXRImage* next_level;  // NULL if scanline format or image is the last level.
  int level_x;  // x level index
  int level_y;  // y level index

  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba`. Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err`
// when there's an error. When the specified layer name is not found in the EXR
// file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                            const char *filename, const char *layer_name,
                            const char **err);

//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
                     int *num_layers, const char **err);

// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking at the header).
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename, const char **err);

// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. 
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Saves multi-channel, multi-frame OpenEXR image to a file.
// Image is compressed using EXRImage.compression value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
                                       const EXRHeader **exr_headers,
                                       unsigned int num_parts,
                                       const char *filename, const char **err);

// Saves multi-channel, multi-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
                                            const EXRHeader **exr_headers,
                                            unsigned int num_parts,
                                            unsigned char **memory,
                                            const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table).
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
//                                 const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
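// Example, as a sketch (`exr_bytes`/`exr_num_bytes` stand in for the
// application's in-memory EXR data; error handling abbreviated):
//
//   float *rgba = NULL;
//   int w, h;
//   const char *err = NULL;
//   if (LoadEXRFromMemory(&rgba, &w, &h, exr_bytes, exr_num_bytes, &err) ==
//       TINYEXR_SUCCESS) {
//     // pixel (x, y), channel c -> rgba[4 * (y * w + x) + c]
//     free(rgba);
//   }
//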
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>  // for UTF-8
#endif

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

// #include <iostream> // debug

#include <limits>
#include <string>
#include <vector>
#include <set>

// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>

#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif

#endif  // __cplusplus > 199711L

#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#include <miniz.h>
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`.
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif

#include "zfp.h"

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(float *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else float tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void 
swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}

// https://gist.github.com/rygorous/2156668
union FP32 {
  unsigned int u;
  float f;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RANDOM_Y      2
//
// #define IMF_NO_COMPRESSION  0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
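  // Scans at most `len` bytes; if no terminator is found within `len`, `s` is
  // cleared and NULL is returned (callers treat that as a buffer overrun).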
const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character. return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(&outLen); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int requested_pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { int min_x; int min_y; int max_x; int max_y; } Box2iInfo; struct HeaderInfo { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; Box2iInfo data_window; int line_order; Box2iInfo display_window; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tiled; // Non-zero if the part is tiled. 
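  // Note: the tile_* fields below are meaningful only when `tiled` is
  // non-zero.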
int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; // required for multi-part or non-image files std::string name; // required for multi-part or non-image files std::string type; void clear() { channels.clear(); attributes.clear(); data_window.min_x = 0; data_window.min_y = 0; data_window.max_x = 0; data_window.max_y = 0; line_order = 0; display_window.min_x = 0; display_window.min_y = 0; display_window.max_x = 0; display_window.max_y = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tiled = 0; tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; name.clear(); type.clear(); } }; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(&info.pixel_type); tinyexr::swap4(&info.x_sampling); tinyexr::swap4(&info.y_sampling); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].requested_pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(&pixel_type); tinyexr::swap4(&x_sampling); tinyexr::swap4(&y_sampling); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. 
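// (Delta encoding: each byte is replaced by its difference from the previous
// byte, biased by 128 + 256; after truncation to unsigned char, small
// differences cluster near 128, which deflate compresses well.)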
// { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // mz_ulong outSize = mz_compressBound(src_size); int ret = mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. 
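// Encoded format: a negative count byte -n is followed by n literal bytes,
// while a non-negative count byte n is followed by one byte that is repeated
// n + 1 times when decoded.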
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`. if (src_size <= 2) { return false; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); if (ret != static_cast<int>(uncompressed_size)) { return false; } // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
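// (Both variants store an (average, difference) pair: e.g. wenc14(10, 4)
// yields l = 7 and h = 6, and wdec14(7, 6) restores (10, 4) exactly.)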
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierarchical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
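// Each encoding-table entry packs a canonical Huffman code and its bit length
// into one value: (code << 6) | length (see hufCode()/hufLength() below).
// For example, the 4-bit code 0b1010 is stored as (0b1010 << 6) | 4 == 644.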
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- unsigned int len : 8; // code length 0 unsigned int lit : 24; // lit p size unsigned int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 <
      hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in 16-bit values)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//

#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
  {                                              \
    if (po == rlc) {                             \
      if (lc < 8) getChar(c, lc, in);            \
                                                 \
      lc -= 8;                                   \
                                                 \
      unsigned char cs = (c >> lc);              \
                                                 \
      if (out + cs > oe) return false;           \
                                                 \
      /* TinyEXR issue 78 */                     \
      unsigned short s = out[-1];                \
                                                 \
      while (cs-- > 0) *out++ = s;               \
    } else if (out < oe) {                       \
      *out++ = po;                               \
    } else {                                     \
      return false;                              \
    }                                            \
  }
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      /* TinyEXR issue 160. in + 1 -> in */
      if (in >= in_end) {
        return false;
      }

      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);

    if (out + cs > oe) return false;

    // Bounds check for safety
    // Issue 100.
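    // The run-length path duplicates the previously decoded value (out[-1]),
    // so `out` must already be past the start of the output buffer.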
if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); 
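  // (Stream header layout: [0]=im, [4]=iM, [8]=tableLength, [12]=nBits,
  // [16]=reserved, 20 bytes in total; see `compressed + 20` above.)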
writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSizeInBytes, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSizeInBytes) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short)); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short))); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; 
    tolerance = 0.0;
  }
};

static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }

  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }

  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        param->precision =
            *(reinterpret_cast<unsigned int *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }

  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          size_t num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40). Copy as-is; nothing to decode.
    memcpy(dst, src, src_size);
    return true;
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, static_cast<unsigned int>(dst_width),
                   static_cast<unsigned int>(dst_num_lines) *
                       static_cast<unsigned int>(num_channels));
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = size_t(dst_width) * size_t(dst_num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // decompress 4x4 pixel block.
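    // zfp codes 2-D float data in fixed 4x4 blocks: each call to
    // zfp_decode_block_float_2() below fills `fblock` with 16 samples in
    // row-major order. As a worked example, with dst_width = 8 the block at
    // (x = 4, y = 0) of channel c scatters into
    //   dst[c * image_size + 0 * 8 + 4] .. dst[c * image_size + 3 * 8 + 7].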
    for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
      for (size_t x = 0; x < size_t(dst_width); x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
                        unsigned int *outSize, const float *inPtr, int width,
                        int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, static_cast<unsigned int>(width),
                       static_cast<unsigned int>(num_lines * num_channels));

  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  outBuf->resize(buf_size);

  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);

  size_t image_size = size_t(width) * size_t(num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // compress 4x4 pixel block.
    for (size_t y = 0; y < size_t(num_lines); y += 4) {
      for (size_t x = 0; x < size_t(width); x += 4) {
        float fblock[16];
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));

  zfp_stream_close(zfp);
  // Release the bitstream object as well(the output buffer itself is owned by
  // the caller-provided std::vector).
  stream_close(stream);

  return true;
}
#endif  // TINYEXR_USE_ZFP

//
// -----------------------------------------------------------------
//

// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)

// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes,
                            size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
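    // Sizing note: the PIZ payload decompresses to exactly
    // width * num_lines * pixel_data_size bytes, where pixel_data_size is the
    // per-pixel sum over channels computed by ComputeChannelLayout()
    // (2 bytes for HALF, 4 bytes for FLOAT/UINT).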
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;

            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Rows in outBuf are `width` pixels wide; x_stride only applies to
          // the destination image.
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is not enabled in this build");
    return false;
#endif

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }

    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen,
            data_ptr, static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
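    // Worked example: for channels (B, G, R) stored as HALF with width = 640,
    // pixel_data_size = 6 and channel_offset_list = {0, 2, 4}, so scanline v
    // of channel c starts at byte offset
    //   v * 6 * 640 + channel_offset_list[c] * 640
    // within outBuf.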
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; std::string e; if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. assert(0); return false; } // Allocate original data size. 
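    // The ZFP path assumes every channel is FLOAT and that width/num_lines
    // are multiples of 4 (enforced inside DecompressZfp()); pixel_data_size
    // is therefore 4 * num_channels here.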
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines, num_channels, data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }

    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety.
              // #76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety.
              // #76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current
  // (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
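  // Edge tiles are clipped. For example, with data_width = 100 and
  // tile_size_x = 32, tiles 0..2 are 32 pixels wide while the last tile
  // (tile_offset_x = 3) is 100 - 3 * 32 = 4 pixels wide; tile_size_x is still
  // used as the row stride of the destination tile image below.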
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } #ifdef _WIN32 static inline std::wstring UTF8ToWchar(const std::string &str) { int wstr_size = MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0); std::wstring wstr(wstr_size, 0); MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0], (int)wstr.size()); return wstr; } #endif static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
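      // (A multipart header sequence is terminated by an empty header, i.e. a
      // single 0x00 byte where the next attribute name would start.)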
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; bool has_name = false; bool has_type = false; info->name.clear(); info->type.clear(); info->data_window.min_x = 0; info->data_window.min_y = 0; info->data_window.max_x = 0; info->data_window.max_y = 0; info->line_order = 0; // @fixme info->display_window.min_x = 0; info->display_window.min_y = 0; info->display_window.max_x = 0; info->display_window.max_y = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tiled = 0; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; // For a multipart file, the version field 9th bit is 0. 
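    // In other words, tiledness here is determined by the presence of the
    // "tiles" attribute together with the version flags checked below, not by
    // the version bit alone.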
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) || y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) { if (err) { (*err) = "Tile sizes were invalid."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; info->tiled = 1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->data_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->data_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->data_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->data_window.min_x); tinyexr::swap4(&info->data_window.min_y); tinyexr::swap4(&info->data_window.max_x); tinyexr::swap4(&info->data_window.max_y); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->display_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->display_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->display_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->display_window.min_x); tinyexr::swap4(&info->display_window.min_y); tinyexr::swap4(&info->display_window.max_x); tinyexr::swap4(&info->display_window.max_y); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4(&info->pixel_aspect_ratio); has_pixel_aspect_ratio = true; } } else if 
(attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4(&info->screen_window_center[0]); tinyexr::swap4(&info->screen_window_center[1]); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4(&info->screen_window_width); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(&info->chunk_count); } } else if (attr_name.compare("name") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->name.resize(len); info->name.assign(reinterpret_cast<const char*>(&data[0]), len); has_name = true; } } else if (attr_name.compare("type") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->type.resize(len); info->type.assign(reinterpret_cast<const char*>(&data[0]), len); has_type = true; } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (version->multipart || version->non_image) { if (!has_name) { ss_err << "\"name\" attribute not found in the header." << std::endl; } if (!has_type) { ss_err << "\"type\" attribute not found in the header." << std::endl; } } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. 
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window.min_x = info.display_window.min_x; exr_header->display_window.min_y = info.display_window.min_y; exr_header->display_window.max_x = info.display_window.max_x; exr_header->display_window.max_y = info.display_window.max_y; exr_header->data_window.min_x = info.data_window.min_x; exr_header->data_window.min_y = info.data_window.min_y; exr_header->data_window.max_x = info.data_window.max_x; exr_header->data_window.max_y = info.data_window.max_y; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tiled = info.tiled; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; EXRSetNameAttr(exr_header, info.name.c_str()); if (!info.type.empty()) { if (info.type == "scanlineimage") { assert(!exr_header->tiled); } else if (info.type == "tiledimage") { assert(exr_header->tiled); } else if (info.type == "deeptile") { exr_header->non_image = 1; assert(exr_header->tiled); } else if (info.type == "deepscanline") { exr_header->non_image = 1; assert(!exr_header->tiled); } else { assert(false); } } exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } struct OffsetData { OffsetData() : num_x_levels(0), num_y_levels(0) {} std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets; int num_x_levels; int num_y_levels; }; int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: return 0; case TINYEXR_TILE_MIPMAP_LEVELS: return lx; case TINYEXR_TILE_RIPMAP_LEVELS: return lx + ly * num_x_levels; default: assert(false); } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { assert(level >= 0); int b = (int)(1u << (unsigned)level); int level_size = toplevel_size / b; if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size) level_size += 1; return std::max(level_size, 1); } static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, const OffsetData& offset_data, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const unsigned char* head, const size_t size, std::string* err) { int num_channels = exr_header->num_channels; int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; enum { EF_SUCCESS = 0, EF_INVALID_DATA = 1, EF_INSUFFICIENT_DATA = 2, EF_FAILED_TO_DECODE = 4 }; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<unsigned> error_flag(EF_SUCCESS); #else unsigned error_flag(EF_SUCCESS); #endif // Although the spec says : "...the data window is 
  // subdivided into an array of smaller rectangles...", the IlmImf library
  // allows the dimensions of the tile to be larger (or equal) than the
  // dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width ||
       exr_header->tile_size_y > exr_image->height) &&
      exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  exr_image->tiles = static_cast<EXRTile *>(
      calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int tile_idx = 0;
      while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        exr_header->tile_size_x, exr_header->tile_size_y);

    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset =
        offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    size_t data_size = size_t(size - (offset + sizeof(int) * 5));
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offset);

    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);

    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }

    int data_len;
    memcpy(&data_len, data_ptr + 16,
           sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);

    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;

    bool ret = tinyexr::DecodeTiledPixelData(
        exr_image->tiles[tile_idx].images,
        &(exr_image->tiles[tile_idx].width),
        &(exr_image->tiles[tile_idx].height),
        exr_header->requested_pixel_types, data_ptr,
        static_cast<size_t>(data_len), exr_header->compression_type,
        exr_header->line_order, exr_image->width, exr_image->height,
        tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
        exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
        static_cast<size_t>(exr_header->num_custom_attributes),
        exr_header->custom_attributes,
        static_cast<size_t>(exr_header->num_channels), exr_header->channels,
        channel_offset_list);
    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }

    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }  // num_thread loop

  for (auto &t : workers) {
    t.join();
  }

#else
  }  // parallel for
#endif

  // Set these fields even on error, so that the memory reserved above can
  // still be freed by the caller.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);

  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;

#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes),
                                 err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) ||
        (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_width or data_height too large. data_width: "
           << data_width << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) ||
          (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile width or tile height too large. 
tile width: " << exr_header->tile_size_x << ", " << "tile height = " << exr_header->tile_size_y << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } } const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) { EXRImage* level_image = NULL; for (int level = 0; level < offset_data.num_x_levels; ++level) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode); level_image->level_x = level; level_image->level_y = level; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } else { EXRImage* level_image = NULL; for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y) for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode); level_image->level_x = level_x; level_image->level_y = level_y; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? 
      (total_data_len >= 0x4000000000) : false;

    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {
#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
      size_t y_idx = static_cast<size_t>(y);

      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(&line_no);
        tinyexr::swap4(&data_len);

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid.
          // 2 << 20 = 2097152 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example
          // `data_len < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window.max_y + 1));

          int num_lines = end_line_no - line_no;

          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y

            // overflow check
            tinyexr_int64 lno =
                static_cast<tinyexr_int64>(line_no) -
                static_cast<tinyexr_int64>(exr_header->data_window.min_y);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else {
              line_no -= exr_header->data_window.min_y;
            }

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker,
    const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

static int FloorLog2(unsigned x) {
  //
  // For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
  //
  int y = 0;
  while (x > 1) {
    y += 1;
    x >>= 1u;
  }
  return y;
}

static int CeilLog2(unsigned x) {
  //
  // For x > 0, ceilLog2(x) returns ceil(log(x)/log(2)).
  //
  int y = 0;
  int r = 0;
  while (x > 1) {
    if (x & 1) r = 1;

    y += 1;
    x >>= 1u;
  }
  return y + r;
}

static int RoundLog2(int x, int tile_rounding_mode) {
  return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN)
             ? FloorLog2(static_cast<unsigned>(x))
             : CeilLog2(static_cast<unsigned>(x));
}

static int CalculateNumXLevels(const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;

  int num = 0;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      int h = max_y - min_y + 1;
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    } break;

    case TINYEXR_TILE_RIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
    } break;

    default:
      assert(false);
  }

  return num;
}

static int CalculateNumYLevels(const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;
  int num = 0;

  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      int h = max_y - min_y + 1;
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    } break;

    case TINYEXR_TILE_RIPMAP_LEVELS: {
      int h = max_y - min_y + 1;
      num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
    } break;

    default:
      assert(false);
  }

  return num;
}

static void CalculateNumTiles(std::vector<int>& numTiles, int toplevel_size,
                              int size, int tile_rounding_mode) {
  for (unsigned i = 0; i < numTiles.size(); i++) {
    int l = LevelSize(toplevel_size, i, tile_rounding_mode);
    assert(l <= std::numeric_limits<int>::max() - size + 1);

    numTiles[i] = (l + size - 1) / size;
  }
}

static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;

  int num_x_levels = CalculateNumXLevels(exr_header);
  int num_y_levels =
CalculateNumYLevels(exr_header); num_x_tiles.resize(num_x_levels); num_y_tiles.resize(num_y_levels); CalculateNumTiles(num_x_tiles, max_x - min_x + 1, exr_header->tile_size_x, exr_header->tile_rounding_mode); CalculateNumTiles(num_y_tiles, max_y - min_y + 1, exr_header->tile_size_y, exr_header->tile_rounding_mode); } static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) { offset_data.offsets.resize(1); offset_data.offsets[0].resize(1); offset_data.offsets[0][0].resize(num_blocks); offset_data.num_x_levels = 1; offset_data.num_y_levels = 1; } // Return sum of tile blocks. static int InitTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const std::vector<int>& num_x_tiles, const std::vector<int>& num_y_tiles) { int num_tile_blocks = 0; offset_data.num_x_levels = static_cast<int>(num_x_tiles.size()); offset_data.num_y_levels = static_cast<int>(num_y_tiles.size()); switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: case TINYEXR_TILE_MIPMAP_LEVELS: assert(offset_data.num_x_levels == offset_data.num_y_levels); offset_data.offsets.resize(offset_data.num_x_levels); for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { offset_data.offsets[l].resize(num_y_tiles[l]); for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[l]); num_tile_blocks += num_x_tiles[l]; } } break; case TINYEXR_TILE_RIPMAP_LEVELS: offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels)); for (int ly = 0; ly < offset_data.num_y_levels; ++ly) { for (int lx = 0; lx < offset_data.num_x_levels; ++lx) { int l = ly * offset_data.num_x_levels + lx; offset_data.offsets[l].resize(num_y_tiles[ly]); for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[lx]); num_tile_blocks += num_x_tiles[lx]; } } } break; default: assert(false); } return num_tile_blocks; } static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) { for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0) return true; return false; } static bool isValidTile(const EXRHeader* exr_header, const OffsetData& offset_data, int dx, int dy, int lx, int ly) { if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false; int num_x_levels = offset_data.num_x_levels; int num_y_levels = offset_data.num_y_levels; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: if (lx == 0 && ly == 0 && offset_data.offsets.size() > 0 && offset_data.offsets[0].size() > static_cast<size_t>(dy) && offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_MIPMAP_LEVELS: if (lx < num_x_levels && ly < num_y_levels && offset_data.offsets.size() > static_cast<size_t>(lx) && offset_data.offsets[lx].size() > static_cast<size_t>(dy) && offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels); if (lx < num_x_levels && ly < num_y_levels && (offset_data.offsets.size() > idx) && offset_data.offsets[idx].size() > static_cast<size_t>(dy) && offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) { return true; 
        }
      }
      break;
    default:
      return false;
  }
  return false;
}

static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head,
                                   const unsigned char* marker,
                                   const size_t /*size*/,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (isMultiPartFile) {
          //int partNumber;
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(
              &packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(
              &packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          // next Int64 is unpacked sample size - skip that too
          marker += packed_offset_table_size + packed_sample_size + 8;
        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          marker += dataSize;
        }

        if (!isValidTile(exr_header, offset_data, tileX, tileY, levelX,
                         levelY))
          return;

        int level_idx = LevelIndex(levelX, levelY,
                                   exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}

// marker output is also updated.
static int ReadOffsets(OffsetData& offset_data, const unsigned char* head,
                       const unsigned char*& marker, const size_t size,
                       const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}

static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_x - exr_header->data_window.min_x == std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { if (data_width > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } if (exr_header->tiled) { if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. OffsetData offset_data; size_t num_blocks = 0; // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header. // If chunk_count > 0 then chunk_count must be equal to the calculated tile count. if (exr_header->tiled) { { std::vector<int> num_x_tiles, num_y_tiles; PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header); num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles); if (exr_header->chunk_count > 0) { if (exr_header->chunk_count != static_cast<int>(num_blocks)) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } } int ret = ReadOffsets(offset_data, head, marker, size, err); if (ret != TINYEXR_SUCCESS) return ret; if (IsAnyOffsetsAreInvalid(offset_data)) { ReconstructTileOffsets(offset_data, exr_header, head, marker, size, exr_header->multipart, exr_header->non_image); } } else if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
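    // The `chunkCount` attribute is optional for a single-part scanline
    // file; when present it gives the block count directly, otherwise the
    // count is derived below from data_height and the scanlines-per-block
    // of the compression method (1, 16 or 32).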
num_blocks = static_cast<size_t>(exr_header->chunk_count); InitSingleResolutionOffsets(offset_data, num_blocks); } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } InitSingleResolutionOffsets(offset_data, num_blocks); } if (!exr_header->tiled) { std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader &exr_header, std::vector<std::string> &layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end()) layer_names.push_back(full_name); } } } struct LayerChannel { explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {} size_t index; std::string name; }; static void ChannelsInLayer(const EXRHeader &exr_header, const std::string layer_name, std::vector<LayerChannel> &channels) { channels.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string ch_name(exr_header.channels[c].name); if (layer_name.empty()) { const size_t pos = ch_name.find_last_of('.'); if (pos != std::string::npos && pos < ch_name.size()) { ch_name = ch_name.substr(pos + 1); } } else { const size_t pos = ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // 
namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */ NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer( exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? 
channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * static_cast<int>(exr_header.tile_size_x) + i; const int jj = exr_image.tiles[it].offset_y * static_cast<int>(exr_header.tile_size_y) + j; const int idx = ii + jj * static_cast<int>(exr_image.width); // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
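          // (Tiles on the right/bottom border may extend past the data
          // window, so tile-local pixels that map outside the image are
          // skipped rather than written.)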
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); exr_header->multipart = version->multipart ? 1 : 0; exr_header->non_image = version->non_image ? 1 : 0; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
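  // Decoding HALF into FLOAT means every returned channel can be addressed
  // uniformly as float* below, at the cost of 2x memory for half images.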
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
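          // (ii, jj) map the tile-local pixel (i, j) into full-image
          // coordinates using the tile's offset measured in tile units.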
          if (ii >= exr_image.width) {
            continue;
          }
          if (jj >= exr_image.height) {
            continue;
          }

          const int srcIdx = i + j * exr_header.tile_size_x;
          unsigned char **src = exr_image.tiles[it].images;
          (*out_rgba)[4 * idx + 0] =
              reinterpret_cast<float **>(src)[idxR][srcIdx];
          (*out_rgba)[4 * idx + 1] =
              reinterpret_cast<float **>(src)[idxG][srcIdx];
          (*out_rgba)[4 * idx + 2] =
              reinterpret_cast<float **>(src)[idxB][srcIdx];
          if (idxA != -1) {
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[idxA][srcIdx];
          } else {
            (*out_rgba)[4 * idx + 3] = 1.0;
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename),
                             err);
    // TODO(syoyo): return wfopen_s error code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize < 16) {
    fclose(fp);  // do not leak the handle on the error path
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
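  // Layout of an EXR file as consumed here:
  //   bytes 0-3 : magic number 0x76 0x2f 0x31 0x01
  //   bytes 4-7 : version/flags
  //   then      : header attributes, terminated by a single 0x00 byte
  //   then      : offset table(s), followed by the pixel data chunks.
  // `header_len` counts the attribute bytes only, hence the +8 skip above.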
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } namespace tinyexr { // out_data must be allocated initially with the block-header size // of the current image(-part) type static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data, const unsigned char* const* images, int compression_type, int /*line_order*/, int width, // for tiled : tile.width int /*height*/, // for tiled : header.tile_size_y int x_stride, // for tiled : header.tile_size_x int line_no, // for tiled : 0 int num_lines, // for tiled : tile.height size_t pixel_data_size, const std::vector<ChannelInfo>& channels, const std::vector<size_t>& channel_offset_list, const void* compression_param = 0) // zfp compression param { size_t buf_size = static_cast<size_t>(width) * static_cast<size_t>(num_lines) * static_cast<size_t>(pixel_data_size); //int last2bit = (buf_size & 3); // buf_size must be multiple of four //if(last2bit) buf_size += 4 - last2bit; std::vector<unsigned char> buf(buf_size); size_t start_y = static_cast<size_t>(line_no); for (size_t c = 0; c < channels.size(); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(&f32.f); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned short val = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { float val = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == 
TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned int val = reinterpret_cast<const unsigned int * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) out_data.insert(out_data.end(), buf.begin(), buf.end()); } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, width, num_lines); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param); std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else (void)compression_param; assert(0); #endif } else { assert(0); return false; } return true; } static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header, const std::vector<tinyexr::ChannelInfo>& channels, std::vector<std::vector<unsigned char> >& data_list, size_t start_index, // for data_list int num_x_tiles, int num_y_tiles, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const void* compression_param, // must be set if zfp compression is enabled std::string* err) { int num_tiles = num_x_tiles * num_y_tiles; assert(num_tiles == level_image->num_tiles); if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) && level_image->level_x == 0 && level_image->level_y == 0) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = tile_count++) < num_tiles) { #else // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_tiles; i++) { #endif size_t tile_idx = static_cast<size_t>(i); size_t data_idx = tile_idx + start_index; int x_tile = i % num_x_tiles; int y_tile = i / num_x_tiles; EXRTile& tile = level_image->tiles[tile_idx]; const unsigned char* const* images = static_cast<const unsigned char* const*>(tile.images); data_list[data_idx].resize(5*sizeof(int)); size_t data_header_size = data_list[data_idx].size(); bool ret = EncodePixelData(data_list[data_idx], images, exr_header->compression_type, 0, // increasing y tile.width, exr_header->tile_size_y, exr_header->tile_size_x, 0, tile.height, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; } assert(data_list[data_idx].size() > data_header_size); int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size); //tileX, tileY, levelX, levelY // pixel_data_size(int) memcpy(&data_list[data_idx][0], &x_tile, sizeof(int)); 
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int)); memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int)); memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int)); memcpy(&data_list[data_idx][16], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[data_idx][0])); swap4(reinterpret_cast<int*>(&data_list[data_idx][4])); swap4(reinterpret_cast<int*>(&data_list[data_idx][8])); swap4(reinterpret_cast<int*>(&data_list[data_idx][12])); swap4(reinterpret_cast<int*>(&data_list[data_idx][16])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } return TINYEXR_SUCCESS; } static int NumScanlines(int compression_type) { int num_scanlines = 1; if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } return num_scanlines; } static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, const std::vector<ChannelInfo>& channels, int num_blocks, tinyexr_uint64 chunk_offset, // starting offset of current chunk bool is_multipart, OffsetData& offset_data, // output block offsets, must be initialized std::vector<std::vector<unsigned char> >& data_list, // output tinyexr_uint64& total_size, // output: ending offset of current chunk std::string* err) { int num_scanlines = NumScanlines(exr_header->compression_type); data_list.resize(num_blocks); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; { size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } } const void* compression_param = 0; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { std::string e; bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes, &e); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } compression_param = &zfp_compression_param; } #endif tinyexr_uint64 offset = chunk_offset; tinyexr_uint64 doffset = is_multipart ? 4u : 0u; if (exr_image->tiles) { const EXRImage* level_image = exr_image; size_t block_idx = 0; tinyexr::tinyexr_uint64 block_data_size = 0; int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? 
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { if (!level_image) { if (err) { (*err) += "Invalid number of tiled levels for EncodeChunk\n"; } return TINYEXR_ERROR_INVALID_DATA; } int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); if (level_index_from_image != level_index) { if (err) { (*err) += "Incorrect level ordering in tiled image\n"; } return TINYEXR_ERROR_INVALID_DATA; } int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); std::string e; int ret = EncodeTiledLevel(level_image, exr_header, channels, data_list, block_idx, num_x_tiles, num_y_tiles, channel_offset_list, pixel_data_size, compression_param, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty() && err) { (*err) += e; } return ret; } for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j) for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) { offset_data.offsets[level_index][j][i] = offset; swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i])); offset += data_list[block_idx].size() + doffset; block_data_size += data_list[block_idx].size(); ++block_idx; } level_image = level_image->next_level; } assert(static_cast<int>(block_idx) == num_blocks); total_size = offset; } else { // scanlines std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); std::vector<std::thread> workers; std::atomic<int> block_count(0); int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks); for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = block_count++) < num_blocks) { #else bool invalid_data(false); #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { #endif int start_y = num_scanlines * i; int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height); int num_lines = end_Y - start_y; const unsigned char* const* images = static_cast<const unsigned char* const*>(exr_image->images); data_list[i].resize(2*sizeof(int)); size_t data_header_size = data_list[i].size(); bool ret = EncodePixelData(data_list[i], images, exr_header->compression_type, 0, // increasing y exr_image->width, exr_image->height, exr_image->width, start_y, num_lines, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; // "break" cannot be used with OpenMP } assert(data_list[i].size() > data_header_size); int data_len = static_cast<int>(data_list[i].size() - data_header_size); memcpy(&data_list[i][0], &start_y, sizeof(int)); memcpy(&data_list[i][4], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[i][0])); swap4(reinterpret_cast<int*>(&data_list[i][4])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode scanline data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size() + doffset; } total_size = 
static_cast<size_t>(offset);
  }
  return TINYEXR_SUCCESS;
}

// can save a single or multi-part image (no deep* formats)
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out,
                                        const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err);
    return 0;
  }
  {
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] !=
            TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }

  std::vector<unsigned char> memory;

  // Header
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }

  // Version
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] = InitTileOffsets(offset_data[i], exr_headers[i],
                                         num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
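  // Each attribute is serialized as:
  //   attribute name  (null-terminated string)
  //   attribute type  (null-terminated string)
  //   attribute size  (int, 4 bytes, little-endian)
  //   attribute value (size bytes)
  // which is what WriteAttributeToMemory emits for every entry below.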
std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts); { std::set<std::string> partnames; for (unsigned int i = 0; i < num_parts; ++i) { //channels { std::vector<unsigned char> data; for (int c = 0; c < exr_headers[i]->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_headers[i]->pixel_types[c]; info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_headers[i]->channels[c].name); channels[i].push_back(info); } tinyexr::WriteChannelInfo(data, channels[i]); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_headers[i]->compression_type; swap4(&comp); WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char*>(&comp), 1); } { int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 }; swap4(&data[0]); swap4(&data[1]); swap4(&data[2]); swap4(&data[3]); WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4); int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 }; swap4(&data0[0]); swap4(&data0[1]); swap4(&data0[2]); swap4(&data0[3]); // Note: must be the same across parts (currently, using value from the first header) WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { // Note: must be the same across parts float aspectRatio = 1.0f; swap4(&aspectRatio); WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; swap4(&center[0]); swap4(&center[1]); WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float)); } { float w = 1.0f; swap4(&w); WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char*>(&w), sizeof(float)); } if (exr_images[i].tiles) { unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3); if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u); //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int datai[3] = { 0, 0, 0 }; unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]); datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x); datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y); data[8] = tile_mode; swap4(reinterpret_cast<unsigned int*>(&data[0])); swap4(reinterpret_cast<unsigned int*>(&data[4])); WriteAttributeToMemory( &memory, "tiles", "tiledesc", reinterpret_cast<const unsigned char*>(data), 9); } // must be present for multi-part files - according to spec. 
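      // For multi-part files the spec additionally requires the `name`,
      // `type` and `chunkCount` attributes in every part header; they are
      // emitted below only when num_parts > 1.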
if (num_parts > 1) { // name { size_t len = 0; if ((len = strlen(exr_headers[i]->name)) > 0) { partnames.insert(std::string(exr_headers[i]->name)); if (partnames.size() != i + 1) { SetErrorMessage("'name' attributes must be unique for a multi-part file", err); return 0; } WriteAttributeToMemory( &memory, "name", "string", reinterpret_cast<const unsigned char*>(exr_headers[i]->name), static_cast<int>(len)); } else { SetErrorMessage("Invalid 'name' attribute for a multi-part file", err); return 0; } } // type { const char* type = "scanlineimage"; if (exr_images[i].tiles) type = "tiledimage"; WriteAttributeToMemory( &memory, "type", "string", reinterpret_cast<const unsigned char*>(type), static_cast<int>(strlen(type))); } // chunkCount { WriteAttributeToMemory( &memory, "chunkCount", "int", reinterpret_cast<const unsigned char*>(&chunk_count[i]), 4); } } // Custom attributes if (exr_headers[i]->num_custom_attributes > 0) { for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) { tinyexr::WriteAttributeToMemory( &memory, exr_headers[i]->custom_attributes[j].name, exr_headers[i]->custom_attributes[j].type, reinterpret_cast<const unsigned char*>( exr_headers[i]->custom_attributes[j].value), exr_headers[i]->custom_attributes[j].size); } } { // end of header memory.push_back(0); } } } if (num_parts > 1) { // end of header list memory.push_back(0); } tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64); tinyexr_uint64 total_size = 0; std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts); for (unsigned int i = 0; i < num_parts; ++i) { std::string e; int ret = EncodeChunk(&exr_images[i], exr_headers[i], channels[i], chunk_count[i], // starting offset of current chunk after part-number chunk_offset, num_parts > 1, offset_data[i], // output: block offsets, must be initialized data_lists[i], // output total_size, // output &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return 0; } chunk_offset = total_size; } // Allocating required memory if (total_size == 0) { // something went wrong tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char*>(malloc(total_size)); // Writing header memcpy((*memory_out), &memory[0], memory.size()); unsigned char* memory_ptr = *memory_out + memory.size(); size_t sum = memory.size(); // Writing offset data for chunks for (unsigned int i = 0; i < num_parts; ++i) { if (exr_images[i].tiles) { const EXRImage* level_image = &exr_images[i]; int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? 
offset_data[i].num_x_levels :
        (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size();
             ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) *
                             offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(
                     &offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
        level_image = level_image->next_level;
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) *
                         static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets =
          offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]),
             num_bytes);
      memory_ptr += num_bytes;
    }
  }

  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);

  return total_size;  // OK
}
}  // tinyexr

size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1,
                                            memory_out, err);
}

int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  if (exr_image == NULL || exr_header == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // close the handle before bailing out
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out,
                                     const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      memory_out == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for SaveEXRMultipartImageToMemory", err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers,
                                            num_parts, memory_out, err);
}

int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
    tinyexr::SetErrorMessage(
        "Invalid argument for SaveEXRMultipartImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers,
                                                  num_parts, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // close the handle before bailing out
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}

int LoadDeepEXR(DeepImage *deep_image, const char *filename,
                const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
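  // The 4-byte version field: byte 0 is the format version (2); byte 1
  // carries the flag bits: 0x02 = tiled, 0x04 = long attribute names,
  // 0x08 = deep data (bit 0x800 of the 32-bit word), 0x10 = multi-part.
  // This deep loader accepts exactly the deep-data flag and nothing else.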
{ // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(&dx); tinyexr::swap4(&dy); tinyexr::swap4(&dw); tinyexr::swap4(&dh); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(&x); tinyexr::swap4(&y); tinyexr::swap4(&w); tinyexr::swap4(&h); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
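  // The offset table stores one absolute, little-endian 8-byte file offset
  // per chunk. A chunk covers up to num_scanline_blocks scanlines (16 for
  // ZIP, otherwise 1 here), so the table read below holds
  // ceil(data_height / num_scanline_blocks) entries.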
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->next_level = NULL; exr_image->level_x = 0; exr_image->level_y = 0; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void 
*>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } EXRSetNameAttr(exr_header, NULL); return TINYEXR_SUCCESS; } void EXRSetNameAttr(EXRHeader* exr_header, const char* name) { if (exr_header == NULL) { return; } memset(exr_header->name, 0, 256); if (name != NULL) { size_t len = std::min(strlen(name), (size_t)255); if (len) { memcpy(exr_header->name, name, len); } } } int EXRNumLevels(const EXRImage* exr_image) { if (exr_image == NULL) return 0; if(exr_image->images) return 1; // scanlines int levels = 1; const EXRImage* level_image = exr_image; while((level_image = level_image->next_level)) ++levels; return levels; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_image->next_level) { FreeEXRImage(exr_image->next_level); delete exr_image->next_level; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, 
size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); memset(exr_header, 0, sizeof(EXRHeader)); ConvertHeader(exr_header, infos[i]); exr_header->multipart = exr_version->multipart ? 1 : 0; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. 
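  // The magic number spells "v/1\x01" (0x76 0x2f 0x31 0x01); every valid
  // EXR file, single- or multi-part, begins with these four bytes.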
{ const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (err != 0) { // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<tinyexr::OffsetData> chunk_offset_table_list; chunk_offset_table_list.reserve(num_parts); for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1); tinyexr::OffsetData& offset_data = chunk_offset_table_list.back(); if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) { tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count); std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0]; for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } } else { { std::vector<int> num_x_tiles, num_y_tiles; tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number' marker += sizeof(tinyexr::tinyexr_uint64); // = 8 } } } } } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { tinyexr::OffsetData &offset_data = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { const unsigned char *part_number_addr = memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
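/*
   Minimal usage sketch (not part of tinyexr itself) for the one-call
   SaveEXR() API implemented above. The gradient data and the output
   filename "gradient.exr" are illustrative placeholders.
*/
// #define TINYEXR_IMPLEMENTATION   // in exactly one translation unit
// #include "tinyexr.h"
#include <cstdio>
#include <vector>

static int SaveGradientExample() {
  const int width = 64, height = 64, components = 3;  // RGB
  std::vector<float> rgb(static_cast<size_t>(width) * height * components);
  for (int y = 0; y < height; y++)
    for (int x = 0; x < width; x++)
      for (int c = 0; c < components; c++)  // horizontal gray ramp
        rgb[(static_cast<size_t>(y) * width + x) * components + c] =
            float(x) / float(width - 1);

  const char *err = NULL;
  // save_as_fp16 = 1 stores HALF pixels; SaveEXR() above picks ZIP
  // compression automatically for an image of this size.
  int ret = SaveEXR(rgb.data(), width, height, components,
                    /*save_as_fp16=*/1, "gradient.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(unknown)");
    FreeEXRErrorMessage(err);  // release library-allocated error strings
  }
  return ret;
}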
omp_parallel_for_if.c
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
    int known_sum;
    int num_threads = 0, num_threads2 = 0;
    int sum = 0, sum2 = 0;
    int i;
    int control;

    control = 0;
#pragma omp parallel for <ompts:check>if (control==1)</ompts:check>
    for (i=0; i <= LOOPCOUNT; i++)
    {
        num_threads = omp_get_num_threads();
        sum = sum + i;
    } /* end of for */

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf (logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
    return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
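/*
   Standalone sketch (outside the testsuite harness, with LOOPCOUNT replaced
   by a literal) of the property the <ompts:check> clause above verifies:
   when the if() expression is false, the parallel for is executed by the
   initial thread alone, so omp_get_num_threads() reports 1 inside the region.
*/
#include <omp.h>
#include <stdio.h>

int main(void) {
  int sum = 0, num_threads = 0;
  const int control = 0;   /* false condition -> region runs serially */
  int i;
#pragma omp parallel for if (control == 1) reduction(+ : sum)
  for (i = 0; i <= 1000; i++) {
    sum += i;
    num_threads = omp_get_num_threads();
  }
  printf("threads = %d, sum = %d (expected 1 and %d)\n",
         num_threads, sum, (1000 * 1001) / 2);
  return 0;
}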
nested.c
// RUN: %compile-run-and-check

#include <omp.h>
#include <stdio.h>

const int MaxThreads = 1024;
const int NumThreads = 64;
const int NumThreads1 = 1;

int main(int argc, char *argv[]) {
  int inParallel = -1, numThreads = -1, threadNum = -1;
  int check1[MaxThreads];
  int check2[MaxThreads];
  for (int i = 0; i < MaxThreads; i++) {
    check1[i] = check2[i] = 0;
  }

#pragma omp target map(inParallel, numThreads, threadNum, check1[:], check2[:])
  {
    inParallel = omp_in_parallel();
    numThreads = omp_get_num_threads();
    threadNum = omp_get_thread_num();

    // Expecting active parallel region.
#pragma omp parallel num_threads(NumThreads)
    {
      int id = omp_get_thread_num();
      check1[id] += omp_get_num_threads() + omp_in_parallel();

      // Expecting serialized parallel region.
#pragma omp parallel
      {
        // Expected to be 1.
        int nestedInParallel = omp_in_parallel();
        // Expected to be 1.
        int nestedNumThreads = omp_get_num_threads();
        // Expected to be 0.
        int nestedThreadNum = omp_get_thread_num();
#pragma omp atomic
        check2[id] += nestedInParallel + nestedNumThreads + nestedThreadNum;
      }
    }
  }

  // CHECK: target: inParallel = 0, numThreads = 1, threadNum = 0
  printf("target: inParallel = %d, numThreads = %d, threadNum = %d\n",
         inParallel, numThreads, threadNum);

  // CHECK-NOT: invalid
  for (int i = 0; i < MaxThreads; i++) {
    // Check that all threads reported
    // omp_get_num_threads() = 64, omp_in_parallel() = 1.
    int Expected = NumThreads + 1;
    if (i < NumThreads) {
      if (check1[i] != Expected) {
        printf("invalid: check1[%d] should be %d, is %d\n", i, Expected,
               check1[i]);
      }
    } else if (check1[i] != 0) {
      printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
    }

    // Check serialized parallel region.
    if (i < NumThreads) {
      if (check2[i] != 2) {
        printf("invalid: check2[%d] should be 2, is %d\n", i, check2[i]);
      }
    } else if (check2[i] != 0) {
      printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
    }
  }

  inParallel = -1;
  numThreads = -1;
  threadNum = -1;
  for (int i = 0; i < MaxThreads; i++) {
    check1[i] = check2[i] = 0;
  }

#pragma omp target map(inParallel, numThreads, threadNum, check1[:], check2[:])
  {
    inParallel = omp_in_parallel();
    numThreads = omp_get_num_threads();
    threadNum = omp_get_thread_num();

    // Expecting inactive parallel region (a team of a single thread).
#pragma omp parallel num_threads(NumThreads1)
    {
      int id = omp_get_thread_num();
      check1[id] += omp_get_num_threads() + omp_in_parallel();

      // Expecting serialized parallel region.
#pragma omp parallel
      {
        // Expected to be 0.
        int nestedInParallel = omp_in_parallel();
        // Expected to be 1.
        int nestedNumThreads = omp_get_num_threads();
        // Expected to be 0.
        int nestedThreadNum = omp_get_thread_num();
#pragma omp atomic
        check2[id] += nestedInParallel + nestedNumThreads + nestedThreadNum;
      }
    }
  }

  // CHECK: target: inParallel = 0, numThreads = 1, threadNum = 0
  printf("target: inParallel = %d, numThreads = %d, threadNum = %d\n",
         inParallel, numThreads, threadNum);

  // CHECK-NOT: invalid
  for (int i = 0; i < MaxThreads; i++) {
    // Check that all threads reported
    // omp_get_num_threads() = 1, omp_in_parallel() = 0.
    int Expected = 1;
    if (i < NumThreads1) {
      if (check1[i] != Expected) {
        printf("invalid: check1[%d] should be %d, is %d\n", i, Expected,
               check1[i]);
      }
    } else if (check1[i] != 0) {
      printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
    }

    // Check serialized parallel region.
    if (i < NumThreads1) {
      if (check2[i] != 1) {
        printf("invalid: check2[%d] should be 1, is %d\n", i, check2[i]);
      }
    } else if (check2[i] != 0) {
      printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
    }
  }

  return 0;
}
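/*
   Host-only sketch of the nesting rule the target test above relies on:
   with nested parallelism disabled (the usual default), an inner parallel
   region is serialized into a team of one thread, while omp_in_parallel()
   keeps reporting the state of the enclosing active region.
*/
#include <omp.h>
#include <stdio.h>

int main(void) {
#pragma omp parallel num_threads(4)
  {
#pragma omp single
    {
#pragma omp parallel   /* serialized unless nesting has been enabled */
      {
        /* expected: size = 1, in_parallel = 1 (outer region is active) */
        printf("inner team size = %d, in_parallel = %d\n",
               omp_get_num_threads(), omp_in_parallel());
      }
    }
  }
  return 0;
}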
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include "MagickCore/token-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. 
% % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=GetMagickTime(); image->debug=(GetLogEventMask() & (ImageEvent | TransformEvent | CoderEvent)) != 0 ? MagickTrue : MagickFalse; image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->matte_color=image_info->matte_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if ((double) image->delay > floor(geometry_info.rho+0.5)) image->delay=(size_t) CastDoubleToLong(floor( geometry_info.rho+0.5)); } else if ((flags & LessValue) != 0) { if ((double) image->delay < floor(geometry_info.rho+0.5)) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else image->delay=(size_t) CastDoubleToLong(floor(geometry_info.rho+0.5)); if ((flags & SigmaValue) != 0) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info)); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info,exception); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MagickPathExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MagickPathExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; ImageType image_type; MagickBooleanType homogeneous_colorspace, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. 
*/ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); alpha_trait=images->alpha_trait; number_images=1; width=images->columns; height=images->rows; depth=images->depth; image_type=images->type; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->type != images->type) image_type=UndefinedType; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (image_type != BilevelType) { if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse) { append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace,exception); } append_image->depth=depth; append_image->alpha_trait=alpha_trait; append_image->page=images->page; (void) SetImageBackgroundColor(append_image,exception); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; PixelInfo pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(next,&pixel); for (x=0; x < (ssize_t) next->columns; x++) { GetPixelInfoPixel(next,p,&pixel); SetPixelViaPixelInfo(append_image,&pixel,q); p+=GetPixelChannels(next); q+=GetPixelChannels(append_image); } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I 
m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(pathname != NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside != MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); image->mask_trait=UpdatePixelTrait; clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale_x, scale_y; Image *clone_image; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=image->debug; clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale_x=1.0; scale_y=1.0; if (image->columns != 0) scale_x=(double) columns/(double) image->columns; if (image->rows != 0) scale_y=(double) rows/(double) image->rows; clone_image->page.width=(size_t) CastDoubleToLong(floor(scale_x* image->page.width+0.5)); clone_image->page.height=(size_t) CastDoubleToLong(floor(scale_y* image->page.height+0.5)); if (MagickAbsoluteValue(scale_x-scale_y) < 2.0) scale_x=scale_y=MagickMin(scale_x,scale_y); clone_image->page.x=CastDoubleToLong(ceil(scale_x*image->page.x-0.5)); clone_image->tile_offset.x=CastDoubleToLong(ceil(scale_x* image->tile_offset.x-0.5)); clone_image->page.y=CastDoubleToLong(ceil(scale_y*image->page.y-0.5)); clone_image->tile_offset.y=CastDoubleToLong(ceil(scale_y* image->tile_offset.y-0.5)); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
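%
%  A minimal usage sketch (an illustration, not taken from any particular
%  caller; error handling elided):
%
%      ImageInfo *clone_info = CloneImageInfo(image_info);
%      clone_info->verbose=MagickTrue;   /* modify the copy only */
%      /* ... read or write images with clone_info ... */
%      clone_info=DestroyImageInfo(clone_info);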
% */
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=image_info->debug;
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry to the destination image at the specified
offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception); % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Copy image pixels. */ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,source_image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) geometry->width; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0) || (source_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CopyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, 
deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); image->channel_map=DestroyPixelChannelMap(image->channel_map); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelInfo *) NULL) image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info *) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); DestroyBlob(image); if (image->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
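%
%  DestroyImageInfo() is the required counterpart of AcquireImageInfo() and
%  CloneImageInfo().  A hedged sketch of the usual pairing:
%
%      ImageInfo *image_info = AcquireImageInfo();
%      /* ... configure and use image_info ... */
%      image_info=DestroyImageInfo(image_info);  /* returns NULL */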
% */
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if
%  the blob of the specified image is referenced by other images.  If the
%  reference count is higher than 1, a new blob is assigned to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
*/ assert(image_info != (ImageInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); (void) memset(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image_info->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance, &image_info->border_color,exception); (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image_info->transparent_color,exception); exception=DestroyExceptionInfo(exception); image_info->debug=(GetLogEventMask() & ImageEvent) != 0 ? MagickTrue : MagickFalse; image_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,const PixelMask type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % */ MagickExport Image *GetImageMask(const Image *image,const PixelMask type, ExceptionInfo *exception) { CacheView *mask_view, *image_view; Image *mask_image; MagickBooleanType status; ssize_t y; /* Get image mask. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); switch (type) { case ReadPixelMask: { if ((image->channels & ReadMaskChannel) == 0) return((Image *) NULL); break; } case WritePixelMask: { if ((image->channels & WriteMaskChannel) == 0) return((Image *) NULL); break; } default: { if ((image->channels & CompositeMaskChannel) == 0) return((Image *) NULL); break; } } mask_image=AcquireImage((ImageInfo *) NULL,exception); status=SetImageExtent(mask_image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(mask_image)); status=MagickTrue; mask_image->alpha_trait=UndefinedPixelTrait; (void) SetImageColorspace(mask_image,GRAYColorspace,exception); image_view=AcquireVirtualCacheView(image,exception); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { switch (type) { case ReadPixelMask: { SetPixelGray(mask_image,GetPixelReadMask(image,p),q); break; } case WritePixelMask: { SetPixelGray(mask_image,GetPixelWriteMask(image,p),q); break; } default: { SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q); break; } } p+=GetPixelChannels(image); q+=GetPixelChannels(mask_image); } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) status=MagickFalse; } mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) mask_image=DestroyImage(mask_image); return(mask_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. % % The format of the GetReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. 
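%
%  For example (a sketch; "method" is an illustrative local variable):
%
%      VirtualPixelMethod method = GetImageVirtualPixelMethod(image);
%      if (method == UndefinedVirtualPixelMethod)
%        (void) SetImageVirtualPixelMethod(image,EdgeVirtualPixelMethod,
%          exception);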
% */
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument.  Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  const char
    *p;

  int
    c;

  MagickBooleanType
    canonical;

  ssize_t
    field_width,
    offset;

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
    return(strlen(filename));
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        char
          *r;

        ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t)
          (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ?
    MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *p;

  assert(image != (Image *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  const Image
    *p;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
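%
%  ModifyImage() implements copy-on-write semantics: call it before mutating
%  an image that may be shared, so other holders of a reference keep seeing
%  the original pixels.  A hedged sketch:
%
%      if (ModifyImage(&image,exception) != MagickFalse)
%        (void) SetImageBackgroundColor(image,exception);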
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. 
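%
%  Every ReferenceImage() call must eventually be balanced by a
%  DestroyImage() call; a minimal sketch:
%
%      Image *shared = ReferenceImage(image);
%      /* ... both handles remain valid ... */
%      shared=DestroyImage(shared);  /* frees only at reference count 0 */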
% */
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    length;

  ssize_t
    y;

  void
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      (void) memset(pixels,0,length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlpha() sets the alpha levels of the image.
%
%  The format of the SetImageAlpha method is:
%
%      MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: the level of transparency: 0 is fully transparent and
%      QuantumRange is fully opaque.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
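%
%  A typical sequence (a sketch, assuming an ExceptionInfo *exception is in
%  scope): assign the desired color, then flood the canvas:
%
%      (void) QueryColorCompliance("white",AllCompliance,
%        &image->background_color,exception);
%      (void) SetImageBackgroundColor(image,exception);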
% */
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelMask() sets the image channel mask from the specified
%  channel mask.
%
%  The format of the SetImageChannelMask method is:
%
%      ChannelType SetImageChannelMask(Image *image,
%        const ChannelType channel_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
%
%    o exception: return any errors or warnings in this structure.
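%
%  For example, to paint the whole canvas red (a hedged sketch; error
%  handling elided):
%
%      PixelInfo color;
%
%      (void) QueryColorCompliance("red",AllCompliance,&color,exception);
%      (void) SetImageColor(image,&color,exception);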
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const PixelInfo *color,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(color != (const PixelInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image->colorspace=color->colorspace; image->alpha_trait=color->alpha_trait; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,color,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->storage_class=storage_class; return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % % o exception: return any errors or warnings in this structure. 
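%
%  SetImageExtent() resizes the pixel cache but leaves the new pixels
%  undefined; a sketch of the usual follow-up:
%
%      if (SetImageExtent(image,640,480,exception) != MagickFalse)
%        (void) SetImageBackgroundColor(image,exception);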
% */
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript
%  image.  JPEG is returned for this filename: 'image.jpg'.  The filename
%  prefix has precedence over the suffix.  Use an optional index enclosed in
%  brackets after a file name to specify a desired scene of a
%  multi-resolution image format like Photo CD (e.g. img0001.pcd[4]).  A True
%  (non-zero) return value indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const MagickInfo *SetImageInfoFromExtension(ImageInfo *image_info,
  const char *component,char *magic,ExceptionInfo *exception)
{
  const MagickInfo
    *magick_info;

  MagickFormatType
    format_type;

  ssize_t
    i;

  static const char
    *format_type_formats[] =
    {
      "AUTOTRACE",
      "BROWSE",
      "DCRAW",
      "EDIT",
      "LAUNCH",
      "MPEG:DECODE",
      "MPEG:ENCODE",
      "PRINT",
      "PS:ALPHA",
      "PS:CMYK",
      "PS:COLOR",
      "PS:GRAY",
      "PS:MONO",
      "SCAN",
      "SHOW",
      "WIN",
      (char *) NULL
    };

  /*
    User specified image format.
  */
  (void) CopyMagickString(magic,component,MagickPathExtent);
  LocaleUpper(magic);
  /*
    Look for explicit image formats.
*/ format_type=UndefinedFormatType; magick_info=GetMagickInfo(magic,exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ return(magick_info); } MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char component[MagickPathExtent], magic[MagickPathExtent], path[MagickPathExtent], *q; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; const char *p; ssize_t count; /* Look for 'image.format' in filename. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *component='\0'; GetPathComponent(image_info->filename,SubimagePath,component); if (*component != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(component,MagickFalse) == MagickFalse) { if (IsGeometry(component) != MagickFalse) (void) CloneString(&image_info->extract,component); } else { size_t first, last; (void) CloneString(&image_info->scenes,component); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; } } *component='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,component); if (*component != '\0') { /* Base path sans any compression extension. */ GetPathComponent(image_info->filename,BasePathSansCompressExtension,path); GetPathComponent(path,ExtensionPath,component); } image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*component != '\0') && (IsGlob(component) == MagickFalse)) magick_info=SetImageInfoFromExtension(image_info,component,magic, sans_exception); /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MagickPathExtent); magick_info=GetMagickInfo(magic,sans_exception); if (frames == 0) GetPathComponent(image_info->filename,CanonicalPath,component); else GetPathComponent(image_info->filename,SubcanonicalPath,component); (void) CopyMagickString(image_info->filename,component,MagickPathExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=(const DelegateInfo *) NULL; if (magick_info == (const MagickInfo *) NULL) { delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if ((delegate_info == (const DelegateInfo *) NULL) && ((*component != '\0') && (IsGlob(component) == MagickFalse))) { /* Retry in case GetMagickInfo loaded a custom module. */ magick_info=SetImageInfoFromExtension(image_info,component,magic, sans_exception); } } if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); GetPathComponent(image_info->filename,CanonicalPath,component); (void) CopyMagickString(image_info->filename,component, MagickPathExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,component,exception); if ((LocaleCompare(component,image_info->filename) != 0) && (strchr(component,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { unsigned char *magick; size_t magick_size; /* Determine the image format from the first few bytes of the file. */ magick_size=GetMagicPatternExtent(exception); if (magick_size == 0) return(MagickFalse); image=AcquireImage(image_info,exception); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to seekable temporary file. 
*/ *component='\0'; status=ImageToFile(image,component,exception); (void) CloseBlob(image); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,component,MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,component, MagickPathExtent); image_info->temporary=MagickTrue; } magick=(unsigned char *) AcquireQuantumMemory(1,magick_size); if (magick == (unsigned char *) NULL) { (void) CloseBlob(image); image=DestroyImage(image); return(MagickFalse); } (void) memset(magick,0,magick_size); count=ReadBlob(image,magick_size,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic cache. */ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); magick=(unsigned char *) RelinquishMagickMemory(magick); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { /* Try to use magick_info that was determined earlier by the extension */ if ((magick_info != (const MagickInfo *) NULL) && (GetMagickUseExtension(magick_info) != MagickFalse) && (LocaleCompare(magick_info->magick_module,GetMagicName( magic_info)) == 0)) (void) CopyMagickString(image_info->magick,magick_info->name, MagickPathExtent); else { (void) CopyMagickString(image_info->magick,GetMagicName( magic_info),MagickPathExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); } if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o C u s t o m S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoCustomStream() sets the image info custom stream handlers. 
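%  Use it when pixels are produced or consumed by caller-supplied stream
%  callbacks (for example, a CustomStreamInfo acquired elsewhere) rather than
%  a file or blob.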
% % The format of the SetImageInfoCustomStream method is: % % void SetImageInfoCustomStream(ImageInfo *image_info, % CustomStreamInfo *custom_stream) % % A description of each parameter follows: % % o image_info: the image info. % % o custom_stream: your custom stream methods. % */ MagickExport void SetImageInfoCustomStream(ImageInfo *image_info, CustomStreamInfo *custom_stream) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->custom_stream=(CustomStreamInfo *) custom_stream; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/
  assert(image != (Image *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels &
            ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(mask,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      intensity=0.0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e R e g i o n M a s k                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageRegionMask() associates a mask with the image as defined by the
%  specified region.
%
%  The format of the SetImageRegionMask method is:
%
%      MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
%        const RectangleInfo *region,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o region: the mask region.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (region == (const RectangleInfo *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; pixel=QuantumRange; if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) && ((y >= region->y) && (y < (region->y+(ssize_t) region->height)))) pixel=(Quantum) 0; switch (type) { case ReadPixelMask: { SetPixelReadMask(image,pixel,q); break; } case WritePixelMask: { SetPixelWriteMask(image,pixel,q); break; } default: { SetPixelCompositeMask(image,pixel,q); break; } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(top_image,p) != TransparentAlpha) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(bottom_image,p) != TransparentAlpha) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" const Image *image; Image *smush_image; MagickBooleanType proceed, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute maximum area of smushed area. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; alpha_trait=image->alpha_trait; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images. 
*/ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse) { smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->alpha_trait=alpha_trait; (void) SetImageBackgroundColor(smush_image,exception); status=MagickTrue; x_offset=0; y_offset=0; for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset, y_offset,exception); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) { MagickBooleanType status; magick_unreferenced(exception); assert(image != (Image *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); status=SetImageArtifact(image,"png:exclude-chunk", "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static inline Quantum PushColormapIndex(Image *image,const Quantum index, MagickBooleanType *range_exception) { if ((size_t) index < image->colors) return(index); *range_exception=MagickTrue; return((Quantum) 0); } MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType range_exception, status, taint; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (image->ping != MagickFalse) return(MagickTrue); if (image->storage_class != PseudoClass) return(MagickFalse); assert(image->colormap != (PixelInfo *) NULL); range_exception=MagickFalse; status=MagickTrue; taint=image->taint; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(range_exception,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum index; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->taint=taint; if ((image->ping == MagickFalse) && (range_exception != MagickFalse)) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs any image_info global options into per-image % attributes. % % Note: in IMv6 free form 'options' were always mapped into 'artifacts', so % that operations and coders can find such settings. In IMv7 if a desired % per-image artifact is not set, then it will directly look for a global % option as a fallback, as such this copy is no longer needed, only the % link set up. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images,ExceptionInfo *exception) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image,exception); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->background_color, exception); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & RhoValue) != 0) image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; if ((flags & SigmaValue) != 0) image->chromaticity.blue_primary.y=geometry_info.sigma; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->border_color, exception); /* FUTURE: do not sync compose to per-image compose setting here */ option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); /* -- */ option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterType) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) 
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & RhoValue) != 0) image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; if ((flags & SigmaValue) != 0) image->chromaticity.green_primary.y=geometry_info.sigma; } option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->matte_color, exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & RhoValue) != 0) image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; if ((flags & SigmaValue) != 0) image->chromaticity.red_primary.y=geometry_info.sigma; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color, exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); 
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & RhoValue) != 0)
            image->resolution.x=geometry_info.rho;
          image->resolution.y=image->resolution.x;
          if ((flags & SigmaValue) != 0)
            image->resolution.y=geometry_info.sigma;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=image->chromaticity.white_point.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.white_point.y=geometry_info.sigma;
    }
  /*
    Keep a pointer so that per-image artifact lookups can fall back to the
    global option settings/defines.  This avoids duplicating every global
    option into per-image artifacts, while ensuring that only specifically
    set per-image artifacts are preserved when a parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
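/*
  Illustrative sketch only (not part of MagickCore): one way a caller might
  drive SetImageRegionMask() from above.  The helper name and the region
  dimensions are invented for this example; per the pixel loop in
  SetImageRegionMask(), pixels inside the region receive mask value 0 and
  all other pixels receive QuantumRange.
*/
static MagickBooleanType MaskTopLeftCorner(Image *image,
  ExceptionInfo *exception)
{
  RectangleInfo
    region;

  /* mask the 100x100 area anchored at the image origin */
  region.x=0;
  region.y=0;
  region.width=100;
  region.height=100;
  return(SetImageRegionMask(image,WritePixelMask,&region,exception));
}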
GB_unop__identity_uint16_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_uint16_uint8 // op(A') function: GB_unop_tran__identity_uint16_uint8 // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_uint16_uint8 ( uint16_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
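//------------------------------------------------------------------------------
// Illustrative sketch only (not generated): a direct call into the apply
// kernel defined above.  It assumes the GB.h context of this file; the array
// contents and the single-thread count are arbitrary.
//------------------------------------------------------------------------------

#ifndef GBCOMPACT
static void GB_unop_identity_uint16_uint8_demo (void)
{
    uint8_t  Ax [4] = { 0, 1, 128, 255 } ;
    uint16_t Cx [4] ;
    // widen each uint8_t entry to uint16_t; afterwards Cx [p] == (uint16_t) Ax [p]
    GrB_Info info = GB_unop_apply__identity_uint16_uint8 (Cx, Ax, 4, 1) ;
    (void) info ;   // GrB_SUCCESS, or GrB_NO_VALUE when GB_DISABLE holds
}
#endif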
primo.c
#include <stdio.h>
#include <math.h>

/* Returns 1 if num is prime, 0 otherwise (trial division by odd divisors). */
int primo(long num)
{
    long d;
    if (num <= 1)
        return 0;
    if (num > 3) {
        if (num % 2 == 0)
            return 0;
        long max_divisor = sqrt(num);
        for (d = 3; d <= max_divisor; d += 2) {
            if (num % d == 0)
                return 0;
        }
    }
    return 1;
}

int main()
{
    long max_num = 5000000;
    long cont_primo;
    long soma;
    int n;

    if (max_num <= 1)
        soma = 0;
    else {
        if (max_num == 2)
            soma = 1;
        else {
            /* soma starts at 1 to count the prime 2; the loop tests odd
               numbers only */
            soma = 1;
            #pragma omp parallel for private(cont_primo) reduction(+:soma) schedule(guided, 100)
            for (n = 3; n < max_num; n += 2) {
                cont_primo = primo(n);
                soma = soma + cont_primo;
            }
        }
    }
    printf("Total number of primes: %ld\n", soma);
    return 0;
}
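/*
 * Build/run sketch (assumed flags, not from the original source):
 *
 *   gcc -O2 -fopenmp primo.c -o primo -lm
 *   OMP_NUM_THREADS=8 ./primo
 *
 * reduction(+:soma) gives every thread a private partial count that OpenMP
 * sums into soma when the loop ends, and schedule(guided, 100) shrinks the
 * chunk size over time, which helps balance the costlier large candidates.
 */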
opencl_zip_fmt_plug.c
/* * * This software is Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com> * with some code (c) 2012 Lukas Odzioba <[email protected]> * and improvements (c) 2014 by magnum and JimF. * * This is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_zip; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_zip); #else #include <string.h> #include <stdint.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "common-opencl.h" #include "pkzip.h" #include "dyna_salt.h" #include "hmac_sha.h" #include "options.h" #define OPENCL_FORMAT 1 #include "pbkdf2_hmac_sha1.h" #define FORMAT_LABEL "zip-opencl" #define FORMAT_NAME "ZIP" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #define BINARY_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(my_salt*) #define SALT_ALIGN sizeof(size_t) typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } zip_password; typedef struct { uint32_t v[(2 * KEY_LENGTH(3) + PWD_VER_LENGTH + 3) / 4]; } zip_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } zip_salt; typedef struct my_salt_t { dyna_salt dsalt; uint32_t comp_len; struct { uint16_t type : 4; uint16_t mode : 4; } v; unsigned char passverify[2]; unsigned char salt[SALT_LENGTH(3)]; //uint64_t data_key; // MSB of md5(data blob). We lookup using this. unsigned char datablob[1]; } my_salt; static my_salt *saved_salt; static unsigned char (*crypt_key)[((WINZIP_BINARY_SIZE + 4)/4)*4]; // ensure 32-bit alignment static cl_int cl_error; static zip_password *inbuffer; static zip_hash *outbuffer; static zip_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl_autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(zip_password) * gws; outsize = sizeof(zip_hash) * gws; settingsize = sizeof(zip_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); crypt_key = mem_calloc(gws, sizeof(*crypt_key)); mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_key) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(crypt_key); MEM_FREE(inbuffer); MEM_FREE(outbuffer); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(zip_password), 0, db); // Auto tune execution from shared/included code. 
autotune_run(self, 1, 0, 1000); } } static void *get_salt(char *ciphertext) { int i; my_salt salt, *psalt; static unsigned char *ptr; /* extract data from "ciphertext" */ c8 *copy_mem = strdup(ciphertext); c8 *cp, *p; if (!ptr) ptr = mem_alloc_tiny(sizeof(my_salt*),sizeof(my_salt*)); p = copy_mem + WINZIP_TAG_LENGTH+1; /* skip over "$zip2$*" */ memset(&salt, 0, sizeof(salt)); cp = strtokm(p, "*"); // type salt.v.type = atoi((const char*)cp); cp = strtokm(NULL, "*"); // mode salt.v.mode = atoi((const char*)cp); cp = strtokm(NULL, "*"); // file_magic enum (ignored) cp = strtokm(NULL, "*"); // salt for (i = 0; i < SALT_LENGTH(salt.v.mode); i++) salt.salt[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])]; cp = strtokm(NULL, "*"); // validator salt.passverify[0] = (atoi16[ARCH_INDEX(cp[0])]<<4) | atoi16[ARCH_INDEX(cp[1])]; salt.passverify[1] = (atoi16[ARCH_INDEX(cp[2])]<<4) | atoi16[ARCH_INDEX(cp[3])]; cp = strtokm(NULL, "*"); // data len sscanf((const char *)cp, "%x", &salt.comp_len); // later we will store the data blob in our own static data structure, and place the 64 bit LSB of the // MD5 of the data blob into a field in the salt. For the first POC I store the entire blob and just // make sure all my test data is small enough to fit. cp = strtokm(NULL, "*"); // data blob // Ok, now create the allocated salt record we are going to return back to John, using the dynamic // sized data buffer. psalt = (my_salt*)mem_calloc(1, sizeof(my_salt) + salt.comp_len); psalt->v.type = salt.v.type; psalt->v.mode = salt.v.mode; psalt->comp_len = salt.comp_len; psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them. memcpy(psalt->salt, salt.salt, sizeof(salt.salt)); psalt->passverify[0] = salt.passverify[0]; psalt->passverify[1] = salt.passverify[1]; // set the JtR core linkage stuff for this dyna_salt psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(my_salt, comp_len); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(my_salt, comp_len, datablob, psalt->comp_len); if (strcmp((const char*)cp, "ZFILE")) { for (i = 0; i < psalt->comp_len; i++) psalt->datablob[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])]; } else { c8 *Fn, *Oh, *Ob; long len; uint32_t id; FILE *fp; Fn = strtokm(NULL, "*"); Oh = strtokm(NULL, "*"); Ob = strtokm(NULL, "*"); fp = fopen((const char*)Fn, "rb"); if (!fp) { psalt->v.type = 1; // this will tell the format to 'skip' this salt, it is garbage goto Bail; } sscanf((const char*)Oh, "%lx", &len); if (fseek(fp, len, SEEK_SET)) { fclose(fp); psalt->v.type = 1; goto Bail; } id = fget32LE(fp); if (id != 0x04034b50U) { fclose(fp); psalt->v.type = 1; goto Bail; } sscanf((const char*)Ob, "%lx", &len); if (fseek(fp, len, SEEK_SET)) { fclose(fp); psalt->v.type = 1; goto Bail; } if (fread(psalt->datablob, 1, psalt->comp_len, fp) != psalt->comp_len) { fclose(fp); psalt->v.type = 1; goto Bail; } fclose(fp); } Bail:; MEM_FREE(copy_mem); memcpy(ptr, &psalt, sizeof(my_salt*)); return (void*)ptr; } static void set_salt(void *salt) { saved_salt = *((my_salt**)salt); memcpy((char*)currentsalt.salt, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode)); currentsalt.length = SALT_LENGTH(saved_salt->v.mode); currentsalt.iterations = KEYING_ITERATIONS; currentsalt.outlen = PWD_VER_LENGTH; currentsalt.skip_bytes = 2 * KEY_LENGTH(saved_salt->v.mode); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); } #undef set_key static void 
set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (saved_salt->v.type) { // This salt passed valid() but failed get_salt(). // Should never happen. memset(crypt_key, 0, count * WINZIP_BINARY_SIZE); return count; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { if (!memcmp((unsigned char*)outbuffer[index].v, saved_salt->passverify, 2)) { unsigned char pwd_ver[4+64]; pbkdf2_sha1(inbuffer[index].v, inbuffer[index].length, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS, pwd_ver, KEY_LENGTH(saved_salt->v.mode), KEY_LENGTH(saved_salt->v.mode)); hmac_sha1(pwd_ver, KEY_LENGTH(saved_salt->v.mode), (const unsigned char*)saved_salt->datablob, saved_salt->comp_len, crypt_key[index], WINZIP_BINARY_SIZE); } else memset(crypt_key[index], 0, WINZIP_BINARY_SIZE); } return count; } static int cmp_all(void *binary, int count) { int i; for (i = 0; i < count; i++) if (((uint32_t*)&(crypt_key[i]))[0] == ((uint32_t*)binary)[0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return (((uint32_t*)&(crypt_key[index]))[0] == ((uint32_t*)binary)[0]); } static int cmp_exact(char *source, int index) { void *b = winzip_common_binary(source); return !memcmp(b, crypt_key[index], sizeof(crypt_key[index])); } struct fmt_main fmt_opencl_zip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, WINZIP_BENCHMARK_COMMENT, WINZIP_BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, WINZIP_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT, { NULL }, { WINZIP_FORMAT_TAG }, winzip_common_tests }, { init, done, reset, fmt_default_prepare, winzip_common_valid, winzip_common_split, winzip_common_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
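/*
 * Hash-line layout as inferred from get_salt() above (field order only;
 * salt and key widths depend on the mode field):
 *
 *   $zip2$*type*mode*magic*salt*validator*data_len*data_blob*...
 *
 * When the data-blob field is the literal "ZFILE", three further fields
 * follow (file name, zip-header offset, data offset) and get_salt() reads
 * the encrypted data from the zip file itself instead of the hash line.
 */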
mandel_omp_nox_dynamic_256.c
/* OpenMP Mandelbrot program (no X output; prints elapsed seconds only) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <time.h>

#define X_RESN 1000 /* x resolution */
#define Y_RESN 1000 /* y resolution */
#define MAX_ITER (2000)
#define CHUNK 256

// ref: https://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance
// call this function to start a nanosecond-resolution timer
struct timespec timer_start() {
    struct timespec start_time;
    clock_gettime(CLOCK_MONOTONIC, &start_time);
    return start_time;
}

// call this function to end a timer, returning nanoseconds elapsed as a long
long timer_end(struct timespec start_time){
    struct timespec end_time;
    clock_gettime(CLOCK_MONOTONIC, &end_time);
    long diffInNanos = (end_time.tv_sec - start_time.tv_sec) * (long)1e9 + (end_time.tv_nsec - start_time.tv_nsec);
    return diffInNanos;
}

typedef struct complextype {
    double real, imag;
} Compl;

int main(int argc, char *argv[])
{
    struct timespec vartime = timer_start();

    /* Mandelbrot variables */
    int *ks;
    ks = (int *)malloc((X_RESN*Y_RESN) * sizeof(int));
    double *ds;
    ds = (double *)malloc((X_RESN*Y_RESN) * sizeof(double));

    /* Calculate and draw points */
    #pragma omp parallel default(shared)
    {
        int num_threads = omp_get_num_threads();
        // printf("num_threads = %d\n", num_threads);
        #pragma omp for schedule(dynamic, CHUNK)
        for (int it = 0; it < X_RESN*Y_RESN; it++) {
            int i = it / Y_RESN;
            int j = it % Y_RESN;
            // mandelbrot set is defined in the region of x = [-2, +2] and y = [-2, +2]
            double u = ((double)i - (X_RESN / 2.0)) / (X_RESN / 4.0);
            double v = ((double)j - (Y_RESN / 2.0)) / (Y_RESN / 4.0);
            Compl z, c, t;
            z.real = z.imag = 0.0;
            c.real = v;
            c.imag = u;
            int k = 0;
            double d = 0.0;
            double lengthsq;
            do { /* iterate for pixel color */
                t = z;
                z.imag = 2.0 * t.real * t.imag + c.imag;
                z.real = t.real * t.real - t.imag * t.imag + c.real;
                lengthsq = z.real * z.real + z.imag * z.imag;
                d += pow(pow(z.imag - t.imag, 2.0) + pow(z.real - t.real, 2.0), 0.5);
                k++;
            } while (lengthsq < 4.0 && k < MAX_ITER);
            ks[it] = k;
            ds[it] = d;
        }
    }

    long time_elapsed_nanos = timer_end(vartime);
    double elapsed = time_elapsed_nanos*0.000000001;
    printf("%lf\n", elapsed);

    /* Release the result buffers before exiting. */
    free(ks);
    free(ds);

    /* Program Finished */
    return 0;
}
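/*
 * Build/run sketch (assumed flags, not part of the original file):
 *
 *   gcc -O2 -fopenmp mandel_omp_nox_dynamic_256.c -o mandel -lm
 *   OMP_NUM_THREADS=4 ./mandel
 *
 * schedule(dynamic, CHUNK) hands out 256-pixel blocks on demand; since the
 * escape-time loop runs anywhere from 1 to MAX_ITER iterations per pixel,
 * dynamic scheduling keeps the threads evenly loaded.
 */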
GB_unop__identity_uint8_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_uint8_int64 // op(A') function: GB_unop_tran__identity_uint8_int64 // C type: uint8_t // A type: int64_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_uint8_int64 ( uint8_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint8_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
binarytrees3.c
// The Computer Language Benchmarks Game
// https://salsa.debian.org/benchmarksgame-team/benchmarksgame/
//
// Contributed by Jeremy Zerfas
// Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho.
// *reset*

// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems.
#include <apr_pools.h>

// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;

typedef struct tree_node{
   struct tree_node   * left_Node, * right_Node;
} tree_node;

// Create a binary tree of depth tree_Depth in memory_Pool and return a
// pointer to the created binary tree.
static inline tree_node * create_Tree(const intnative_t tree_Depth,
  apr_pool_t * const memory_Pool){
   tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node));

   // If tree_Depth is one or more then recursively call create_Tree() in
   // order to create the left and right subtrees, each one level shallower.
   if(tree_Depth>0){
      root_Node->left_Node=create_Tree(tree_Depth-1, memory_Pool);
      root_Node->right_Node=create_Tree(tree_Depth-1, memory_Pool);
   }else
      root_Node->left_Node=root_Node->right_Node=NULL;

   return root_Node;
}

// Compute and return the checksum for the binary tree that has root_Node as
// the root node. The checksum is simply the number of nodes in the tree.
static inline intnative_t compute_Tree_Checksum(
  const tree_node * const root_Node){
   // If there are subtrees then recursively call compute_Tree_Checksum() on
   // them and add one for the current node, otherwise just return one for
   // this leaf node.
   if(root_Node->left_Node)
      return compute_Tree_Checksum(root_Node->left_Node)+
        compute_Tree_Checksum(root_Node->right_Node)+1;
   else
      return 1;
}

int main(int argc, char ** argv){
   // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of
   // what was specified as the argument to the program and
   // minimum_Tree_Depth+2. (Falling back to the minimum when no argument is
   // supplied is a defensive addition; the benchmark normally provides one.)
   const intnative_t minimum_Tree_Depth=4;
   intnative_t maximum_Tree_Depth=argc>1 ? atoi(argv[1]) : 0;
   if(maximum_Tree_Depth < minimum_Tree_Depth+2)
      maximum_Tree_Depth=minimum_Tree_Depth+2;

   apr_initialize();
   apr_pool_t * memory_Pool;

   // Create a memory pool, create a binary tree of depth
   // maximum_Tree_Depth+1, compute the checksum of the binary tree, print
   // the statistics, and then delete the memory pool.
   apr_pool_create_unmanaged(&memory_Pool);
   tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool);
   printf("stretch tree of depth %jd\t check: %jd\n",
     (intmax_t)maximum_Tree_Depth+1,
     (intmax_t)compute_Tree_Checksum(stretch_Tree));
   apr_pool_destroy(memory_Pool);

   // Create a memory pool and then create a long-lived binary tree of depth
   // maximum_Tree_Depth which will be left alone for a while while more
   // binary trees get allocated and deallocated as required by the rules.
   // We'll finish working with this later.
   apr_pool_create_unmanaged(&memory_Pool);
   tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool);

   // Create a lot of binary trees in parallel of depths ranging from
   // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all
   // their checksums, destroy the trees, and then record the statistics to
   // output_Buffer[] so they can be displayed in order later.
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create a binary tree of depth current_Tree_Depth tree_node * const tree_1=create_Tree(current_Tree_Depth, thread_Memory_Pool); total_Trees_Checksum+=compute_Tree_Checksum(tree_1); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; }
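/*
 * Build/run sketch (assumed; APR is a hard dependency of this program):
 *
 *   gcc -O2 -fopenmp binarytrees3.c -o binarytrees \
 *       $(pkg-config --cflags --libs apr-1)
 *   ./binarytrees 21
 *
 * The OpenMP loop above steps by 2, so each thread works on a distinct tree
 * depth with its own unmanaged APR pool, avoiding allocator contention.
 */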
GB_unaryop__abs_fp64_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp64_uint8 // op(A') function: GB_tran__abs_fp64_uint8 // C type: double // A type: uint8_t // cast: double cij = (double) aij // unaryop: cij = fabs (aij) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabs (x) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp64_uint8 ( double *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
update_ops_named_CNOT.c
#include <stddef.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif //void CNOT_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim); //void CNOT_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim); //void CNOT_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim); void CNOT_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { //CNOT_gate_old_single(control_qubit_index, target_qubit_index, state, dim); //CNOT_gate_old_parallel(control_qubit_index, target_qubit_index, state, dim); //CNOT_gate_single(control_qubit_index, target_qubit_index, state, dim); //CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim); //CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim); //CNOT_gate_parallel(control_qubit_index, target_qubit_index, state, dim); //return; #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim); } else { CNOT_gate_parallel_simd(control_qubit_index, target_qubit_index, state, dim); } #else CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim); } else { CNOT_gate_parallel_unroll(control_qubit_index, target_qubit_index, state, dim); } #else CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim); #endif #endif } void CNOT_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; if (target_qubit_index == 0) { // swap neighboring two basis for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; CTYPE temp = state[basis_index]; state[basis_index] = state[basis_index + 1]; state[basis_index + 1] = temp; } } else if (control_qubit_index == 0) { // no neighboring swap for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } else { // a,a+1 is swapped to a^m, a^m+1, respectively for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp0 = state[basis_index_0]; 
CTYPE temp1 = state[basis_index_0 + 1]; state[basis_index_0] = state[basis_index_1]; state[basis_index_0 + 1] = state[basis_index_1 + 1]; state[basis_index_1] = temp0; state[basis_index_1 + 1] = temp1; } } } #ifdef _OPENMP void CNOT_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; if (target_qubit_index == 0) { // swap neighboring two basis #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; CTYPE temp = state[basis_index]; state[basis_index] = state[basis_index + 1]; state[basis_index + 1] = temp; } } else if (control_qubit_index == 0) { // no neighboring swap #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } else { // a,a+1 is swapped to a^m, a^m+1, respectively #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp0 = state[basis_index_0]; CTYPE temp1 = state[basis_index_0 + 1]; state[basis_index_0] = state[basis_index_1]; state[basis_index_0 + 1] = state[basis_index_1 + 1]; state[basis_index_1] = temp0; state[basis_index_1 + 1] = temp1; } } } #endif #ifdef _USE_SIMD void CNOT_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; if (target_qubit_index == 0) { // swap neighboring two basis for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; double* ptr = (double*)(state + basis_index); __m256d data = _mm256_loadu_pd(ptr); data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78 _mm256_storeu_pd(ptr, data); } } else if (control_qubit_index == 0) { // no neighboring swap for (state_index = 0; state_index < loop_dim; 
++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } else { // a,a+1 is swapped to a^m, a^m+1, respectively for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; double* ptr0 = (double*)(state + basis_index_0); double* ptr1 = (double*)(state + basis_index_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); _mm256_storeu_pd(ptr1, data0); _mm256_storeu_pd(ptr0, data1); } } } #ifdef _OPENMP void CNOT_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; if (target_qubit_index == 0) { // swap neighboring two basis #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; double* ptr = (double*)(state + basis_index); __m256d data = _mm256_loadu_pd(ptr); data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78 _mm256_storeu_pd(ptr, data); } } else if (control_qubit_index == 0) { // no neighboring swap #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } else { // a,a+1 is swapped to a^m, a^m+1, respectively #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; double* ptr0 = (double*)(state + basis_index_0); double* ptr1 = (double*)(state + basis_index_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); _mm256_storeu_pd(ptr1, data0); _mm256_storeu_pd(ptr0, data1); } } } #endif #endif /* #ifdef _OPENMP void CNOT_gate_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 
1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } #endif void CNOT_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << max_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index); ITYPE basis_c1t0 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask; ITYPE basis_c1t1 = basis_c1t0 ^ target_mask; swap_amplitude(state, basis_c1t0, basis_c1t1); } } #ifdef _OPENMP void CNOT_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << max_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index); ITYPE basis_c1t0 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask; ITYPE basis_c1t1 = basis_c1t0 ^ target_mask; swap_amplitude(state, basis_c1t0, basis_c1t1); } } #endif void CNOT_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 4; const ITYPE target_mask = 1ULL << target_qubit_index; const ITYPE control_mask = 1ULL << control_qubit_index; const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index); const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); ITYPE state_index = 0; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask; ITYPE basis_index_1 = basis_index_0 + target_mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } */
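/* A minimal standalone sketch of the low/mid/high mask trick the kernels
 * above use: every basis index with the control bit set and the target bit
 * clear is built by splitting a dense counter at the two qubit positions,
 * shifting the middle bits past one inserted zero and the high bits past two,
 * then OR-ing in the control mask. The CNOT_MASK_DEMO guard, main() harness,
 * and the 4-qubit example values are illustrative additions, not part of the
 * library; compile with -DCNOT_MASK_DEMO to run it. */
#ifdef CNOT_MASK_DEMO
#include <stdint.h>
#include <stdio.h>
int main(void) {
	const unsigned control = 2, target = 0;
	const uint64_t dim = 16; /* 4 qubits */
	const uint64_t control_mask = 1ULL << control;
	const unsigned min_q = target < control ? target : control;
	const unsigned max_q = target < control ? control : target;
	const uint64_t min_mask = 1ULL << min_q;
	const uint64_t max_mask = 1ULL << (max_q - 1);
	const uint64_t low  = min_mask - 1;         /* bits below the smaller qubit  */
	const uint64_t mid  = (max_mask - 1) ^ low; /* bits between the two qubits   */
	const uint64_t high = ~(max_mask - 1);      /* bits above the larger qubit   */
	for (uint64_t s = 0; s < dim / 4; ++s) {
		/* same index formula as the kernels above: prints 4, 6, 12, 14,
		 * i.e. exactly the states with bit 2 (control) set and bit 0 clear */
		uint64_t b0 = (s & low) + ((s & mid) << 1) + ((s & high) << 2) + control_mask;
		printf("s=%llu -> basis %llu\n", (unsigned long long)s, (unsigned long long)b0);
	}
	return 0;
}
#endif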
nqmq.c
#include <omp.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <sys/time.h>

#define DATA_FILE_NAME "nqmq.dat"
#define DATA_LINE_MAX_LEN 80

char **cities;
int **distances;
int **through;
int num_cities = 0;
// By default use one thread of execution
int num_threads = 1;

void calculate_shortest_paths() {
	int id = omp_get_thread_num();
	// Determine the rows we will be calculating
	int start = id * num_cities / num_threads;
	int end = (id + 1) * num_cities / num_threads;
	for (int k = 0; k < num_cities; ++k) {
		// Wait for all threads before we do shortest path calculations on the
		// adjacency matrix for this intermediate city
		#pragma omp barrier
		for (int i = start; i < end; ++i) {
			for (int j = 0; j < num_cities; ++j) {
				// If the k node or target node is infinity (INT_MAX) then adding
				// them would just overflow the numbers. Ignore them.
				// We can also safely ignore paths from a city to itself
				if (distances[i][k] == INT_MAX || distances[k][j] == INT_MAX || i == j)
					continue;
				// Check if there is a faster path through node k
				int new_dist = distances[i][k] + distances[k][j];
				if (distances[i][j] <= new_dist) continue;
				// This way is faster! Update the min distance and keep track of
				// the node that we can travel through to get here
				distances[i][j] = new_dist;
				through[i][j] = k;
			}
		}
	}
	printf(" \e[0;33mThread-%d \e[0;32m->\e[0m Finished all calculations for rows %2d => %2d\n",
	       id, start + 1, end);
}

void print_path_directions(int a, int b) {
	int intermediate = through[a][b];
	if (intermediate == -1) {
		printf("  %s \e[0;32m->\e[0m %s (%d miles)\n", cities[a], cities[b], distances[a][b]);
	} else {
		print_path_directions(a, intermediate);
		print_path_directions(intermediate, b);
	}
}

int main(int argc, char *argv[]) {
	puts("\e[0;34m==>\e[0m Reading in values from the data file...");
	// Read in the needed information from the data file. Using this data we can
	// setup the cities array and distances matrix. We will also setup the
	// through matrix used to reconstruct paths.
	{
		char line[DATA_LINE_MAX_LEN];
		FILE *data_file;
		data_file = fopen(DATA_FILE_NAME, "r");
		if (data_file == NULL) {
			// Bail out rather than dereference a NULL stream below
			perror(DATA_FILE_NAME);
			exit(1);
		}
		// The first line will be the number of cities
		fscanf(data_file, "%d", &num_cities);
		fgets(line, DATA_LINE_MAX_LEN, data_file);
		// Allocate space for the city names
		cities = malloc(sizeof(char*) * num_cities);
		// Read in all cities
		for (int i = 0; i < num_cities; ++i) {
			fgets(line, DATA_LINE_MAX_LEN, data_file);
			// Remove newline
			line[strlen(line) - 1] = 0;
			// +1 leaves room for the terminating NUL that strcpy writes
			cities[i] = malloc(strlen(line) + 1);
			strcpy(cities[i], line);
		}
		printf("  Read in %d cities...\n", num_cities);
		// Calculate how much memory we need to allocate for the adjacency
		// matrix: one block holds the row pointers followed by the row data
		int mem_size = num_cities * sizeof(int*) + num_cities * num_cities * sizeof(int);
		// Allocate the memory for the adjacency matrix and through matrix
		distances = malloc(mem_size);
		through = malloc(mem_size);
		// Set the row indexes as pointers to the columns; the row data starts
		// immediately after the num_cities row pointers (an extra +1 here
		// would run one pointer's width past the end of the allocation)
		for (int i = 0; i < num_cities; ++i) {
			distances[i] = (int*)(distances + num_cities) + (i * num_cities);
			through[i] = (int*)(through + num_cities) + (i * num_cities);
		}
		// All cities should have an infinite distance between them, we can
		// represent this with the INT_MAX constant
		for (int i = 0; i < num_cities; ++i) {
			for (int j = 0; j < num_cities; ++j) {
				distances[i][j] = INT_MAX;
				through[i][j] = -1;
			}
		}
		// All cities have a 0 distance to themselves
		for (int i = 0; i < num_cities; distances[i][i] = 0, ++i);
		// Fill in the edges that we know from the data
		int total;
		for (total = 0; 1; ++total) {
			int city_a = 0, city_b = 0, dist = 0;
			fscanf(data_file, "%d %d %d", &city_a, &city_b, &dist);
			fgets(line, DATA_LINE_MAX_LEN, data_file);
			if (city_a == -1) break;
			// The cities are _NOT_ zero indexed in the data file
			distances[city_a - 1][city_b - 1] = dist;
			distances[city_b - 1][city_a - 1] = dist;
		}
		printf("  Read in %d connecting roads with distances...\n", total);
		fclose(data_file);
	}
	// The number of threads to use is the first argument
	if (argc > 1) {
		num_threads = atoi(argv[1]);
	}
	if (num_cities % num_threads != 0) {
		printf("\e[0;31m==> %d threads is not evenly divisible into %d cities\n", num_threads, num_cities);
		exit(1);
	}
	printf("\e[0;34m==>\e[0m Starting up %d threads to calculate shortest paths...\n", num_threads);
	struct timeval time_start;
	struct timeval time_end;
	gettimeofday(&time_start, NULL);
	#pragma omp parallel num_threads(num_threads)
	calculate_shortest_paths();
	gettimeofday(&time_end, NULL);
	// Calculate how long it took to find the shortest paths
	long long execution_time = 1000000LL * (time_end.tv_sec - time_start.tv_sec)
	    + (time_end.tv_usec - time_start.tv_usec);
	printf("\e[0;34m==>\e[0m Finished calculating shortest paths in %lld microseconds.\n\n", execution_time);
	// Display the menu
	puts("NQMQ Menu");
	puts("---------");
	puts("");
	// Display all cities with numbers
	for (int i = 0; i < num_cities; ++i) {
		printf("%2d. %s\n", i + 1, cities[i]);
	}
	int start, end;
	printf("\nPath from: ");
	scanf("%d", &start);
	printf("Path to: ");
	scanf("%d", &end);
	// These need to be zero indexed
	--start;
	--end;
	printf("\n\e[0;36m==>\e[0m %s to %s:\n\n", cities[start], cities[end]);
	if (distances[start][end] == INT_MAX) {
		printf("\e[0;31m==> No path available between these cities");
		exit(1);
	}
	print_path_directions(start, end);
	printf("\n\e[0;32m==>\e[0m Total Distance: %d miles\n", distances[start][end]);
	// Free up the cities array and the distances/through matrices
	for (int i = 0; i < num_cities; free(cities[i++]));
	free(cities);
	free(distances);
	free(through);
	return 0;
}
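/* A minimal serial sketch of the Floyd-Warshall recurrence the threads above
 * apply in parallel: for each intermediate city k, a route i -> k -> j
 * replaces i -> j whenever it is shorter, and through[i][j] records k so the
 * route can be reconstructed recursively (as print_path_directions() does).
 * The 3-city data and the NQMQ_FW_DEMO guard are made-up illustration, not
 * part of the original program; compile with -DNQMQ_FW_DEMO to run it. */
#ifdef NQMQ_FW_DEMO
#include <limits.h>
#include <stdio.h>
int main(void) {
	enum { N = 3 };
	int d[N][N] = {{0, 5, INT_MAX}, {5, 0, 4}, {INT_MAX, 4, 0}};
	int t[N][N] = {{-1,-1,-1}, {-1,-1,-1}, {-1,-1,-1}};
	for (int k = 0; k < N; ++k)
		for (int i = 0; i < N; ++i)
			for (int j = 0; j < N; ++j) {
				if (d[i][k] == INT_MAX || d[k][j] == INT_MAX || i == j) continue;
				if (d[i][k] + d[k][j] < d[i][j]) {
					d[i][j] = d[i][k] + d[k][j]; /* shorter path through k */
					t[i][j] = k;                 /* remember the waypoint  */
				}
			}
	/* city 0 reaches city 2 through city 1: prints "dist(0,2)=9 via 1" */
	printf("dist(0,2)=%d via %d\n", d[0][2], t[0][2]);
	return 0;
}
#endif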
ga.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <mpi.h>
#include <omp.h>
#include "../include/imagen.h"
#include "../include/ga.h"
#include "../include/derivados_mpi.h"

#define PRINT 1

static int aleatorio(int max) {
	return (rand() % (max + 1));
}

void init_imagen_aleatoria(RGB *imagen, int max, int total) {
	for (int i = 0; i < total; i++) {
		imagen[i].r = aleatorio(max);
		imagen[i].g = aleatorio(max);
		imagen[i].b = aleatorio(max);
	}
}

RGB *imagen_aleatoria(int max, int total) {
	RGB *imagen = (RGB *)malloc(total * sizeof(RGB));
	assert(imagen);
	init_imagen_aleatoria(imagen, max, total);
	return imagen;
}

static int comp_fitness(const void *a, const void *b) {
	/* qsort passes a pointer to the element being sorted */
	/* Compare explicitly instead of returning the double difference
	   truncated to int, which collapses sub-unit differences to "equal"
	   and can overflow for large images */
	double fa = ((const Individuo *)a)->fitness;
	double fb = ((const Individuo *)b)->fitness;
	return (fa > fb) - (fa < fb);
}

void crear_imagen(const RGB *imagen_objetivo, int num_pixels, int ancho, int alto,
                  int max, int num_generaciones, int tam_poblacion,
                  RGB *imagen_resultado, const char *output_file) {
	double initial_time_fitness = 0;
	double final_time_fitness = 0;
	double total_time_fitness = 0;
	double fitness_anterior = 0, fitness_actual, diferencia_fitness;
	int rank, world_size;
	Individuo *poblacion = NULL;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &world_size);
	if (rank == 0) {
		poblacion = malloc(tam_poblacion * sizeof(Individuo));
		assert(poblacion);
		/* All nodes compute their share */
		for (int i = 0; i < tam_poblacion; i++) {
			init_imagen_aleatoria(poblacion[i].imagen, max, num_pixels);
		}
		for (int i = 0; i < tam_poblacion; i++) {
			fitness(imagen_objetivo, &poblacion[i], num_pixels);
		}
		// Sort individuals by the goodness function (lower "fitness" --> fitter)
		qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness);
		//final_time_fitness = MPI_Wtime();
		//total_time_fitness += final_time_fitness - initial_time_fitness;
	}
	MPI_Datatype rgb_type;
	MPI_Datatype individuo_type;
	crear_tipo_datos(num_pixels, &rgb_type, &individuo_type);
	int chunkSize = tam_poblacion / world_size;
	int leftover = tam_poblacion % world_size;
	Individuo *poblacionLocal = malloc(chunkSize * sizeof(Individuo));
	MPI_Scatter(&poblacion[leftover], chunkSize, individuo_type,
	            poblacionLocal, chunkSize, individuo_type, 0, MPI_COMM_WORLD);
	// B. Evolve the population (over a number of generations)
	for (int g = 0; g < num_generaciones; g++) {
		if (rank == 0) fitness_anterior = poblacion[0].fitness;
		int cruzarChunkSize = chunkSize / 2;
		int cruzarLeftover = leftover / 2;
		// Promote the descendants of the fittest individuals
		if (rank == 0) {
			for (int i = 0; i < cruzarLeftover; i += 2) {
				cruzar(&poblacion[i], &poblacion[i+1],
				       &poblacion[cruzarLeftover/2 + i], &poblacion[cruzarLeftover/2 + i + 1],
				       num_pixels);
			}
		}
		for (int i = 0; i < cruzarChunkSize; i += 2) {
			cruzar(&poblacionLocal[i], &poblacionLocal[i + 1],
			       &poblacionLocal[cruzarChunkSize/2 + i], &poblacionLocal[cruzarChunkSize/2 + i + 1],
			       num_pixels);
		}
		// Mutate part of the population's individuals (tam_poblacion/4 are chosen to mutate)
		int mutation_start = (tam_poblacion / 4) / world_size;
		if (rank == 0) {
			for (int i = leftover/4; i < leftover; i++) {
				mutar(&poblacion[i], max, num_pixels);
			}
		}
		for (int i = mutation_start; i < chunkSize; i++) {
			mutar(&poblacionLocal[i], max, num_pixels);
		}
		if (rank == 0) {
			initial_time_fitness = MPI_Wtime();
			for (int i = 0; i < leftover; i++) {
				fitness(imagen_objetivo, &poblacion[i], num_pixels);
			}
		}
		/* All nodes compute their share */
		for (int i = 0; i < chunkSize; i++) {
			fitness(imagen_objetivo, &poblacionLocal[i], num_pixels);
		}
		// Every 10 iterations, gather the subpopulations, sort, and redistribute
		if ((g % 10 == 0 && g) || g == num_generaciones - 1) {
			// Use the derived MPI type for the transfer
			MPI_Gather(poblacionLocal, chunkSize, individuo_type,
			           &poblacion[leftover], chunkSize, individuo_type, 0, MPI_COMM_WORLD);
			if (rank == 0) {
				qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness);
				final_time_fitness = MPI_Wtime();
				total_time_fitness += final_time_fitness - initial_time_fitness;
				fitness_actual = poblacion[0].fitness;
				diferencia_fitness = -(fitness_actual - fitness_anterior) / fitness_actual * 100;
				if (PRINT) {
					printf("Generation %d - ", g);
					printf("Fitness = %e - ", fitness_actual);
					printf("Difference from previous fitness = %.2e%c\n", diferencia_fitness, 37);
				}
			}
			MPI_Scatter(&poblacion[leftover], chunkSize, individuo_type,
			            poblacionLocal, chunkSize, individuo_type, 0, MPI_COMM_WORLD);
		}
		qsort(poblacionLocal, chunkSize, sizeof(Individuo), comp_fitness);
	}
	// Return the resulting image
	if (rank == 0) {
		//qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness);
		printf("Time spent on fitness computation: %f\n", total_time_fitness);
		memmove(imagen_resultado, poblacion[0].imagen, num_pixels * sizeof(RGB));
		printf("Image moved\n");
		// Release memory
	}
	if (rank == 0) free(poblacion);
	free(poblacionLocal);
}

void cruzar(Individuo *padre1, Individuo *padre2, Individuo *hijo1, Individuo *hijo2, int num_pixels) {
	// Choose a random "cut" point from which the gene exchange takes place:
	// * cross each parent's genes into its child up to the cut
	// * exchange each child's remaining genes with those of the other parent
	int corte = aleatorio(num_pixels - 1);
	#pragma omp parallel
	{
		#pragma omp for
		for (int i = 0; i < corte; i++) {
			hijo1->imagen[i] = padre1->imagen[i];
			hijo2->imagen[i] = padre2->imagen[i];
		}
		#pragma omp for
		for (int i = corte; i < num_pixels; i++) {
			hijo1->imagen[i] = padre2->imagen[i];
			hijo2->imagen[i] = padre1->imagen[i];
		}
	}
}

void fitness(const RGB *objetivo, Individuo *individuo, int num_pixels) {
	// Measure the individual's quality (similarity to the target) by
	// summing the per-channel distance between pixels
	double fitness = 0;
	#pragma omp parallel for reduction(+:fitness)
	for (int i = 0; i < num_pixels; i++) {
		fitness += abs(objetivo[i].r - individuo->imagen[i].r)
		         + abs(objetivo[i].g - individuo->imagen[i].g)
		         + abs(objetivo[i].b - individuo->imagen[i].b);
	}
	individuo->fitness = fitness;
}

void mutar(Individuo *actual, int max, int num_pixels) {
	// Randomly change the value of some points in the image.
	// Decide how many pixels to mutate: too small a value makes
	// convergence very slow, too large a value diverges.
	double ratioMutacion = 0.002;
	// Cast the product, not just num_pixels, so the fraction is applied first
	int numMutar = (int)(num_pixels * ratioMutacion);
	for (int i = 0; i < numMutar; i++) {
		int index = aleatorio(num_pixels - 1);
		actual->imagen[index].r = aleatorio(max);
		actual->imagen[index].g = aleatorio(max);
		actual->imagen[index].b = aleatorio(max);
	}
}
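/* A minimal sketch of the one-point crossover performed by cruzar() above:
 * genes before the random cut come from one parent, genes after it from the
 * other, producing two complementary children. Plain ints stand in for the
 * RGB genes; the fixed cut point, array sizes, and the GA_CROSSOVER_DEMO
 * guard are illustrative only; compile with -DGA_CROSSOVER_DEMO to run it. */
#ifdef GA_CROSSOVER_DEMO
#include <stdio.h>
int main(void) {
	enum { GENES = 8 };
	int p1[GENES] = {1,1,1,1,1,1,1,1};
	int p2[GENES] = {2,2,2,2,2,2,2,2};
	int h1[GENES], h2[GENES];
	int cut = 3; /* cruzar() draws this uniformly in [0, num_pixels-1] */
	for (int i = 0; i < GENES; ++i) {
		h1[i] = (i < cut) ? p1[i] : p2[i]; /* prefix from parent 1, suffix from parent 2 */
		h2[i] = (i < cut) ? p2[i] : p1[i]; /* the complementary child */
	}
	for (int i = 0; i < GENES; ++i) printf("%d%d ", h1[i], h2[i]);
	printf("\n"); /* prints: 12 12 12 21 21 21 21 21 */
	return 0;
}
#endif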
nr_numint.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "cint.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" #include <assert.h> #define BOXSIZE 56 int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice, int *ao_loc) { if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) { return 0; } const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int bas_id; int box_id = 0; int bound = BOXSIZE; int has0 = 0; empty[box_id] = 1; for (bas_id = sh0; bas_id < sh1; bas_id++) { empty[box_id] &= !non0table[bas_id]; if (ao_loc[bas_id] == bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = 1; } else if (ao_loc[bas_id] > bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = !non0table[bas_id]; } } return has0; } static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int bgrids, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; double beta = 0; if (has0) { int box_id, blen, i, j; size_t b0; for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen, &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc, &beta, vm, &ngrids); beta = 1.0; } } if (beta == 0) { // all empty for (i = 0; i < nocc; i++) { for (j = 0; j < bgrids; j++) { vm[i*ngrids+j] = 0; } } } } else { dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao, &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids); } } /* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int nbas, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; #pragma omp parallel default(none) \ shared(vm, ao, dm, nao, nocc, ngrids, nbas, \ non0table, shls_slice, ao_loc) { int ip, ib; #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE), non0table+ib*nbas, shls_slice, ao_loc); } } } /* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */ static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int bgrids, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (has0) { int ib, jb, leni, lenj; int j1 = nbox; size_t b0i, b0j; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); if (hermi) { j1 = ib 
+ 1; } for (jb = 0; jb < j1; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1, ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids, &D1, vv+b0i*nao+b0j, &nao); } } } } } else { dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids, &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao); } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int nbas, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; memset(vv, 0, sizeof(double) * nao * nao); #pragma omp parallel default(none) \ shared(vv, ao1, ao2, nao, ngrids, nbas, hermi, \ non0table, shls_slice, ao_loc) { int ip, ib; double *v_priv = calloc(nao*nao+2, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_ao(v_priv, ao1+ip, ao2+ip, nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi, non0table+ib*nbas, shls_slice, ao_loc); } #pragma omp critical { for (ip = 0; ip < nao*nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } if (hermi != 0) { NPdsymm_triu(nao, vv, hermi); } }
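/* A minimal sketch of the BOXSIZE screening done by VXCao_empty_blocks()
 * above: shells flagged zero in non0table mark whole AO blocks empty, and
 * dot_ao_dm()/dot_ao_ao() then skip the dgemm call for those blocks. This is
 * simplified (the boundary-straddling ao_loc[bas_id] > bound case is omitted
 * because the made-up shells align with box boundaries); the shell layout and
 * VXC_BLOCK_DEMO guard are illustrative only; compile with -DVXC_BLOCK_DEMO. */
#ifdef VXC_BLOCK_DEMO
#include <stdio.h>
int main(void) {
	enum { BOX = 4, NSH = 6, NBOX = 3 };
	/* ao_loc[i] = first AO index of shell i+1; 2 AOs per shell, 12 AOs, 3 boxes */
	int ao_loc[NSH] = {2, 4, 6, 8, 10, 12};
	unsigned char non0[NSH] = {1, 1, 0, 0, 1, 1}; /* shells 2,3 are negligible */
	char empty[NBOX] = {1, 1, 1};
	int box = 0, bound = BOX;
	for (int sh = 0; sh < NSH; ++sh) {
		empty[box] &= !non0[sh]; /* a box stays empty only if all its shells are */
		if (ao_loc[sh] == bound) { /* shell boundary coincides with box boundary */
			++box;
			bound += BOX;
			if (box < NBOX) empty[box] = 1;
		}
	}
	/* prints: box 0 compute, box 1 skip dgemm, box 2 compute */
	for (int b = 0; b < NBOX; ++b)
		printf("box %d: %s\n", b, empty[b] ? "skip dgemm" : "compute");
	return 0;
}
#endif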
nco_rgr.c
/* $Header$ */ /* Purpose: NCO regridding utilities */ /* Copyright (C) 2015--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License with exceptions described in the LICENSE file */ #include "nco_rgr.h" /* Regridding */ int /* O [enm] Return code */ nco_rgr_ctl /* [fnc] Control regridding logic */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Control regridding logic */ int rcd=NCO_NOERR; const char fnc_nm[]="nco_rgr_ctl()"; nco_bool flg_grd=False; /* [flg] Create SCRIP-format grid file */ nco_bool flg_map=False; /* [flg] Create ESMF-format mapfile */ nco_bool flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */ nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */ nco_bool flg_vrt=False; /* [flg] Interpolate to new vertical grid */ nco_bool flg_wgt=False; /* [flg] Regrid with external weights */ /* Main control branching occurs here Branching complexity and utility will increase as regridding features are added */ if(rgr->flg_grd) flg_grd=True; if(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) flg_map=True; if(rgr->flg_nfr) flg_nfr=True; if(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) flg_wgt=True; if(rgr->fl_vrt) flg_vrt=True; assert(!flg_smf); assert(!flg_tps); /* Create SCRIP-format grid file */ if(flg_grd) rcd=nco_grd_mk(rgr); /* Create ESMF-format map file */ if(flg_map) rcd=nco_map_mk(rgr); /* Infer SCRIP-format grid file from data file */ if(flg_nfr) rcd=nco_grd_nfr(rgr); /* Interpolate data file to new vertical grid */ if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl); /* Regrid data horizontally using weights from mapping file */ if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl); /* Regrid using ESMF library 20150701: On-line weight generation with ESMF never worked well and was abandoned */ if(flg_smf){ #ifdef ENABLE_ESMF (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm); rcd=nco_rgr_esmf(rgr); /* Close output and free dynamic memory */ (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id); #else /* !ENABLE_ESMF */ (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. 
Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); #endif /* !ENABLE_ESMF */ } /* !flg_smf */ /* Regrid using TempestRemap regridding 20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */ if(flg_tps) rcd=nco_rgr_tps(rgr); return rcd; } /* end nco_rgr_ctl() */ rgr_sct * /* O [sct] Pointer to free'd regridding structure */ nco_rgr_free /* [fnc] Deallocate regridding structure */ (rgr_sct *rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Free all dynamic memory in regridding structure */ /* free() standalone command-line arguments */ if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln); if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src); if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst); if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in); if(rgr->fl_map) rgr->fl_map=(char *)nco_free(rgr->fl_map); if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh); if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out); if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp); if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt); if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm); if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr); /* free() strings associated with grid properties */ if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd); if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst); if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src); if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl); if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid); /* Tempest */ if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps); /* free() memory used to construct KVMs */ if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr); /* free() memory copied from KVMs */ if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm); if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm); if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm); if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in); if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out); if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm); if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in); if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out); if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm); if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in); if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out); if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm); if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm); if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in); if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out); if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm); if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in); if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out); if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm); if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm); if(rgr->plev_nm_in) rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in); 
if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm);

  /* Lastly, free() regrid structure itself */
  if(rgr) rgr=(rgr_sct *)nco_free(rgr);

  return rgr;
} /* end nco_rgr_free() */

rgr_sct * /* O [sct] Regridding structure */
nco_rgr_ini /* [fnc] Initialize regridding structure */
(const char * const cmd_ln, /* I [sng] Command-line */
 const int in_id, /* I [id] Input netCDF file ID */
 char **rgr_arg, /* [sng] Regridding arguments */
 const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */
 char * const rgr_in, /* I [sng] File containing fields to be regridded */
 char * const rgr_out, /* I [sng] File containing regridded fields */
 char * const rgr_grd_src, /* I [sng] File containing input grid */
 char * const rgr_grd_dst, /* I [sng] File containing destination grid */
 char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */
 char * const rgr_var, /* I [sng] Variable for special regridding treatment */
 char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */
 const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */
 char **xtn_var, /* [sng] I Extensive variables */
 const int xtn_nbr) /* [nbr] I Number of extensive variables */
{
  /* Purpose: Initialize regridding structure */
  const char fnc_nm[]="nco_rgr_ini()";
  rgr_sct *rgr;

  /* Allocate */
  rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct));

  /* Initialize variables directly or indirectly set via command-line (except for key-value arguments) */
  rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */
  rgr->flg_usr_rqs=False; /* [flg] User requested regridding */
  rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */
  rgr->in_id=in_id; /* [id] Input netCDF file ID */
  rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */
  rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */
  rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
  rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */
  rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */
  rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */
  rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */
  rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */
  rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */
  rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */
  rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */
  rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */
  rgr->fl_vrt=rgr_vrt; /* [sng] File containing vertical coordinate grid */
  rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */
  rgr->xtn_var=xtn_var; /* [sng] Extensive variables */
  rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */

  /* Did user explicitly request regridding?
*/ if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True; /* Initialize arguments after copying */ if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc"); if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc"); // if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs); (void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr); (void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL"); (void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL"); (void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL"); (void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL"); (void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL"); (void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL"); (void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? rgr->fl_vrt : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Flags */ if(wgt_vld_thr == NC_MIN_DOUBLE){ rgr->flg_rnr=False; }else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){ /* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */ // const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */ rgr->flg_rnr=True; rgr->wgt_vld_thr=wgt_vld_thr; }else{ (void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr); nco_exit(EXIT_FAILURE); } /* endif */ /* Parse extended kvm options */ char *sng_fnl=NULL; int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */ int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */ int rgr_var_nbr=0; kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */ if(rgr_arg_nbr > 0){ /* Join arguments together */ sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr); rgr_lst=nco_arg_mlt_prs(sng_fnl); if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl); /* Count number of keys */ for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++);/* !rgr_var_idx */ } /* !rgr_arg_nbr */ /* NULL-initialize key-value properties required for string variables */ rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */ rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */ rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */ rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */ rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */ rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ rgr->ilev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer interfaces */ rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */ rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */ rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */ rgr->lat_nm_out=NULL; /* [sng] Name of output dimension for latitude */ rgr->lat_vrt_nm=NULL; 
/* [sng] Name of non-rectangular boundary variable for latitude */ rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */ rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */ rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */ rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */ rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */ rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */ rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */ rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */ rgr->plev_nm_in=NULL; /* [sng] Name of input variable recognize as pure-pressure coordinate */ rgr->sgs_frc_nm=NULL; /* [sng] Name of variable sub-gridscale fraction */ rgr->sgs_msk_nm=NULL; /* [sng] Name of variable sub-gridscale mask */ rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */ /* Initialize key-value properties used in grid generation */ rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */ rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */ rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */ rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */ rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */ rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */ rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */ rgr->flg_area_out=True; /* [flg] Add area to output */ rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */ rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */ rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */ rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */ rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */ rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */ rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */ rgr->flg_msk_out=False; /* [flg] Add mask to output */ rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */ rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [enm] Grid title */ rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */ rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */ rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */ rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */ rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */ rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */ rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */ rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */ rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */ rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of grid */ rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */ rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */ rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */ rgr->msk_var=NULL; /* [sng] Mask-template variable */ rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */ 
rgr->sgs_nrm=1.0; /* [sng] Sub-gridscale normalization */ rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */ rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */ rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */ /* Parse key-value properties */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){ if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){ rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !grid */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){ rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_dst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){ rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_src */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){ rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_var */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){ rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !msh */ if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){ rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !skl */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){ rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_nfr=True; continue; } /* !ugrid */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){ rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){ rgr->flg_area_out=False; continue; } /* !area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){ rgr->flg_msk_out=False; continue; } /* !msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){ rgr->flg_msk_out=True; continue; } /* !mask */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){ rgr->flg_cll_msr=True; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){ rgr->flg_cll_msr=False; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){ rgr->flg_crv=True; continue; } /* !curvilinear */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){ rgr->flg_dgn_area=True; continue; } /* !diagnose_area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){ rgr->flg_dgn_bnd=True; continue; } /* !diagnose_bounds */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){ rgr->flg_cf_units=True; rgr->flg_erwg_units=False; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){ 
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){ rgr->edg_typ=nco_edg_gtc; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){ rgr->edg_typ=nco_edg_smc; (void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. Use only at your own risk.\n",nco_prg_nm_get()); }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){ rgr->edg_typ=nco_edg_crt; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !edg_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){ rgr->flg_cf_units=False; rgr->flg_erwg_units=True; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){ rgr->flg_nfr=True; continue; } /* !infer */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){ rgr->flg_stg=False; continue; } /* !stagger */ if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){ if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val); /* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl 20180905 Reverted this since quoting command in ncremap is superior solution */ if(False){ size_t ttl_lng=strlen(rgr->grd_ttl); for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++) if(rgr->grd_ttl[ttl_idx] == '~'){ if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted } /* !tilde */ } /* !0 */ continue; } /* !grd_ttl */ if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){ rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !idx_dbg */ if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr); assert(cnv_nbr == 2); continue; } /* !latlon */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr); assert(cnv_nbr == 2); continue; } /* !lonlat */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){ rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lat_nbr */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){ rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) 
nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
      continue;
    } /* !lon_nbr */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){
      cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est);
      if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
      assert(cnv_nbr == 4);
      if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
      continue;
    } /* !snwe */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){
      /* Parse unconditionally: cnv_nbr is stale (or indeterminate) at this point, so guarding the sscanf() on its value could skip parsing entirely */
      cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt);
      assert(cnv_nbr == 4);
      continue;
    } /* !wesn */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){
      rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      continue;
    } /* !lat_crv */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){
      rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      continue;
    } /* !lon_crv */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){
      rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      // rgr->lat_typ=nco_grd_lat_bb;
      continue;
    } /* !lat_sth */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){
      rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      rgr->lon_typ=nco_grd_lon_bb;
      continue;
    } /* !lon_wst */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){
      rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      //rgr->lat_typ=nco_grd_lat_bb;
      continue;
    } /* !lat_nrt */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){
      rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
      rgr->lon_typ=nco_grd_lon_bb;
      continue;
    } /* !lon_est */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){
      if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){
        rgr->lat_drc=nco_grd_lat_drc_s2n;
      }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){
        rgr->lat_drc=nco_grd_lat_drc_n2s;
      }else{
        (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
        abort();
      } /* !val */
      continue;
    } /* !lat_drc */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){
      if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){
        rgr->lat_typ=nco_grd_lat_fv;
        rgr->grd_typ=nco_grd_2D_fv;
      }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") ||
!strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){ rgr->lat_typ=nco_grd_lat_eqa; rgr->grd_typ=nco_grd_2D_eqa; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){ rgr->lat_typ=nco_grd_lat_gss; rgr->grd_typ=nco_grd_2D_gss; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180")) rgr->lon_typ=nco_grd_lon_180_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180")) rgr->lon_typ=nco_grd_lon_180_ctr; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn")) rgr->lon_typ=nco_grd_lon_Grn_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn")) rgr->lon_typ=nco_grd_lon_Grn_ctr; else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lon_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){ rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !area_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){ rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){ rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_tm_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){ rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){ rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){ rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !frc_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){ rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){ rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){ rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){ rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){ rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){ rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){ rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){ rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_wgt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){ 
rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){ rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){ rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){ rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){ rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){ rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){ rgr->lon_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){ rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !plev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){ rgr->ply_tri_mth=nco_ply_tri_mth_csz; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){ rgr->ply_tri_mth=nco_ply_tri_mth_ctr; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ply_tri */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){ rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_frc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){ rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){ rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !sgs_nrm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){ rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !tst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){ rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){ rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || !strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){ rgr->ntp_mth=nco_ntp_lnr; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){ rgr->ntp_mth=nco_ntp_log; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ 
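/* Illustration of the two vrt_ntp choices parsed above (sketch only, never executed here):
   lnr: val(prs)=val0+(val1-val0)*(prs-prs0)/(prs1-prs0)
   log: val(prs)=val0+(val1-val0)*(ln(prs)-ln(prs0))/(ln(prs1)-ln(prs0))
   e.g., with illustrative values val0=250.0 at prs0=500 hPa, val1=260.0 at prs1=1000 hPa, target prs=700 hPa:
   lnr gives 250.0+10.0*(200.0/500.0)=254.0 whereas log gives 250.0+10.0*(ln(1.4)/ln(2.0))~254.9 */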
continue; } /* !ntp_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_xtr") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){ rgr->xtr_mth=nco_xtr_fll_ngh; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){ rgr->xtr_mth=nco_xtr_fll_msv; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !xtr_mth */ (void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key); nco_exit(EXIT_FAILURE); } /* end for */ /* Eliminate sticky wickets: Give nfr precedence over grd */ if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False; /* Revert to defaults for any names not specified on command-line */ if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */ if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */ /* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. */ if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */ if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */ if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */ if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */ if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */ if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */ if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */ if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */ if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */ if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask"); /* [sng] Name of variable containing destination mask */ if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */ if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */ /* Derived from 
defaults and command-line arguments */ // On second thought, do not strdup() these here. This way, NULL means user never specified lon/lat-out names // if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */ /* Free kvms */ if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr); return rgr; } /* end nco_rgr_ini() */ int /* O [enm] Return code */ nco_ntp_vrt /* [fnc] Interpolate vertically */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Interpolate fields to new vertical grid specified in a vertical file */ const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */ char *fl_tpl; char *fl_pth_lcl=NULL; int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int rec_idx; /* [idx] Record dimension index */ nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt); /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_vrt); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid: prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev] prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */ /* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid: prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev] prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */ /* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates Only hybrid coordinates will refer to the "ilev" levels and indices All single coordinate systems will refer to "lev" levels and indices */ int dpt_id; /* [id] Ocean depth ID */ int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */ int 
hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */ int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */ int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */ int ilev_id=NC_MIN_INT; /* [id] Interface pressure ID */ int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */ int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */ int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */ int plev_id; /* [id] Air pressure ID */ nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */ nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */ nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */ nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */ nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */ nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */ nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */ nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */ nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */ nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */ nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */ nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */ nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */ /* Determine output grid type */ if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */ flg_grd_out_hyb=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */ flg_grd_out_prs=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */ flg_grd_out_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ if(flg_grd_out_hyb){ rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id); rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id); rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id); rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id); rcd=nco_inq_varid(tpl_id,"P0",&p0_id); rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id); rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id); rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_inq_varid(tpl_id,"plev",&lev_id); } /* !flg_grd_out_prs */ if(flg_grd_out_dpt){ rcd=nco_inq_varid(tpl_id,"depth",&lev_id); } /* !flg_grd_out_dpt */ const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */ const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */ const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */ const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */ const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */ const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */ const int lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_tpl=ps_id; /* [id] Surface 
pressure ID */ char *ilev_nm_in=NULL; /* [sng] Interface level name */ char *lev_nm_in; char *ilev_nm_out; char *lev_nm_out; char *plev_nm_in; /* [sng] Pure-pressure coordinate name */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */ int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ int dmn_nbr_out; /* [nbr] Number of dimensions in output file */ int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */ int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */ int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */ int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */ int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_srt=NULL; long ilev_nbr_in; long lev_nbr_in; long ilev_nbr_out; long lev_nbr_out; long tm_idx=0L; /* [idx] Current timestep */ long tm_nbr=1L; /* [nbr] Number of timesteps in vertical grid */ long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */ long tm_nbr_out=1L; /* [nbr] Number of timesteps in output vertical grid definition */ size_t grd_idx; /* [idx] Gridcell index */ size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */ size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */ if(flg_grd_out_hyb){ /* Interrogate hyai/hyam to obtain ilev/lev dimensions */ rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out); rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm); ilev_nm_out=strdup(dmn_nm); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); lev_nm_out=strdup(dmn_nm); /* Interrogate PS, if any, for horizontal dimensions */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps); dmn_nbr_out=dmn_nbr_ps; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx ==
dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){ tm_nbr_out=dmn_cnt_out[dmn_idx]; if(tm_nbr_out > 1L) flg_vrt_tm=True; } /* tm_nbr_out > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); } /* !ps_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); ilev_nbr_out=lev_nbr_out; } /* !flg_grd_out_prs */ double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */ double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */ double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */ double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */ double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */ double *lev_out=NULL; /* [hPa] Midpoint pressure on output grid */ double *ps_out=NULL; /* [Pa] Surface pressure on output grid */ double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */ double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */ double p0_out; /* [Pa] Reference pressure on output grid */ long ilev_idx; /* [idx] Interface level index */ long lev_idx; /* [idx] Level index */ const nc_type crd_typ_out=NC_DOUBLE; nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */ if(flg_grd_out_hyb){ hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out); rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out); rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out); if(ilev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out); }else{ /* p0 is in Pa but ilev traditionally given in hPa */ for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0; } /* !ilev_id_tpl */ if(lev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); }else{ /* p0 is in Pa but lev traditionally given in hPa */ for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0; } /* !ilev_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ /* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori Straightforward copy all variables and attributes that define grid from fl_tpl to output would work in theory, but would not allow dynamic identification and relabeling of names */ /* if(flg_grd_out_hyb){ const int vrt_grd_lst_nbr=8; const char 
*vrt_grd_lst[]={"/plev"}; } */ /* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file) Below this line, fl_in and in_id refer to input file to be vertically regridded We do not close template file until after copying all grid variables For maximum efficiency, we do this just after defining all interpolated variables in output That way no file needs to exit define mode or enter data mode more than once However this requires keeping template file, input data file, and output file simultaneously open */ in_id=rgr->in_id; out_id=rgr->out_id; /* Determine input grid type */ if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in; if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */ flg_grd_in_hyb=True; }else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */ flg_grd_in_prs=True; }else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_dpt; /* MPAS */ flg_grd_in_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ /* Sanity checks: One type of input and one type of output grid detected */ assert(!(flg_grd_in_hyb && flg_grd_in_prs)); assert(!(flg_grd_in_hyb && flg_grd_in_dpt)); assert(!(flg_grd_in_prs && flg_grd_in_dpt)); assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt); assert(!(flg_grd_out_hyb && flg_grd_out_prs)); assert(!(flg_grd_out_hyb && flg_grd_out_dpt)); assert(!(flg_grd_out_prs && flg_grd_out_dpt)); assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt); /* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported?
*/ nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */ if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs; assert(nco_vrt_ntp_typ != nco_ntp_nil); /* Variables on input grid, i.e., on grid in data file to be interpolated */ if(flg_grd_in_hyb){ rcd=nco_inq_varid(in_id,"hyai",&hyai_id); rcd=nco_inq_varid(in_id,"hyam",&hyam_id); rcd=nco_inq_varid(in_id,"hybi",&hybi_id); rcd=nco_inq_varid(in_id,"hybm",&hybm_id); /* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM: ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension ECMWF uses hya? instead of reference pressure whereas CAM/EAM provides "P0" in hPa */ if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True; else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True; else{ (void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm); abort(); } /* !rcd */ if(flg_grd_hyb_cameam){ rcd=nco_inq_varid(in_id,"P0",&p0_id); ilev_id=NC_MIN_INT; lev_id=NC_MIN_INT; if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id); if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id); } /* !flg_grd_hyb_cameam */ /* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */ if(flg_grd_hyb_ecmwf) rcd=nco_inq_varid(in_id,"lev",&lev_id); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id); if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){ /* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */ flg_grd_hyb_cameam=True; if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. 
The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm); }else{ if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){ (void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the input data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !ps_id_tpl */ } /* !ps_id */ } /* !flg_grd_in_prs */ if(flg_grd_in_dpt){ rcd=nco_inq_varid(in_id,"depth",&lev_id); } /* !flg_grd_in_dpt */ const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */ const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_in=ps_id; /* [id] Surface pressure ID */ /* Identify all record-dimensions in input file */ rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ if(flg_grd_in_hyb){ /* Get hybrid vertical information first */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in); if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in); if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm); ilev_nm_in=strdup(dmn_nm); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); /* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */ if(!flg_grd_out_hyb){ /* Problem: What is horizontal grid size of pressure grid file? Algorithm: Examine first multi-dimensional variable that includes plev dimension Assume horizontal dimensions vary more rapidly than (i.e., follow) plev Compute horizontal grid size accordingly Set output horizontal size to input horizontal size */ int var_nbr; /* [nbr] Number of variables in file */ int var_idx; /* [idx] Index over variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long)); for(var_idx=0;var_idx<var_nbr;var_idx++){ rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++) if(dmn_ids_in[dmn_idx] == dmn_id_lev_in) break; /* Does current variable have lev dimension? */ if(dmn_idx < dmn_nbr_in){ /* Yes. Do any dimensions vary more rapidly than lev? */ if(dmn_idx < dmn_nbr_in-1){
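/* Worked example of this heuristic (illustrative sizes): for a first matching variable shaped [time,plev,lat,lon] with lat=96 and lon=144, the dimensions following plev yield grd_sz_in=96*144=13824, and grd_sz_out is then set equal to grd_sz_in below */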
/* Yes. Assume remaining dimensions are horizontal spatial dimensions */ char var_nm[NC_MAX_NAME+1L]; (void)nc_inq_varname(in_id,var_idx,var_nm); for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz); grd_sz_in*=dmn_cnt_in[dmn_idx_hrz]; } /* !dmn_idx_hrz */ break; } /* !dmn_idx */ } /* !dmn_idx */ } /* !var_idx */ assert(var_idx != var_nbr); grd_sz_out=grd_sz_in; } /* !flg_grd_out_hyb */ } /* !flg_grd_in_prs */ double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */ double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */ double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */ double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */ double *lev_in=NULL; /* [Pa] Air pressure on input grid */ double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */ double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */ double *ps_in=NULL; /* [Pa] Surface pressure on input grid */ double p0_in; /* [Pa] Reference pressure on input grid */ if(flg_grd_in_hyb){ hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out); rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out); rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out); rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out); if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){
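/* Worked example of the decomposition below (illustrative values): ECMWF supplies hyai in Pa, e.g., hyai=5000.0 Pa, so after division by p0_in=100000.0 Pa the coefficient becomes 0.05, and the CAM/EAM-style formula p0_in*hyai+ps*hybi reproduces the original ECMWF pressure hyai+ps*hybi */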
/* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? */ p0_in=100000.0;
for(size_t idx=0;idx<(size_t)ilev_nbr_in;idx++) hyai_in[idx]/=p0_in; /* Scale all ilev_nbr_in interface coefficients (one more than lev_nbr_in) */
for(size_t idx=0;idx<(size_t)lev_nbr_in;idx++) hyam_in[idx]/=p0_in; /* !idx */
} /* flg_grd_hyb_ecmwf */ } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out); } /* !flg_grd_in_prs */ /* Always obtain surface pressure if input or output grid is hybrid */ if(flg_grd_in_hyb || flg_grd_out_hyb){ /* Copy horizontal grid information from input file LHS variables were set above if PS is in template file */ if(ps_id_tpl == NC_MIN_INT){ /* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */ dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */ rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){ dmn_id_tm_in=dmn_ids_in[dmn_idx]; dmn_idx_tm_in=dmn_idx; tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in]; if(tm_nbr_in > 1L) flg_vrt_tm=True; } /* tm_nbr_in > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Given all input PS information, define output PS information */ dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); /* fxm: next line works for hyb_in and is buggy for prs_in */ memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int)); memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long)); grd_sz_out=grd_sz_in; tm_nbr_out=tm_nbr_in; } /* !ps_id_tpl */ /* Timestep sequencing NB: tm_nbr_??? variables count timesteps in vertical grid definitions These are not necessarily the same as the number of timesteps in either file Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries Usually hybrid grids have as many timesteps in the grids as in the timeseries Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported) This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ if(tm_nbr_in > 1L || tm_nbr_out > 1L){ if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in); } /* !tm_nbr_in */ tm_nbr=tm_nbr_in > tm_nbr_out ?
tm_nbr_in : tm_nbr_out; /* Sanity checks */ assert(grd_sz_in == grd_sz_out); assert(tm_nbr_in == tm_nbr_out); ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer"); /* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */ if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Convert ECMWF-provided log(surface_pressure) to surface_pressure */ const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */ for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]); } /* flg_grd_hyb_ecmwf */ /* Finally have enough information to allocate output pressure grid */ ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer"); /* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */ }else{ memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr)); } /* !ps_id_tpl */ } /* ! */ /* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */ nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; /* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */ if(xtr_mth == nco_xtr_fll_msv){ const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */ double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */ double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */ double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */ long idx_lev_max; // [idx] Index of midpoint level with greatest pressure long idx_lev_min; // [idx] Index of midpoint level with lowest pressure size_t idx; // [idx] Counting index prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer"); prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer"); prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer"); prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer"); if(flg_grd_in_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_in-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_in; for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){ prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max]; prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_in_hyb */ if(flg_grd_out_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_out-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_out; for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){ 
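/* These extrema instantiate the formula-terms relation prs=P0*hyam[lev]+PS*hybm[lev] documented above; e.g., at the bottom level of a typical hybrid grid hyam~0.0 and hybm~1.0 (illustrative values), so prs_max is essentially PS itself */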
prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max]; prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_in_prs){ double lev_in_max; double lev_in_min; if(lev_in[0] < lev_in[1]) lev_in_max=lev_in[lev_nbr_in-1]; else lev_in_max=lev_in[0]; if(lev_in[0] < lev_in[1]) lev_in_min=lev_in[0]; else lev_in_min=lev_in[lev_nbr_in-1]; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min; } /* !flg_grd_in_prs */ if(flg_grd_out_prs){ double lev_out_max; double lev_out_min; if(lev_out[0] < lev_out[1]) lev_out_max=lev_out[lev_nbr_out-1]; else lev_out_max=lev_out[0]; if(lev_out[0] < lev_out[1]) lev_out_min=lev_out[0]; else lev_out_min=lev_out[lev_nbr_out-1]; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_max_out[idx_out]=lev_out_max; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_min_out[idx_out]=lev_out_min; } /* !flg_grd_out_prs */ for(idx=0;idx<ps_sz;idx++) if(prs_max_out[idx] > prs_max_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; for(idx=0;idx<ps_sz;idx++) if(prs_min_out[idx] < prs_min_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one point in at least one output level requires extrapolation (not interpolation). Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm); if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in); if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out); if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in); if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out); } /* !xtr_mth */ /* Lay-out regridded file */
//(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps);
/* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */ if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out; if(rgr->ilev_nm_out){ if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out; if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out; } /* !ilev_nm_out */ if(flg_grd_out_prs){ /* Unless user explicitly specifies output name, use same name as input */ if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in); /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */ ilev_nm_out=(char *)strdup(lev_nm_out); } /* !flg_grd_out_prs */ /* Define new vertical dimensions before all else */ if(flg_grd_out_hyb){ rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out); rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); /* Horizontal dimensions necessary to define PS variable */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm); }else{ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); } /* !ps_id_tpl */ if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2).
Avoid re-definition */ if(flg_grd_hyb_ecmwf) if(strcmp(dmn_nm,ilev_nm_out)) if(strcmp(dmn_nm,lev_nm_out)) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !dmn_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); } /* !flg_grd_out_prs */ /* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */ /* Exception list source: CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS ECMWF: hyai, hyam, hybi, hybm, lev, lnsp NCEP: plev */ const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ /* 20191001: Do not automatically define plev_nm_in in pressure-grid output files The variable named lev_nm_out in the input data file is always defined in the output file So if plev_nm_in == lev_nm_out it will be defined anyway */ if(flg_grd_in_prs && flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ //const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ //const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ //const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ if(flg_grd_out_hyb){ 
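/* The definitions below follow the usual netCDF define-mode idiom, sketched here with the raw netCDF C API for orientation (this file uses the nco_* wrappers; names illustrative):
   int dmn_id,var_id;
   nc_def_dim(out_id,"lev",(size_t)lev_nbr_out,&dmn_id); // define dimension first
   nc_def_var(out_id,"lev",NC_DOUBLE,1,&dmn_id,&var_id); // then variable over it
   nc_enddef(out_id); // leave define mode once, after all definitions
   nc_put_var_double(out_id,var_id,lev_out); // only then write values
   Deferring nco_enddef() until every variable is defined (done further below) avoids repeated, costly define-mode re-entry */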
rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ // rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm); // (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm); // } /* !dmn_idx */ if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id); if(flg_grd_hyb_ecmwf){ /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */ int dmn_nbr_out_ecmwf=0; for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2")) rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++); } /* !dmn_idx */ rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id); } /* !flg_grd_hyb_ecmwf */ if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY); if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */ if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY); if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY); if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); 
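/* Pure-pressure output has only one vertical dimension, so hybrid-input interface-level (ilev) variables are written on the lev grid (cf. note above); aliasing the dimension ID below implements that collapse */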
dmn_id_ilev_out=dmn_id_lev_out; } /* !flg_grd_out_prs */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_ilev; /* [flg] Contains interface level dimension */ nco_bool has_lev; /* [flg] Contains midpoint level dimension */ nco_bool has_tm; /* [flg] Contains time dimension */ nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */ nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_ilev=False; has_lev=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */ if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in); if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in); } /* end loop over dimensions */ /* Regrid variables that contain either vertical dimension */ if(has_ilev || has_lev){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_ilev) need_prs_ntf=True; if(has_lev) need_prs_mdp=True; } /* endif */ assert(!(has_ilev && has_lev)); /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); aed_sct aed_mtd_fll_val; char *att_nm_fll_val=strdup("_FillValue"); int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ float mss_val_flt; double mss_val_dbl; if(flg_add_msv_att){ aed_mtd_fll_val.att_nm=att_nm_fll_val; aed_mtd_fll_val.mode=aed_create; aed_mtd_fll_val.sz=1L; mss_val_dbl=NC_FILL_DOUBLE; mss_val_flt=NC_FILL_FLOAT; } /* !flg_add_msv_att */ /* Define interpolated and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Interpolate */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. 
HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){ /* Change ilev dimension */ dmn_id_out[dmn_idx]=dmn_id_ilev_out; dmn_cnt_out[dmn_idx]=ilev_nbr_out; }else if(!strcmp(dmn_nm,lev_nm_in)){ /* Change lev dimension */ dmn_id_out[dmn_idx]=dmn_id_lev_out; dmn_cnt_out[dmn_idx]=lev_nbr_out; }else{ /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); } /* !ilev */ if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-interpolated variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ_out; if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm);
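/* Behavior sketch for xtr_mth=nco_xtr_fll_msv: any output level whose pressure lies outside the input column receives this _FillValue instead of an extrapolated value, e.g., an output level at 1000 hPa over terrain where the input column bottoms-out near 850 hPa (illustrative values) would be masked */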
%s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* end idx_tbl */ /* Free pre-allocated array space */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Turn off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy all grid variables */ if(flg_grd_out_hyb){ (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out); (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out); (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out); (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out); (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out); (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out); (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */ if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; size_t idx_in; /* [idx] Index into 3D input variables */ size_t idx_out; /* [idx] Index into 3D output variables */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ /* Interpolate or copy variable values */ double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */ double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */ int lvl_idx_in; /* [idx] Level index on input grid */ int lvl_idx_out; /* [idx] Level index on output grid */ int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on input grid */ int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */ int thr_idx; /* [idx] Thread index */ size_t grd_nbr=grd_sz_in; /* [nbr] Horizontal grid size */ size_t idx_dbg=rgr->idx_dbg; /* Using naked stdin/stdout/stderr in parallel region generates warnings; copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* Repeating above documentation for the forgetful: NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file. This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! 
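For orientation, the loops below reconstruct pressure with the standard hybrid sigma-pressure definition (a sketch of the midpoint case; interface pressures use hyai/hybi analogously): prs_mdp_in[grd_idx+lev_idx*grd_sz_in] = p0_in*hyam_in[lev_idx] + ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]. Pure-pressure grids instead replicate lev into every column, and when flg_ntp_log is true both pressure arrays are converted in-place to log(p) so the interpolation below is linear in log-pressure.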
*/ for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ /* Index-offset to current surface pressure timeslice */ idx_fst=tm_idx*grd_sz_in; if(need_prs_mdp){ /* Allocate and define midpoint pressures */ if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer"); if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*lev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]); var_sz_out=grd_sz_out*lev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_mdp */ if(need_prs_ntf){ /* Allocate and define interface pressures */ if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer"); if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*ilev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]); var_sz_out=grd_sz_out*ilev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_ntf */ /* Set firstprivate variables to initial values */ has_ilev=False; has_lev=False; has_tm=False; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n"); #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 490 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUG__ */ #if defined( 
__INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for # else # pragma omp parallel for firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Interpolate variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True; if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True; if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True; if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){ dmn_cnt_in[dmn_idx]=1L; dmn_srt[dmn_idx]=tm_idx; }else{ dmn_srt[dmn_idx]=0L; } /* !flg_vrt_tm */ var_sz_in*=dmn_cnt_in[dmn_idx]; } /* !dmn_idx */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ /* Dimension count vector is same as input except for lvl dimension */ dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx]; if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out; if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out; var_sz_out*=dmn_cnt_out[dmn_idx]; } /* end loop over dimensions */ var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val) mss_val_dbl=NC_FILL_DOUBLE; if(has_ilev){ /* Interpolate current variable from input interface pressure grid to output interface pressure grid */ lvl_nbr_in=ilev_nbr_in; lvl_nbr_out=ilev_nbr_out; prs_ntp_in=prs_ntf_in; prs_ntp_out=prs_ntf_out; }else{ /* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */ lvl_nbr_in=lev_nbr_in; lvl_nbr_out=lev_nbr_out; prs_ntp_in=prs_mdp_in; prs_ntp_out=prs_mdp_out; } /* !ilev */ /* Procedure: Extract input/output coordinate/data arrays into 1D column order. This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines. After interpolating into 1D sequential memory, copy back to ND output and repeat */ double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */ double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */ double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */ double *dat_out=NULL; /* Output data (interpolated) on output vertical coordinate grid (i.e., the answer) */ double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */ double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */ double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */ double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */ nco_xtr_sct xtr_LHS; nco_xtr_sct xtr_RHS; size_t brk_lft_idx; size_t brk_rgt_idx; size_t in_idx; size_t in_nbr; size_t out_nbr; size_t out_idx; /* Default extrapolation uses nearest valid neighbor */ xtr_LHS.xtr_fll=True; xtr_LHS.xtr_vrb=False; xtr_LHS.typ_fll=xtr_mth; xtr_RHS.xtr_fll=True; 
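/* For orientation, the per-column loop below is textbook piecewise-linear interpolation: brk_lft_idx marches rightward until crd_in_mnt brackets the target coordinate, then dat_out = dat_lft + (crd_out-crd_lft)*(dat_rgt-dat_lft)/(crd_rgt-crd_lft). Targets outside the input range fall through to the LHS/RHS extrapolation switches. Worked numbers for the special-case extrapolations defined just below (hypothetical inputs, linear-in-pressure coordinates assumed): the hypsometric increment between 850 hPa and 1000 hPa with Tv = 288 K is (287.0/9.81)*288.0*ln(1000/850) ~ 1370 m, and lapse-rate extrapolation 5000 Pa downward warms by gamma_moist*5000 = 3.25 K */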
xtr_RHS.xtr_vrb=False; xtr_RHS.typ_fll=xtr_mth; /* Special-case extrapolation methods allowed for all except missing-value extrapolation types */ if(xtr_mth != nco_xtr_fll_msv){ if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt; else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph; } /* !xtr_mth */ crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); in_nbr=lvl_nbr_in; out_nbr=lvl_nbr_out; nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */ nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */ /* Determine monotonicity direction only once, based on first vertical column */ if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False; out_ncr=True; if(out_nbr > 1) if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0) out_ncr=False; /* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */ if(!in_ncr){ crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); } /* !in_ncr */ if(!out_ncr){ crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); } /* !out_ncr */ /* Constants and parameters for extrapolation */ const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */ const double Rd_rcp_g0=287.0/9.81; /* [m/K] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */ const double tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */ nco_bool FIRST_WARNING_LHS=False; /* [flg] First warning for LHS extrapolation */ nco_bool FIRST_WARNING_RHS=False; /* [flg] First warning for RHS extrapolation */ if(tm_idx == 0){ /* Only print extrapolation warnings for first timestep to prevent noisy output. NB: False initialization above suppresses warnings for extrapolations that appear after first timestep */ FIRST_WARNING_LHS=True; FIRST_WARNING_RHS=True; } /* !tm_idx */ /* Outer loop over columns */ for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){ /* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */ for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){ idx_in=grd_idx+lvl_idx_in*grd_nbr; crd_in[lvl_idx_in]=prs_ntp_in[idx_in]; dat_in[lvl_idx_in]=var_val_dbl_in[idx_in]; } /* !lvl_idx_in */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; crd_out[lvl_idx_out]=prs_ntp_out[idx_out]; } /* !lvl_idx_out */ /* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase. However, monotonically decreasing coordinates useful in many cases, such as depth coordinate, and pressure levels arranged largest to smallest (favored by CMIP). Next code block reverses array(s) if necessary so coordinates monotonically increase. Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption. Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */ if(in_ncr){ crd_in_mnt=crd_in; dat_in_mnt=dat_in; }else{ for(in_idx=0;in_idx<in_nbr;in_idx++){ crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1]; 
dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1]; } /* !in_idx */ } /* !in_ncr */ if(out_ncr){ crd_out_mnt=crd_out; dat_out_mnt=dat_out; }else{ for(out_idx=0;out_idx<out_nbr;out_idx++) crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1]; } /* !out_ncr */ // Initialize bracketing index brk_lft_idx=0; // Loop over desired output coordinates for(out_idx=0;out_idx<out_nbr;out_idx++){ // Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){ brk_lft_idx++; } // !while brk_lft_idx--; // Handle identity interpolation separately to preserve symmetry in extrapolation code if(brk_lft_idx != in_nbr-1){ if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){ dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1]; if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works continue; // Jump to next iteration } // !crd_in_mnt } // !brk_lft_idx if(brk_lft_idx == -1){ // LHS Extrapolation required // Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0] brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_LHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_LHS.xtr_fll */ switch(xtr_LHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[0]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[0]- (crd_in_mnt[0]-crd_out_mnt[out_idx])* (dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]); break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]); if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_LHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_LHS.typ_fll if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else if(brk_lft_idx < in_nbr-1){ // Normal case: crd_out_mnt is interpolable brk_rgt_idx=brk_lft_idx+1; // NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx // This simultaneously meets two criteria: // 1. Divide-by-zero errors are impossible in the next step // 2. 
The identity interpolation is satisfied since crd_dlt == 0.0: // i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx] // Linearly interpolate dat_out_mnt[out_idx]= dat_in_mnt[brk_lft_idx]+ (crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])* (dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/ (crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]); }else if(brk_lft_idx == in_nbr-1){ // RHS Extrapolation required // Degenerate case: brk_lft_idx is last element of crd_in_mnt brk_rgt_idx=brk_lft_idx; if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_RHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_RHS.xtr_fll */ switch(xtr_RHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])* (dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/ (crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]); break; case nco_xtr_fll_tpt: if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist; else /* Coordinates are already linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist; if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm); FIRST_WARNING_RHS=False; break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]); if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_RHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_RHS.typ_fll if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else{ (void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } // !RHS } // !out_idx /* Un-reverse output data to be on original grid */ if(!out_ncr) for(out_idx=0;out_idx<out_nbr;out_idx++) 
dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1]; // End of vec.hh code /* Copy answers into output array */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; var_val_dbl_out[idx_out]=dat_out[lvl_idx_out]; } /* !lvl_idx_out */ if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){ (void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg); for(out_idx=0;out_idx<out_nbr;out_idx++){ (void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]); } /* !out_idx */ } /* !dbg */ } /* !grd_idx */ if(crd_in) crd_in=(double *)nco_free(crd_in); if(crd_out) crd_out=(double *)nco_free(crd_out); if(dat_in) dat_in=(double *)nco_free(dat_in); if(dat_out) dat_out=(double *)nco_free(dat_out); if(!in_ncr){ if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt); if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt); } /* !in_ncr */ if(!out_ncr){ if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt); if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt); } /* !out_ncr */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ if(tm_idx == 0){ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !tm_idx */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); } /* !tm_idx */ if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in); if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in); if(hyai_in) hyai_in=(double *)nco_free(hyai_in); if(hyam_in) hyam_in=(double *)nco_free(hyam_in); if(hybi_in) hybi_in=(double *)nco_free(hybi_in); if(hybm_in) hybm_in=(double *)nco_free(hybm_in); if(ps_in) ps_in=(double *)nco_free(ps_in); if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in); if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in); if(hyai_out) hyai_out=(double *)nco_free(hyai_out); if(hyam_out) hyam_out=(double *)nco_free(hyam_out); if(hybi_out) hybi_out=(double *)nco_free(hybi_out); if(hybm_out) hybm_out=(double *)nco_free(hybm_out); if(ilev_out) ilev_out=(double *)nco_free(ilev_out); if(lev_in) lev_in=(double *)nco_free(lev_in); if(lev_out) lev_out=(double *)nco_free(lev_out); if(ps_out) ps_out=(double *)nco_free(ps_out); if(prs_mdp_out) prs_mdp_out=(double 
*)nco_free(prs_mdp_out); if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out); return rcd; } /* !nco_ntp_vrt() */ int /* O [enm] Return code */ nco_rgr_wgt /* [fnc] Regrid with external weights */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Regrid fields using external weights contained in a mapfile Examine ESMF, SCRIP, Tempest map-files: ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m Test ESMF, SCRIP, Tempest map-files: ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip Conventions: grid_size: Number of gridcells (product of lat*lon) address: Source and destination index for each link pair num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix num_wgts: Number of weights per vertex for given remapping (we only handle num_wgts == 1 below) = 1 Bilinear: Destination grid value determined by weights times known source grid values at vertices of source quadrilateral that bounds destination point P One weight per vertex guarantees fxm but is not conservative Bilinear requires logically rectangular grid = 1 Distance-based: Distance-weighted uses values at num_neighbors points The weight is inversely proportional to the angular distance from the destination point to each neighbor on the source grid = 3 Second-order conservative: Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210 First-order conservative schemes assume fluxes are constant within gridcell Destination fluxes are simple summations of source fluxes weighted by overlap areas Old clm and bds remappers use a first-order algorithm Second-order improves this by using a first-order Taylor expansion of flux Source flux is centroid value plus directional offset determined by dot product of directional gradient and vector pointing from vertex to centroid. Three weights per vertex are centroid weight, weight times local theta-gradient from centroid to vertex, and weight times local phi-gradient from centroid to vertex. = 4 Bicubic: The four weights are gradients in each direction plus a cross-gradient term Same principle as bilinear, but more weights per vertex Bicubic requires logically rectangular grid wgt: Maximum number of source cells contributing to destination cell is not a dimension in SCRIP remapping files because SCRIP stores everything in 1-D sparse matrix arrays Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 
8, 13, 16: for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ // Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid // Normalization: fractional area (fracarea) (F = 1 where destination overlaps unmasked source grid) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]; // Normalization: destination area (destarea) (weights in each destination cell sum to its area fraction) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]]; // Normalization: none (F = angular area that participates in remapping) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]]); } // end loop over lnk Documentation: NCL special cases described in popRemap.ncl, e.g., at https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl ESMF Regridding Status: https://www.earthsystemcog.org/projects/esmf Sample regrid T42->POP43, SCRIP: ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */ char *fl_in; char *fl_pth_lcl=NULL; const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double area_out_ttl=0.0; /* [frc] Exact sum of area */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */ int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */ int dst_grid_size_id; /* [id] Destination grid size dimension ID */ int num_links_id; /* [id] Number of links dimension ID */ int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */ int src_grid_corners_id; /* [id] Source grid corners dimension ID */ int src_grid_rank_id; /* [id] Source grid rank dimension ID */ int src_grid_size_id; /* [id] Source grid size dimension ID */ long int lat_idx; long int lon_idx; short int bnd_idx; nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */ nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */ nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */ nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */ nco_grd_lat_typ_enm nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */ nco_mpf_sct mpf; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map); /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_map); /* Make sure file is on local system and is readable or die trying */ 
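/* For orientation, once the weight arrays are read from the map-file opened below, applying the map to one level of one field reduces to the sparse multiply sketched above. A minimal sketch, assuming fracarea normalization and 1-based SCRIP/ESMF addresses, with dst zero-initialized on the destination grid: for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) dst[row_dst_adr[lnk_idx]-1]+=wgt_raw[lnk_idx]*src[col_src_adr[lnk_idx]-1]; */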
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Identify mapping file type using string generated by weight-generator: ESMF: title = "ESMF Offline Regridding Weight Generator" ESMF_weight_only: title = "ESMF Regrid Weight Generator" NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator" SCRIP: conventions = "SCRIP" Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */ char *att_val; char *att_cnv_val=NULL; char *att_ttl_val=NULL; char *cnv_sng=NULL; /* netCDF standard is uppercase Conventions, though some models use lowercase */ char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */ char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */ char att_sng_Ttl[]="Title"; /* [sng] NCO and Tempest use "Title" attribute, and Tempest does not use "Conventions" */ char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */ char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */ nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */ nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */ /* Look for map-type signature in [cC]onventions or [tT]itle attribute */ att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv); if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv); att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl); if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl); /* If "[cC]onventions" or "[tT]itle" attribute was found, it determines map-file type... */ if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP; if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){ if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF; else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest; else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only; } /* !att_ttl_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){ (void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? 
att_ttl_val : ""); nco_rgr_mpf_typ=nco_rgr_mpf_unknown; } /* !nco_rgr_mpf_typ */ if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val); if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val); switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id); rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id); break; case nco_rgr_mpf_ESMF_weight_only: rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){ rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id); if(rcd != NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields. No harm, no foul.\n",nco_prg_nm_get(),fnc_nm); rcd=NC_NOERR; } /* !rcd */ } /* !nco_rgr_mpf_Tempest */ rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; default: (void)fprintf(stderr,"%s: ERROR %s unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Use dimension IDs to get dimension sizes */ rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links); if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size); rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size); rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners); rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners); rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank); rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank); /* TempestRemap does not generate num_wgts */ if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){ mpf.num_wgts=int_CEWI; }else{ rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts); } /* !num_wgts_id */ assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX); }else{ mpf.src_grid_size=long_CEWI; mpf.dst_grid_size=long_CEWI; mpf.src_grid_corners=long_CEWI; mpf.dst_grid_corners=long_CEWI; mpf.src_grid_rank=long_CEWI; mpf.dst_grid_rank=long_CEWI; mpf.num_wgts=int_CEWI; } /* !ESMF_weight_only */ cnv_sng=strdup("normalization"); nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and 
map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR way of guaranteeing that ESMF re-normalization is not performed by default. */ if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */ if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* 20150712: Tempest does not store a normalization attribute 20170620: ESMF weight_only does not store a normalization attribute 20190312: NCO does not yet store a normalization attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown; } /* endif normalization */ assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); cnv_sng=strdup("map_method"); nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative; if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear; if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* NCO and Tempest do not store a map_method attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown; } /* endif */ assert(nco_rgr_mth_typ != nco_rgr_mth_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts); } /* endif dbg */ /* 20190726: Allow normalization type to be "none" for bilinear regridding, which UKMO SCRIP files set to "none" */ if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){ (void)fprintf(stdout,"%s: ERROR %s reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. 
Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); nco_exit(EXIT_FAILURE); } /* !msk */ /* Got to here in bullet-proofing code for weight-only map-files */ if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm); assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only); /* Set type of grid conversion */ if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D; if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D; assert(nco_rgr_typ != nco_rgr_grd_nil); /* Save typing later */ nco_bool flg_grd_in_1D=False; nco_bool flg_grd_in_2D=False; nco_bool flg_grd_out_1D=False; nco_bool flg_grd_out_2D=False; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True; if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True; int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */ if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1; /* Obtain grid values necessary to compute output latitude and longitude coordinates */ int area_dst_id; /* [id] Area variable ID */ int col_src_adr_id; /* [id] Source address (col) variable ID */ int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */ int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */ int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */ int dst_grd_crn_lon_id; /* [id] Destination grid corner longitudes variable ID */ int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */ int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */ int frc_dst_id; /* [id] Fraction variable ID */ int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */ int row_dst_adr_id; /* [id] Destination address (row) variable ID */ int wgt_raw_id; /* [id] Remap matrix variable ID */ switch(nco_rgr_mpf_typ){ /* Obtain fields whose name depends on mapfile type */ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */ rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */ rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */ rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_ESMF_weight_only: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */ 
rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */ rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */ rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */ rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */ rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */ } /* !nco_rgr_mpf_ESMF_weight_only */ rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */ rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */ rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; default: (void)fprintf(stderr,"%s: ERROR %s unknown map file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Obtain fields whose presence depends on mapfile type */ nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */ msk_dst_id=NC_MIN_INT; if(flg_msk_out){ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: case nco_rgr_mpf_NCO: rcd+=nco_inq_varid(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */ break; case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */ break; case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: /* 20190315: TempestRemap did not propagate mask_b (or mask_a) until ~201902 */ rcd+=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); if(rcd == NC_ENOTVAR){ (void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest) ? "Probably this TempestRemap map-file was created before ~201902 when TR began to propagate mask_a/b variables." 
: ""); } /* !rcd */ rcd=NC_NOERR; break; default: (void)fprintf(stderr,"%s: ERROR %s unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); } /* !nco_rgr_mpf_typ */ if(msk_dst_id == NC_MIN_INT) flg_msk_out=False; } /* !flg_msk_out */ /* Obtain fields whose names are independent of mapfile type */ rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id); rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id); int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */ int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */ int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */ int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */ if(flg_grd_in_2D){ lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn_src=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ /* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order Newer behavior follows SCRIP [lon,lat] order Challenge: Support both older and newer Tempest mapfiles Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_src=1; lat_psn_src=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_in_2D */ if(flg_grd_out_2D){ lon_psn_dst=0; lat_psn_dst=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_dst=1; lat_psn_dst=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_out_2D */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ double *area_out; /* [sr] Area of destination grid */ double *frc_out=NULL; /* [frc] Fraction of destination grid */ double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */ double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */ double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */ double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */ double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */ double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */ double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */ double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */ double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */ double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */ double *slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV 
destination grid */ double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */ double *wgt_raw; /* [frc] Remapping weights */ int *col_src_adr; /* [idx] Source address (col) */ int *row_dst_adr; /* [idx] Destination address (row) */ int *msk_out=NULL; /* [flg] Mask on destination grid */ int *dmn_sz_in_int; /* [nbr] Array of dimension sizes of source grid */ int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_cnt=NULL; long *dmn_srt=NULL; long *dmn_srd=NULL; long idx; /* [idx] Counting index for unrolled grids */ /* Allocate space to hold dimension metadata for destination grid */ dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srt[0]=0L; dmn_cnt[0]=mpf.src_grid_rank; dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.dst_grid_rank; dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT); /* Check for and work around faulty Tempest and MPAS-O/I grid sizes */ if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]); dmn_sz_in_int[0]=mpf.src_grid_size; } /* !bug */ if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. 
Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]); dmn_sz_out_int[0]=mpf.dst_grid_size; } /* !bug */ long col_nbr_in; /* [idx] Number of columns in source grid */ long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */ long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */ const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */ const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_in_1D){ col_nbr_in=dmn_sz_in_int[0]; lon_nbr_in=dmn_sz_in_int[0]; lat_nbr_in=dmn_sz_in_int[0]; }else if(flg_grd_in_2D){ col_nbr_in=0; lon_nbr_in=dmn_sz_in_int[lon_psn_src]; lat_nbr_in=dmn_sz_in_int[lat_psn_src]; /* Sanity-check */ assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in); } /* !src_grid_rank */ const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */ int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */ long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */ long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */ long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV destination grid */ long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV destination grid */ if(flg_grd_out_1D){ bnd_nbr_out=mpf.dst_grid_corners; col_nbr_out=dmn_sz_out_int[0]; lat_nbr_out=dmn_sz_out_int[0]; lon_nbr_out=dmn_sz_out_int[0]; /* Sanity-check */ assert(col_nbr_out == (long)grd_sz_out); }else if(flg_grd_out_2D){ lat_nbr_out=dmn_sz_out_int[lat_psn_dst]; lon_nbr_out=dmn_sz_out_int[lon_psn_dst]; col_nbr_out=lat_nbr_out*lon_nbr_out; slat_nbr_out=lat_nbr_out-1L; slon_nbr_out=lon_nbr_out; /* Sanity-check */ assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out); } /* !dst_grid_rank */ /* Ensure coordinates are in degrees not radians for simplicity and CF-compliance. NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians! 
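Detection below keys on a "units" attribute that contains "radian"; when found, centers and bounds are multiplied by rdn2dgr = 180/pi ~ 57.2957795, e.g., a center stored as 0.785398 rad becomes 45.0 degrees.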
*/ nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */ char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */ att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng); if(att_val){ /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */ nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */ const nc_type crd_typ_out=NC_DOUBLE; if(flg_grd_out_2D){ lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out); /* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear output grid. */ flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); }else{ long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */ for(idx=0;idx<(long)grd_sz_out;idx++){ if(idx%lon_nbr_out == 0) idx_tst=idx; if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break;
// (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]); /* fxm: also test lon */
} /* !rectangular */ if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True; if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ?
"Curvilinear" : "Rectangular"); } /* !flg_grd_out_crv */ if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners; if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */ } /* !flg_grd_out_2D */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ)); (void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out); } /* endif dbg */ /* Allocate space for and obtain coordinates */ if(flg_grd_out_1D){ lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out)); lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_rct */ /* Arrays unroll into all longitudes for first latitude, then second latitude, ... Obtain longitudes by reading first block contiguously (unstrided) Obtain latitudes by reading unrolled data with stride of lon_nbr */ if(flg_grd_out_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0;idx<col_nbr_out;idx++){ lon_ctr_out[idx]*=rdn2dgr; lat_ctr_out[idx]*=rdn2dgr; } /* !idx */ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){ lon_bnd_out[idx]*=rdn2dgr; lat_bnd_out[idx]*=rdn2dgr; } /* !idx */ } /* !rdn */ /* Is 1D interface information usable? 
Yes, unless if all interfaces are zeros NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */ flg_bnd_1D_usable=True; for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lon_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out){ flg_bnd_1D_usable=False; }else{ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lat_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False; } /* !usable */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr_out;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr_out;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ /* fxm: sub-sample these from the already-read ctr/crn arrays */ dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out); dmn_srt[0L]=dmn_srt[1]=0L; dmn_cnt[0L]=lon_nbr_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; dmn_srt[1]=0L; dmn_cnt[1]=mpf.dst_grid_corners; dmn_srd[1]=1L; rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_rct */ if(flg_grd_out_crv){ if(flg_crd_rdn){ for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_crv */ /* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */ area_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out); frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out); if(msk_dst_id != NC_MIN_INT){ msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT); } /* !msk */ /* Derive 2D interface boundaries from lat and lon grid-center values NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible However, ESMF often outputs interfaces values (e.g., yv_b) 
for midpoint coordinates (e.g., yc_b) For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0 Latitude = -90 is, by definition, not a midpoint coordinate This appears to be an artifact of the non-physical representation of the FV grid, i.e., a grid center located at the pole where longitudes collapse in the model, but cannot be represented as collapsed on a rectangular 2D grid with non-zero areas. Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole so that applications can easily diagnose an FV grid when they read in datasets. A superior application could diagnose FV just fine from actual non-polar gridcell centers Maybe ESMF could introduce a flag or something to indicate/avoid this special case? Safer to read boundary interfaces directly from grid corner/vertex arrays in map file Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is as follows Do not implement this procedure until resolving midpoint/center issue described above: lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]); for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]); lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0; lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */ if(flg_grd_out_rct){ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False; /* Obtain 1-D rectangular interfaces from unrolled 1-D vertex arrays */ for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx]; lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L]; for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx]; lat_ntf_out[lat_nbr_out]=lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L]; lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]); /* Place 1-D rectangular interfaces into 2-D coordinate boundaries */ for(idx=0L;idx<lon_nbr_out;idx++){ lon_bnd_out[2L*idx]=lon_ntf_out[idx]; lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L]; } /* end loop over longitude */ for(idx=0L;idx<lat_nbr_out;idx++){ lat_bnd_out[2L*idx]=lat_ntf_out[idx]; lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L]; } /* end loop over latitude */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]); for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]); } /* endif dbg */ /* Global or regional grid?
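Reasoning sketch for the test below: lon_spn and lat_spn derive from the outermost interfaces, so a global equi-angular 180x360 grid yields lon_spn = 360.0 and lat_spn = 180.0 and is flagged global, whereas a limited-area domain with, e.g., lon_spn = 62.0 and lat_spn = 28.0 (illustrative values, not from any particular grid) is flagged regional; the comparison is performed in single precision to tolerate roundoff in the stored interfaces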
*/ nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; /* Diagnose type of latitude output grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out; if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */ if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa; if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv; double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision if(nco_grd_lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss); if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss; if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out); } /* !Gaussian */ if(nco_grd_lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ nco_grd_lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk; else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss; else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv; else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa; else assert(False); if(nco_grd_lon_typ == nco_grd_lon_nil){ /* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid of with lon_Grn_ctr */ if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == 
(float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb; else nco_grd_lon_typ=nco_grd_lon_unk; } /* !nco_grd_lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out)); for(idx=0L;idx<slat_nbr_out;idx++){ slat_ctr_out[idx]=lat_ntf_out[idx+1L]; slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */ } /* !lat_nbr_out */ for(idx=0L;idx<slon_nbr_out;idx++){ slon_ctr_out[idx]=lon_ntf_out[idx]; } /* !lat_nbr_out */ } /* !nco_grd_lat_fv */ switch(nco_grd_lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */ break; case nco_grd_lat_gss: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx]; if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out); break; case nco_grd_lat_unk: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* end nco_grd_lat_typ switch */ /* Fuzzy test of latitude weight normalization */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */ if(nco_grd_lat_typ != nco_grd_lat_unk){ assert(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc < eps_rlt); if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ } /* !nco_grd_lat_unk */ } /* !flg_grd_out_rct */ /* When possible, ensure area_out is non-zero 20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used" Actually, ESMF does (always?) 
output area, but area == 0.0 unless conservative remapping is used 20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0 20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM have area_out == 0 20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation Check whether NCO must diagnose and provide its own area_out */ /* If area_out contains any zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] == 0.0) break; if(idx != (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx); } /* !zero */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap).
This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else{ /* !1D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. This area will be everywhere zero.\n",rgr->area_nm); } /* !2D */ } /* !area */ if(flg_dgn_area_out){ if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n"); /* Area of unstructured grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out); } /* !1D */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n"); /* Area of curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){ /* Mr. Enenstein and George O. Abell taught me the area of spherical zones Spherical zone area is exact and faithful to underlying rectangular equi-angular grid However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !spherical zones */ } /* !flg_dgn_area_out */ if(rgr->tst == -1){ /* Passing --rgr tst=-1 causes regridder to fail here This failure should cause host climo script to abort */ (void)fprintf(stdout,"%s: ERROR %s reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !tst */ /* Verify frc_out is sometimes non-zero ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero." SCRIP: Similar to ESMF For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids How does an input ocean grid indicate that, say, half the gridcell is land and half ocean? Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional? In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? 
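Worked example of the normalization decided below (values assumed for illustration): with a 'destarea' map, a destination cell only half-covered by valid source cells has frc_out = frac_b = 0.5, so the raw link-sum yields one-half the true cell-mean of an intensive field; dividing by the non-zero frc_out[dst_idx], which is what flg_frc_nrm later triggers, restores the mean, e.g., 136.5/0.5 = 273.0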
*/ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ (void)fprintf(stdout,"%s: ERROR %s reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !always zero */ /* Test whether frc_out is ever zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] == 0.0) break; if(nco_dbg_lvl_get() >= nco_dbg_std) if(idx != (long)grd_sz_out) (void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx=%ld)\n",nco_prg_nm_get(),fnc_nm,idx); /* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?) Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */ nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is in [1-epsilon,frc_out,1+epsilon] */ nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */ double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */ double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */ long idx_max_dvn=0L; /* [idx] Index of maximum deviation from 1.0 */ for(idx=0;idx<(long)grd_sz_out;idx++){ frc_out_dff_one=fabs(frc_out[idx]-1.0); if(frc_out_dff_one > frc_out_dff_one_max){ frc_out_dff_one_max=frc_out_dff_one; idx_max_dvn=idx; } /* !max */ } /* !idx */ if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False; nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */ if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */ ((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */ (nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */ && True){ flg_frc_nrm=True; /* Avoid writing frc_out unless discrepancies are particularly egregious Otherwise would write frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */ double eps_rlt_wrt_thr=3.0e-13; /* 20181104: Never write frac_b for CMIP6! */ /* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated.
Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]); if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr); } /* !sometimes non-unity */ if(flg_frc_nrm && rgr->flg_rnr){ // 20190918: Weaken warning because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr (void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize fields to preserve mean-values (rather than integral values) in destination gridcells incompletely covered by valid data in source gridcells (i.e., non-unity frc_dst = dst_frac = frac_b)\n",nco_prg_nm_get(),fnc_nm); //(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); //nco_exit(EXIT_FAILURE); } /* !flg_rnr */ /* Detailed summary of 2D grids now available including quality-checked coordinates and area */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_out_ttl=0.0; if(flg_grd_out_rct){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(idx=0;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; } /* !flg_grd_out_rct */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI)); if(flg_grd_out_rct){ for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]); } /* !flg_grd_out_rct */ if(nco_dbg_lvl_get() > nco_dbg_crr) for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) (void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]); assert(area_out_ttl > 0.0); assert(area_out_ttl <= 4.0*M_PI); } /* endif dbg */ /* Allocate space for and obtain weights and addresses */ wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights"); col_src_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); /* Obtain remap matrix addresses and weights from map file */ 
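/* Illustrative sketch (commentary only, not part of the build): after the Fortran-to-C
   index shift below, applying the map to one unrolled field is conceptually a sparse
   matrix-vector multiply over links:
     for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++)
       var_out[row_dst_adr[lnk_idx]]+=wgt_raw[lnk_idx]*var_in[col_src_adr[lnk_idx]];
   with var_out zero-initialized first. var_in/var_out are placeholder names; the actual
   regridding loop, with missing-value and frc_out handling, appears later in this function */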
dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT); rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){ rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE); }else{ /* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts] Apply only first weight for first-order conservative accuracy (i.e., area overlap) Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */ dmn_srd[0]=1L; dmn_srt[1]=0L; dmn_cnt[1]=1L; dmn_srd[1]=mpf.num_wgts; rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE); } /* !SCRIP */ /* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize access with C indices */ size_t lnk_nbr; /* [nbr] Number of links */ size_t lnk_idx; /* [idx] Link index */ lnk_nbr=mpf.num_links; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--; if(nco_dbg_lvl_get() >= nco_dbg_io){ (void)fprintf(stdout,"idx row_dst col_src wgt_raw\n"); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]); } /* endif dbg */ /* Free memory associated with input file */ if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to map file Below this line, fl_in and in_id refer to input file to be regridded */ /* Initialize */ in_id=rgr->in_id; out_id=rgr->out_id; /* Sanity check that input data file matches expectations from mapfile */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ /* 20160503 Discover coordinates via CF Convention if indicated This copies method used in nco_grd_nfr() */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] 
Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... 
*/ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ } /* !unt_sng */ } /* !rcd */ /* Advance to next candidate name unconditionally, lest a missing variable or units attribute spin this loop forever */ crd_idx++; } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc((NC_MAX_NAME+1L)*sizeof(char)); cf->dmn_nm[1]=(char *)nco_malloc((NC_MAX_NAME+1L)*sizeof(char)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names.
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and vice-versa. */ lat_nm_in=strdup(cf->dmn_nm[0]); lon_nm_in=strdup(cf->dmn_nm[1]);
//lat_nm_in=strdup(cf->crd_nm[idx_lat]);
//lon_nm_in=strdup(cf->crd_nm[idx_lon]);
/* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */
//lat_ctr_id=cf->crd_id[idx_lat];
//lon_ctr_id=cf->crd_id[idx_lon];
//lat_dmn_nm=strdup(cf->dmn_nm[0]);
//lon_dmn_nm=strdup(cf->dmn_nm[1]);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]);
// if(foo) foo=(char *)nco_free(foo);
} /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ if(flg_grd_in_1D){ long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */ /* Check default or command-line option first, then search usual suspects, and if that fails then guess unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS Using internal database names first ensures users can pick between multiple dimensions of size n_a 20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids?
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */ /* 20180605: Database matches to above names may be false-positives ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use NCO only prints dimensions used in variables, while ncdump prints all dimensions From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno Check that matched dimension has expected size: */ if(dmn_id_col != NC_MIN_INT){ rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in != col_nbr_in_dat){ dmn_id_col=NC_MIN_INT; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat); } /* !col_nbr_in */ }else{ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id). Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm); } /* !dmn_id_col */ if(dmn_id_col == NC_MIN_INT){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm); /* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */ int *dmn_ids_in; /* [nbr] Input file dimension IDs */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ const int flg_prn=0; /* [enm] Parent flag */ rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn); /* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_id_col=dmn_ids_in[dmn_idx]; rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in == col_nbr_in_dat){ rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. 
Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in); break; } /* !col_nbr_in */ } /* !dmn_idx */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_idx == dmn_nbr_in){ dmn_id_col=NC_MIN_INT; (void)fprintf(stdout,"%s: ERROR %s expects data on an unstructured grid but cannot find a dimension in the input file that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld. HINT: \n",nco_prg_nm_get(),fnc_nm,col_nbr_in); nco_exit(EXIT_FAILURE); } /* !dmn_idx */ } /* !col_nm_in */ } /* !1D */ if(flg_grd_in_2D){ long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat"); else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) 
lat_nm_in=strdup("y"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat); if(lat_nbr_in != lat_nbr_in_dat){ (void)fprintf(stdout,"%s: ERROR %s reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon"); else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */ else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else 
if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat); if(lon_nbr_in != lon_nbr_in_dat){ (void)fprintf(stdout,"%s: ERROR %s reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ } /* !2D */ /* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks If necessary, use remap data to diagnose them from scratch Other extensive variables (like counts, population) will be extracted and summed not averaged */ /* Exception list source: ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option) AMSR: Latitude, Longitude CAM, CERES, CMIP5: lat, lon CAM, CMIP5: gw, lat_bnds, lon_bnds CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analagous to gw for lat grid) CAM-SE, EAM: area CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid) DSCOVR L2: latitude, longitude ESMF: gridcell_area GPM: S1_Latitude, S1_Longitude HIRDLS: Latitude MAR/RACMO: LAT, LON MLS: CO_Latitude MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separated MPAS convention implementation below NCO: lat_vertices, lon_vertices NEMO: nav_lat, nav_lon NWS HRRR: gridlat_0, gridlon_0 OCO2: latitude_bnds, longitude_bnds OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints Oxford: global_latitude0, global_longitude0, latitude0, longitude0 POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds) TRMM: Latitude, Longitude UV-CDAT regridder: bounds_lat, bounds_lon 
Unknown: XLAT_M, XLONG_M WRF: XLAT, XLONG */ const int var_xcl_lst_nbr=50; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","/gridlat_0","/latitude0","/slat","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","/gridlon_0","/longitude0","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ int var_xtn_nbr=0; /* [nbr] Number of extensive variables */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ cnv_sct *cnv; /* [sct] Convention structure */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); if(cnv->MPAS){ /* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded 20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex 20180917: Restrict exclusion list to a subset of variables with nCells-dimension Six nCells-variables may be valuable when regridded to lat/lon mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables: areaCell for comparison to area(lat,lon) cellMask for area-weighted mask maxLevelCell for area-weighted underwater topographic mask xCell, yCell, zCell for area-weighted cartesian coordinates 20180918: Regridder currently only works on cell-based coordinates Decided regridder will omit, not copy, fields on vertex- or edge-based coordinates until it can regrid them Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges How would ERWG or TempestRemap handle that?
MPAS geophysical variables on vertex-based (not cell-based) coordinates include: avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1 MPAS geophysical variables on edge-based (not cell-based) coordinates include: principalStress1Var_1, principalStress2Var_1 */ const int mpas_xcl_lst_nbr=35; const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"}; for(idx=0;idx<mpas_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ } /* !MPAS */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */ int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */ nco_bool has_lon; /* [flg] Contains longitude dimension */ nco_bool has_lat; /* [flg] Contains latitude dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ has_lon=False; has_lat=False; if(flg_grd_in_2D){ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */ if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in); if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in); } /* end loop over dimensions */ } /* !flg_grd_in_2D */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */ if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; break; } /* endif */ } /* end loop over dimensions */ if(dmn_idx == dmn_nbr_in){ /* Not regridded, so must be omitted or copied... 
*/ if(flg_grd_in_2D && (has_lon || has_lat)){ /* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds) These could be salvaged with explicit rules or implicit assumptions */ trv_tbl->lst[idx_tbl].flg_xtr=False; var_xcl_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); }else{ /* !omitted */ /* Copy all variables that are not regridded or omitted */ var_cpy_nbr++; } /* !omitted */ } /* endif not regridded */ } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm); for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.flg_rgr){ for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){ /* 20150927: Extensive variable treatments are still in alpha-development Currently testing on AIRS TSurfStd_ct (by summing not averaging) In future may consider variables that need more complex (non-summing) extensive treatment MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity Not to mention the variables that depend on nEdges and nVertices... */ if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){ trv_tbl->lst[idx_tbl].flg_xtn=True; var_xtn_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll); } /* !strcmp */ } /* !xtn_idx */ } /* !flg_rgr */ } /* !idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Lay-out regridded file */ aed_sct aed_mtd; char *area_nm_out; char *att_nm; char *bnd_nm_out; char *bnd_tm_nm_out; char *col_nm_out; char *frc_nm_out; char *lat_bnd_nm_out; char *lat_dmn_nm_out; char *lat_nm_out; char *lat_wgt_nm; char *lon_bnd_nm_out; char *lon_dmn_nm_out; char *lon_nm_out; char *msk_nm_out; char *slat_nm_out=NULL; char *slat_wgt_nm_out=NULL; char *slon_nm_out=NULL; int dmn_id_bnd; /* [id] Dimension ID */ int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_slat; /* [id] Dimension ID */ int dmn_id_slon; /* [id] Dimension ID */ int area_out_id; /* [id] Variable ID for area */ int frc_out_id; /* [id] Variable ID for fraction */ int lon_out_id; /* [id] Variable ID for longitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int msk_out_id; /* [id] Variable ID for mask */ int slat_out_id; /* [id] Variable ID for staggered latitude */ int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */ int slon_out_id; /* [id] Variable ID for staggered longitude */ int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ long dmn_srt_out[dmn_nbr_grd_max]; long dmn_cnt_tuo[dmn_nbr_grd_max]; /* Name output dimensions/variables */ area_nm_out=rgr->area_nm; bnd_tm_nm_out=rgr->bnd_tm_nm; frc_nm_out=rgr->frc_nm; lat_bnd_nm_out=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; msk_nm_out=rgr->msk_nm; /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; if(flg_grd_out_1D){ bnd_nm_out=rgr->vrt_nm; lat_bnd_nm_out=rgr->lat_vrt_nm; lon_bnd_nm_out=rgr->lon_vrt_nm; } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ bnd_nm_out=rgr->bnd_nm; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */ } /* !flg_grd_out_rct */ if(flg_grd_out_2D){ lat_bnd_nm_out=rgr->lat_bnd_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; } /* !flg_grd_out_2D */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_nm_out=strdup("slat"); slat_wgt_nm_out=strdup("w_stag"); slon_nm_out=strdup("slon"); } /* !nco_grd_lat_fv */ /* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */ if(bnd_nbr_out != bnd_tm_nbr_out){ if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){ (void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. 
HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out); } /* !strcmp() */ } /* !bnd_nbr_out */ /* Persistent metadata */ aed_sct aed_mtd_crd; char *att_val_crd=NULL; char *att_nm_crd=NULL; att_nm_crd=strdup("coordinates"); aed_mtd_crd.att_nm=att_nm_crd; if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete; aed_mtd_crd.type=NC_CHAR; aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type)); (void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out); aed_mtd_crd.val.cp=att_val_crd; /* Reminder: Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file EXTRACT_CLL_MSR supersedes --rgr area_out in determining whether to add "area" to regridded output */ nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */ nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */ aed_sct aed_mtd_cll_msr; char *att_nm_cll_msr=NULL; char *att_val_cll_msr=NULL; if(flg_cll_msr){ att_nm_cll_msr=strdup("cell_measures"); aed_mtd_cll_msr.att_nm=att_nm_cll_msr; aed_mtd_cll_msr.mode=aed_overwrite; aed_mtd_cll_msr.type=NC_CHAR; att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type)); (void)sprintf(att_val_cll_msr,"area: %s",area_nm_out); aed_mtd_cll_msr.sz=strlen(att_val_cll_msr); aed_mtd_cll_msr.val.cp=att_val_cll_msr; } /* !flg_cll_msr */ /* Define new horizontal dimensions before all else */ if(flg_grd_out_1D){ rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col); } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat); rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat); rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon); } /* !nco_grd_lat_fv */ } /* !flg_grd_out_2D */ /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm); /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd); char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE;
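/* Every grid variable defined in the blocks below follows the same define-then-deflate idiom, sketched here for reference (a minimal sketch, names as in the surrounding code):
   rcd+=nco_def_var(out_id,var_nm,crd_typ_out,dmn_nbr,dmn_ids_out,&var_id);
   if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,var_id,shuffle,deflate,dfl_lvl);
   var_crt_nbr++;
   Per-variable DEFLATE compression is added only when user requests dfl_lvl > 0 */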
dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ if(flg_grd_out_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; dmn_ids_out[2]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; 
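/* Remaining rectangular-grid variables defined below: lon, then (for FV latitude grids when flg_stg is set) staggered slat/slon coordinates and w_stag weights, then spatial bounds, latitude quadrature weights, and the optional area/fraction/mask variables */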
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !nco_grd_lat_fv */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lon; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_nbr_max++; /* Safety in case regridding adds dimension */ dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Identify all record-dimensions in input file */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; /* Define regridded and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv_tbl->lst[idx_tbl].flg_mrv=True; trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var 
&& trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; /* Demote DP to SP to save space (currently disabled). fxm: missing value type will then be inconsistent if copied without demotion */ //if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Regrid */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); dmn_in_fst=0; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results are unpredictable. HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); /* Is horizontal dimension last, i.e., most-rapidly-varying? */ if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){ if(dmn_idx != dmn_nbr_in-1){ /* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, ask Charlie to add the dimension to ncremap.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_1D */ if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Are horizontal dimensions most-rapidly-varying? */ if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){ /* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_2D */ if(flg_grd_out_1D){ if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */ if(!strcmp(dmn_nm,lat_nm_in)){ /* Replace lat with col */ dmn_id_out[dmn_idx]=dmn_id_col; dmn_cnt[dmn_idx]=col_nbr_out; } /* endif lat */ if(!strcmp(dmn_nm,lon_nm_in)){ /* Assume non-MRV dimensions are ordered lat/lon.
lat was already replaced by col (above), so delete lon and shift subsequent MRV dimensions left. */ dmn_id_out[dmn_idx]=NC_MIN_INT; dmn_cnt[dmn_idx]=NC_MIN_INT; dmn_nbr_out--; /* Reduce output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1; } /* endif lon */ }else{ /* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !lat && !lon */ } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){ /* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */ dmn_id_out[dmn_idx]=dmn_id_lat; dmn_id_out[dmn_idx+1]=dmn_id_lon; dmn_cnt[dmn_idx]=lat_nbr_out; dmn_cnt[dmn_idx+1]=lon_nbr_out; dmn_nbr_out++; /* Increase output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1; }else{ /* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx); else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !col */ } /* !flg_grd_out_2D */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-regridded variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression
level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); if(trv.flg_rgr){ aed_mtd_crd.var_nm=var_nm; aed_mtd_crd.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd); if(flg_cll_msr){ aed_mtd_cll_msr.var_nm=var_nm; aed_mtd_cll_msr.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr); } /* !flg_cll_msr */ } /* !flg_rgr */ } /* !rcd */ } /* !var */ } /* end idx_tbl */ /* Free pre-allocated array space */ /* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */ if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in); if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Define new metadata in regridded file */ if(flg_area_out){ rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,area_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,frc_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_msk_out */ rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); double vld_min; vld_min=-90.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); double vld_max; vld_max=90.0; att_nm=strdup("valid_max");
aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val); if(att_val) att_val=(char *)nco_free(att_val); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); /* UGRID Conventions define "topology" and "modulo" attributes https://github.com/ugrid-conventions/ugrid-conventions My understanding is these should only be utilized for global grids */ if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){ /* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */ att_nm=strdup("modulo"); double modulo=360.0; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&modulo; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular"); } /* !nco_rgr_grd_2D_to_2D */ if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val); if(att_val) att_val=(char *)nco_free(att_val); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east"); } /* !nco_grd_lat_fv */ if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)");
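/* Global provenance attributes written next record the mapfile read by this regridder (fl_in) and the original data file (rgr->fl_in), e.g., in CDL (illustrative filenames only):
   :map_file = "map_src_to_dst_aave.nc" ;
   :input_file = "in.nc" ; */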
rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in); rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in); /* Annotate persistent metadata that should appear last in attribute list */ if(flg_grd_out_1D){ if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_grd_out_1D */ /* Persistent metadata */ if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd); if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd); if(flg_cll_msr){ if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr); if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr); } /* !flg_cll_msr */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out); if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out); if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out); } /* !nco_grd_lat_fv */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_out_1D){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); if(flg_area_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_msk_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; dmn_cnt_tuo[2]=bnd_nbr_out; /* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */ (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; 
dmn_cnt_tuo[0]=lon_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slat_nbr_out; (void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slon_nbr_out; (void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out); if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out); if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out); if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out); } /* !nco_grd_lat_fv */ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lon_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Regrid or copy variable values */ const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */ const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */ char *sgs_frc_nm=NULL; char *sgs_msk_nm=NULL; double *sgs_frc_in=NULL; double *sgs_frc_out=NULL; double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *wgt_vld_out=NULL; double var_val_crr; int *tally=NULL; /* [nbr] Number of valid (non-missing) values */ int lvl_idx; /* [idx] Level index */ int lvl_nbr; /* [nbr] Number of levels */ int thr_idx; /* [idx] Thread index */ size_t dst_idx; size_t idx_in; /* [idx] Input grid index */ size_t idx_out; /* [idx] Output grid index */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */ size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */ /* 20190322: Prior to entering OpenMP loop, collect specified SGS information */ const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */ if(rgr->sgs_frc_nm){ /* Normalization test: fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc /bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS 
method */ if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm); sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm); var_nm=sgs_frc_nm; var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */ var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */ var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */ var_sz_out=grd_sz_out; /* Assume this holds */ char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */ int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */ sgs_id=in_id; if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ /* If sgs_frc_nm is not in input file then search for it in external area file */ char *sls_ptr; /* [sng] Pointer to last slash character ('/') */ sls_ptr=strrchr(var_nm,'/'); if(!sls_ptr){ (void)fprintf(stderr,"%s: ERROR %s reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/') portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm); nco_exit(EXIT_FAILURE); } /* !sls_ptr */ sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */ *sls_ptr='\0'; /* NULL-terminate filename */ fl_sgs=(char *)strdup(var_nm); var_nm=sgs_frc_nm; /* NB: too tricky? */ rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id); if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ (void)fprintf(stderr,"%s: ERROR %s reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); nco_exit(EXIT_FAILURE); } /* !rcd */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); } /* !rcd */ rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(var_sz_in != grd_sz_in){ (void)fprintf(stdout,"%s: ERROR %s requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in); nco_exit(EXIT_FAILURE); } /* !var_sz_in */ /* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked) */ has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl); sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer"); rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr); /* If sgs_frc comes from external local file, close it now */ if(fl_sgs){ rcd=nco_close(sgs_id); fl_sgs=(char *)nco_free(fl_sgs); } /* !fl_sgs */ /* Initialize output */ sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer"); /* Initialize and regrid sgs_frc_out 20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it is a true area.
ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values. 20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice. Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and is always safe to multiply and normalize by sgs_frc_out in main regridding loop */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0; if(!has_mss_val) for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) sgs_frc_out[row_dst_adr[lnk_idx]]+=sgs_frc_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; if(has_mss_val) for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_dbl) sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx]; /* Sanity check sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_fl){ /* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0 Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */ if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm); } /* !dst_idx */ } /* !dbg */ // for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ // (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]); // } /* !dst_idx */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); } /* !sgs_frc_nm */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n"); /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* OpenMP notes: default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it. firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization private(): Almost everything else shared(): uggh...shared clause depends on both compiler and compiler-version 1. All const variables are default shared for gcc >= 4.9.2, 2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis) 3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea) 4. assert() cannot be used in OpenMP blocks 5. 
Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html 20200221: fxm Revisit default(none) in light of above article */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,flg_rnr,fnc_nm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw,wgt_vld_thr) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,fnc_nm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) # pragma omp target teams distribute parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else # pragma omp parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? 
"s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Regrid variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* end loop over dimensions */ /* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength) Denote their convolution by level or 'lvl' for shorthand There are lvl_nbr elements for each lat/lon or col position 20151011: Until today assume lat/lon and col are most-rapidly varying dimensions 20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */ lvl_nbr=1; /* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */ for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx]; /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); /* Memory requirements of next four malloc's (i.e., exclusive of wgt_raw) add up to ~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for NC_DOUBLE */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer"); if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer"); /* Initialize output */ (void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); if(has_mss_val) 
(void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT)); if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); /* 20150914: Intensive variables require normalization, extensive do not Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries Extensive variables (population, counts, numbers of things) depend on gridcell boundaries Extensive variables are the exception in models, yet are commonly used for sampling information, e.g., number of photons, number of overpasses Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,... 20190420: Remove languishing, unfinished intensive variable code */ /* This first block is for "normal" variables without sub-gridscale fractions */ if(!sgs_frc_out){ /* Apply weights */ if(!has_mss_val){ if(lvl_nbr == 1){ /* Weight single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; }else{ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields without missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ //if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* has_mss_val */ if(lvl_nbr == 1){ /* Weight single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ }else{ /* lvl_nbr > 1 */ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields with missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ if(!has_mss_val){ /* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none" When this occurs for conservative remapping, follow "destarea" normalization procedure See SCRIP manual p. 11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000 "frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0. frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. 
When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise. If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.) The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file: ! Adjust destination field by fraction do i=1, n_b if (frac_b(i) .ne. 0.0) then dst_field(i)=dst_field(i)/frac_b(i) endif enddo" NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise) NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible. flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative. 20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default "destarea" and frac_b = frc_dst together set flg_frc_nrm Formerly flg_frc_nrm triggered ESMF renormalization by default Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it This keeps conservative maps conservative by default NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr 20151018: Avoid double-normalizing by only executing fractional normalization (flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */ if(flg_frc_nrm){ /* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */ if(flg_rnr){ /* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative). Prior to today, renormalization was on by default, henceforth it is opt-in.
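In C the quoted ESMF adjustment amounts to the following (a minimal sketch only; dst_field, frac_b, and n_b are the names from the ESMF weight-file excerpt above, not NCO variables):
for(long i=0;i<n_b;i++)
if(frac_b[i] != 0.0) dst_field[i]/=frac_b[i];
The flg_frc_nrm block below performs this same division, using frc_out in place of frac_b, once per vertical level.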
*/ if(lvl_nbr == 1){ /* Fractionally renormalize single-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx]; }else{ /* Fractionally renormalize multi-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] != 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx]; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* lvl_nbr > 1 */ } /* !flg_rnr */ } /* !flg_frc_nrm */ } /* !has_mss_val */ if(has_mss_val){ /* NCL and ESMF treatment of weights and missing values described at https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000 NCO implements one of two procedures: "conservative" or "renormalized" The "conservative" algorithm uses all valid data from the input grid on the output grid Destination cells receive the weighted valid values of the source cells This is conservative because the global integrals of the source and destination fields are equal The "renormalized" algorithm divides the destination value by the sum of the valid weights This returns "reasonable" values, i.e., the mean of the valid input values However, renormalization is equivalent to extrapolating valid data to missing regions Hence the input and output integrals are unequal and the regridding is not conservative */ /* In fields with missing value, destination cells with no accumulated weight are missing value */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_dbl; if(flg_rnr){ // if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm); if(wgt_vld_thr == 0.0){ /* Renormalize cells with no threshold by valid accumulated weight */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx]; }else{ /* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_dbl;} } /* !wgt_vld_thr */ } /* !flg_rnr */ } /* !has_mss_val */ } /* !sgs_frc_out */ /* Variables with sub-gridscale fractions require "double-weighting" and normalization */ if(sgs_frc_out){ if(!strcmp(var_nm,sgs_frc_nm)){ /* Copy shared variable sgs_frc_out that was regridded before OpenMP loop 20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. 
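Schematically, the non-copy SGS branch below computes, for each destination gridcell dst_idx (a notational summary of the loops that follow, not separate code):
dst[dst_idx] = sum over links lnk_idx with row_dst_adr[lnk_idx] == dst_idx of wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]*src[col_src_adr[lnk_idx]], then dst[dst_idx] /= sgs_frc_out[dst_idx] wherever sgs_frc_out[dst_idx] != 0.0
i.e., input values are weighted by both the mapping weight and the input sub-gridscale fraction, then normalized by the regridded fraction.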
*/ memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr)); }else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){ /* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0; }else{ /* !sgs_msk_nm */ /* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */ if(!has_mss_val){ if(lvl_nbr == 1){ /* SGS-regrid single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; /* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx]; }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields without missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* !has_mss_val */ if(lvl_nbr == 1){ /* SGS-regrid single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ /* NB: Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];} }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields with missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ idx_out=dst_idx+val_out_fst; if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];} } /* dst_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ } /* !sgs_msk_nm */ } /* !sgs_frc_out */ #pragma omp critical { /* begin OpenMP critical */ // rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr); rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long
*)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(tally) tally=(int *)nco_free(tally); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free memory allocated for grid reading/writing */ if(area_out) area_out=(double *)nco_free(area_out); if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr); if(dmn_sz_in_int) dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int); if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int); if(frc_out) frc_out=(double *)nco_free(frc_out); if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out); if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out); if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out); if(msk_out) msk_out=(int *)nco_free(msk_out); if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr); if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm); if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in); if(sgs_frc_out) sgs_frc_out=(double *)nco_free(sgs_frc_out); if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm); if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw); return rcd; } /* end nco_rgr_wgt() */ void nco_bsl_zro /* Return Bessel function zeros */ (const int bsl_zro_nbr, /* I [nbr] Number of Bessel function zeros to return */ double * const bsl_zro) /* O [frc] Bessel zero */ { /* Purpose: Return Bessel function zeros Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F Return bsl_zro_nbr zeros (or, if bsl_zro_nbr > 50, approximate zeros) of the Bessel function j0 First 50 zeros are given exactly; remaining zeros are computed by extrapolation and therefore are not exact Original version: CCM1 Standardized: J. Rosinski, June 1992 Reviewed: J. Hack, D. Williamson, August 1992 Reviewed: J. Hack, D. Williamson, April 1996 Modified 19970123 by Jim Rosinski to use double precision arithmetic ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender */ const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */ const double pi=M_PI; // [frc] 3 const double bsl_zro_tbl[]={ // First 50 zeros of the Bessel function j0 -1.e36, 2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391, 14.9309177086, 18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320, 30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132, 46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550, 58.9069839261, 62.0484691902, 65.1899648002, 68.3314693299, 71.4729816036, 74.6145006437, 77.7560256304, 80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372, 93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165, 109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309, 121.7377420880, 124.8793089132, 128.0208770059, 131.1624462752, 134.3040166383, 137.4455880203, 140.5871603528, 143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192, 156.2950342685}; const int bsl_zro_tbl_nbr_max=50; /* [nbr] Number of tabulated zeros */ int bsl_idx; /* [idx] Counting index */ /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* NB: Initialize bsl_zro[0] but (in C) never use it Initialization prevents uninitialized memory warnings */ for(bsl_idx=0;bsl_idx<=bsl_zro_nbr;bsl_idx++) if(bsl_idx <= bsl_zro_tbl_nbr_max) bsl_zro[bsl_idx]=bsl_zro_tbl[bsl_idx]; if(bsl_zro_nbr > bsl_zro_tbl_nbr_max) for(bsl_idx=bsl_zro_tbl_nbr_max+1;bsl_idx<=bsl_zro_nbr;bsl_idx++) bsl_zro[bsl_idx]=bsl_zro[bsl_idx-1]+pi; if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr); (void)fprintf(stdout,"idx\tbsl_zro\n"); for(bsl_idx=1;bsl_idx<=bsl_zro_nbr;bsl_idx++) (void)fprintf(stdout,"%d\t%g\n",bsl_idx,bsl_zro[bsl_idx]); } /* endif dbg */ return; } /* end nco_bsl_zro() */ void nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */ (const int lat_nbr, /* I [nbr] Latitude number */ const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */ double * const lat_sin, /* O [frc] Sine of latitudes */ double * const wgt_Gss) /* O [frc] Gaussian weights */ { /* Purpose: Compute and return sine of Gaussian latitudes and their weights Returned arrays are ordered south-to-north (S->N), not (N->S) Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956 Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae Original version: CCM1 Standardized: L. Bath, Jun 1992 L. Buja, Feb 1996 Reviewed: D. Williamson, J. Hack, Aug 1992 D. Williamson, J. Hack, Feb 1996 19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to achieve (nearly) identical weights and latitudes on all machines. ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender 20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */ const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */ const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink) const double pi=M_PI; // [frc] 3 const int itr_nbr_max=20; // [nbr] Maximum number of iterations double c_cff; // Constant combination coefficient double lat_idx_dbl; // Latitude index, double precision double lat_nnr_idx_dbl; // Inner latitude index, double precision double lat_nbr_dbl; // [nbr] Number of latitudes, double precision double pk=double_CEWI; // Polynomial double pkm1; // Polynomial double pkm2; // Polynomial double pkmrk; // Polynomial double sp; // Current iteration latitude increment double xz; // Abscissa estimate double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings int itr_cnt; // Iteration counter int lat_idx; // [idx] Counting index (latitude) int lat_sym_idx; // [idx] Counting index (symmetric latitude) int lat_nnr_idx; // [idx] Counting index (inner latitude loop) int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere) double *lat_sin_p1; // Sine of Gaussian latitudes double precision double *wgt_Gss_p1; // Gaussian weights double precision /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran */ lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision /* Use Newton iteration to find abscissae */ c_cff=0.25*(1.0-4.0/(pi*pi)); lat_nbr_dbl=lat_nbr; lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic (void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1); for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1 // 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse()) // Warnings occur with gcc-compiled code, not with clang-compiled code cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff); xz=cos(cos_arg); /* First approximation to xz */ itr_cnt=0; /* goto label_73 */ label_73: pkm2=1.0; pkm1=xz; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ /* Compute Legendre polynomial */ for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){ lat_nnr_idx_dbl=lat_nnr_idx; pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl; pkm2=pkm1; pkm1=pk; } /* end inner loop over lat_nnr */ pkm1=pkm2; pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz); sp=pk/pkmrk; xz=xz-sp; /* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */ if(fabs(sp) > eps_rlt) goto label_73; lat_sin_p1[lat_idx]=xz; wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1)); } /* end outer loop over lat */ if(lat_nbr != lat_nbr_rcp2*2){ /* When lat_nbr is odd, compute weight at Equator */ lat_sin_p1[lat_nbr_rcp2+1]=0.0; pk=2.0/(lat_nbr_dbl*lat_nbr_dbl); for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){ 
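/* Accumulate pk=(2/N^2)*product over even k<=N-1 of k^2/(k-1)^2, which equals the Gaussian weight 2/(N*P_(N-1)(0))^2 at the Equator (x=0) */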
lat_idx_dbl=lat_idx; pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0)); } /* end loop over lat */ wgt_Gss_p1[lat_nbr_rcp2+1]=pk; } /* endif lat_nbr is odd */ /* Complete sets of abscissas and weights, using symmetry properties */ for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ lat_sym_idx=lat_nbr-lat_idx+1; lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx]; wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx]; } /* end loop over lat */ /* Shift by one to remove Fortran offset in p1 arrays */ //memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double)); //memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double)); /* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south Shift by one to remove Fortran offset in p1 arrays */ if(flg_s2n){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx]; } /* end loop over lat */ }else{ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_idx+1]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1]; } /* end loop over lat */ } /* !flg_s2n */ if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n"); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) (void)fprintf(stdout,"%d\t%g\t%g\t%g\t%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]); } /* endif dbg */ if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1); if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1); return; } /* end nco_lat_wgt_gss() */ void nco_sph_plg_area /* [fnc] Compute area of spherical polygon */ (rgr_sct * const rgr, /* I [sct] Regridding structure */ const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */ const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */ const long col_nbr, /* [nbr] Number of columns in grid */ const int bnd_nbr, /* [nbr] Number of bounds in gridcell */ double * const area) /* [sr] Gridcell area */ { /* Purpose: Compute area of spherical polygon */ /* Computing triangular area accurately is hard in corner cases Spherical triangles suffer from at least as many issues as planar, which are described by "Miscalculating Area and Angles of a Needle-like Triangle" by W.
Kahan, UC Berkeley In particular, the Law of Cosines and Heron's formula can be ill-conditioned For spherical triangles L'Huilier's Theorem is superior to Girard's Formula: http://mathworld.wolfram.com/LHuiliersTheorem.html Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c) Spherical Excess (SE) is the difference between the sum of the angles of a spherical triangle and that of a planar triangle with the same interior angles (which sum to pi) SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative: http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html The discussion mentions Mil94 Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137. http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex) However, his method places single vertex at pole, then adds signed areas to obtain full polygon area His method may suffer from degraded precision because of roundoff error and long side-lengths So-called "proper" spherical triangles are those for which all angles are less than pi, so a+b+c<3*pi Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta)) Dot-product rule for vectors gives interior angle/arc length between two points: cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2) Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle: https://en.wikipedia.org/wiki/Spherical_law_of_cosines cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A) cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B) cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C) cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)] cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)] cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)] Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr) Unused vertices are stored as either repeated points (ACME does this) or, conceivably, as missing values Given (lat,lon) for N points, algorithm to find area of spherical polygon is: 1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2)*pi 2.
CSZ decomposition (N-2 triangles) with L'Huilier areas, Convert polygon into triangles by cycling spoke through all sides from common apex This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.) i. First vertice anchors all triangles ii. Third vertice of preceding triangle becomes second vertice of next triangle iii. Next non-identical point becomes last vertice of next triangle iv. Side C of previous triangle is side A of next triangle B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula 3. centroidal decomposition, N triangle version by Taylor, L'Huilier areas: Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices This method requires computation of N triangles, though fewer sides due to optimization Moreover, it works on all convex polygons and on slightly concave polygons Centroid/hub has clear view of interior of most simple concave polygons 4. Any decomposition but with exact RLL grids by Zender and Agress 20160918 A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above) B. Determine whether triangle is spherical or contains RLL (constant latitude) C. Spherical triangles use L'Huilier, RLL triangles use series expansion */ const char fnc_nm[]="nco_sph_plg_area()"; const double dgr2rdn=M_PI/180.0; int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */ long idx; /* [idx] Counting index for unrolled grids */ short int bnd_idx; /* Shift to this method once we pass rgr into nco_sph_plg_area() */ nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */ nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */ nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */ nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */ if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc; edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */ ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */ if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True; if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True; assert(flg_mth_ctr != flg_mth_csz); bnd_nbr_ttl=bnd_nbr; // Allocate space for one extra boundary to store centroid information if necessary if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1; double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */ double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */ double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */ double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */ double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */ double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */ /* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */ lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lat_bnd_rdn=(double 
*)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double)); memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double)); for(idx=0;idx<col_nbr*bnd_nbr;idx++){ lon_bnd_rdn[idx]*=dgr2rdn; lat_bnd_rdn[idx]*=dgr2rdn; lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]); lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]); lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]); lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]); } /* !idx */ double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */ double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */ double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */ double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */ double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */ double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */ double lat_ctr; /* [dgr] Latitude of polygon centroid */ double lon_ctr; /* [dgr] Longitude of polygon centroid */ double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */ double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */ double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */ double lat_dlt; /* [rdn] Latitudinal difference */ double lon_dlt; /* [rdn] Longitudinal difference */ double ngl_a; /* [rdn] Interior angle/great circle arc a */ double ngl_b; /* [rdn] Interior angle/great circle arc b */ double ngl_c; /* [rdn] Interior angle/great circle arc c */ double ngl_ltr_a; /* [rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */ double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */ double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */ double prm_smi; /* [rdn] Semi-perimeter of triangle */ double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */ double xcs_sph; /* [sr] Spherical excess */ int tri_nbr; /* [nbr] Number of triangles in polygon */ long bnd_vld_nbr=NC_MIN_INT; /* [idx] Number of valid (non-duplicative) vertices in each triangle */ long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */ long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */ long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */ long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */ long idx_a; /* [idx] Point A 1-D index */ long idx_b; /* [idx] Point B 1-D index */ long idx_c; /* [idx] Point C 1-D index */ nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */ nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isoceles triangle */ nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */ nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */ nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */ nco_bool flg_ply_has_smc; /* 
[flg] Any triangle in polygon has small-circle edge */ nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small-circle edge */ /* Initialize global accumulators */ area_ttl=0.0; area_smc_ttl=0.0; area_smc_crc_ttl=0.0; area_smc_crc_abs_ttl=0.0; for(long col_idx=0;col_idx<col_nbr;col_idx++){ /* Initialize local properties and accumulators for this cell/polygon */ flg_ply_has_smc=False; ngl_c=double_CEWI; /* Otherwise compiler unsure ngl_c is initialized before first use */ area[col_idx]=0.0; area_smc=0.0; tri_nbr=0; if(col_idx == 0){ a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long)); } /* !col_idx */ /* Safety re-initialization to ease debugging, not strictly necessary */ for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){ vrt_vld[bnd_idx]=NC_MIN_INT; a_idx[bnd_idx]=NC_MIN_INT; b_idx[bnd_idx]=NC_MIN_INT; c_idx[bnd_idx]=NC_MIN_INT; } /* !bnd_idx */ if(flg_mth_ctr){ double lon_dff; /* [dgr] Longitude difference */ long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */ long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */ long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */ /* First vertice is always valid */ bnd_srt_idx=bnd_nbr*col_idx; bnd_vld_idx=bnd_srt_idx; vrt_vld[0]=bnd_vld_idx; lat_ctr=lat_bnd[bnd_srt_idx]; lon_ctr=lon_bnd[bnd_srt_idx]; bnd_vld_nbr=1; /* First guess for next valid index */ bnd_idx=1; /* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */ while(bnd_idx<bnd_nbr){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_vld_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){ /* Next valid vertice must not duplicate previous valid vertex */ bnd_idx++; /* Have we already found all valid vertices? */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to normalization when all valid vertices found */ if(bnd_idx == bnd_nbr) break; /* Current vertex is valid (non-duplicative) */ bnd_vld_idx=bnd_srt_idx+bnd_idx; vrt_vld[bnd_vld_nbr]=bnd_vld_idx; bnd_vld_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr); assert(bnd_vld_nbr <= bnd_nbr); lat_ctr+=lat_bnd[bnd_vld_idx]; lon_ctr+=lon_bnd[bnd_vld_idx]; lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[bnd_srt_idx]; if(lon_dff >= 180.0){ lon_ctr-=360.0; }else if(lon_dff <= -180.0){ lon_ctr+=360.0; } /* !lon_dff */ /* Search for next valid vertice in next iteration */ bnd_idx++; } /* !bnd_idx */ /* Compute centroid */ lat_ctr/=bnd_vld_nbr; lon_ctr/=bnd_vld_nbr; /* Centroid can become point A of bnd_nbr triangles or optimize algorithm: 1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3 2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4 3.
Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */ lat_ctr_rdn=lat_ctr*dgr2rdn; lon_ctr_rdn=lon_ctr*dgr2rdn; lat_ctr_cos=cos(lat_ctr_rdn); /* Place centroid values in extended arrays for easy access */ lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn; lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn; lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos; /* Polygon centroid and valid vertices are now known */ assert(bnd_vld_nbr > 2); if(bnd_vld_nbr == 3){ /* Three vertices only means polygon is already decomposed into a triangle */ tri_nbr=1; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; }else if(bnd_vld_nbr == 4){ /* Bisect quadrilateral into two triangles rather than use centroid and have four triangles */ tri_nbr=2; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of triangle[1] */ b_idx[1]=vrt_vld[2]; c_idx[1]=vrt_vld[3]; }else if(bnd_vld_nbr >= 5){ /* Centroid method has as many triangles as valid vertices */ tri_nbr=bnd_vld_nbr; for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */ b_idx[tri_idx]=vrt_vld[tri_idx]; c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr]; } /* !tri_idx */ } /* !bnd_vld_nbr */ } /* !flg_mth_ctr */ if(flg_mth_csz){ /* A is always first vertice of all triangles */ idx_a=bnd_nbr*col_idx; /* Start search for B at next vertice */ bnd_idx=1; /* bnd_idx labels offset from point A of potential location of triangle points B and C We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */ while(bnd_idx<bnd_nbr-1){ /* Only first triangle must search for B, subsequent triangles recycle previous C as current B */ if(tri_nbr == 0){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ /* 20200115: Prior to today we never skipped polar points (same latitudes but different longitudes) That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition (aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle, and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it). Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends only on the interior angles and side lengths, not the longitudes of polar points. Small circles change that last part, and we must now eliminate repeated polar points.
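For example, a polar-cap cell may list the pole twice, as (90,0) and (90,90): the two entries differ numerically in longitude yet are geometrically one point, so for edg_typ=smc they must be collapsed (via the fabs(lat) == 90.0 tests below) before B and C are chosen.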
*/ if(edg_typ == nco_edg_smc){ /* Skip repeated numerically identical points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ /* Skip geometrically identical (i.e., repeated polar) points */ while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){ bnd_idx++; if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else if(edg_typ != nco_edg_smc){ /* Spherical polygons can use the simpler, pre-20200116 algorithm to eliminate repeated points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else{ abort(); } /* !edg_typ */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !tri_nbr */ idx_b=idx_a+bnd_idx; /* Search for C at next vertice */ bnd_idx++; /* fxm */ while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate B */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr) break; idx_c=idx_a+bnd_idx; /* Valid triangle, vertices are known and labeled */ a_idx[tri_nbr]=idx_a; b_idx[tri_nbr]=idx_b; c_idx[tri_nbr]=idx_c; tri_nbr++; /* Begin search for next B at current C */ bnd_idx=idx_c-idx_a; } /* !bnd_idx */ } /* !flg_mth_csz */ /* Triangles are known for requested decomposition method Compute and accumulate their area Optimized algorithm recycles previous arc c as current arc a (after first triangle) */ for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ idx_a=a_idx[tri_idx]; idx_b=b_idx[tri_idx]; idx_c=c_idx[tri_idx]; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c); /* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */ if(tri_idx == 0){ /* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${HOME}/grd.nc ncol=0 on conus chevrons file: 3.653857995295246e-05 raw GLL weight 3.653857995294305e-05 ESMF weight (area_b from
map-file) 3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT 3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT 3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine) 3.653857995289623e-05 NCO CSZ decomposition (old acos) 20191011: Tested this same polygon in ESMF and NCO weight-generator NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col: ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 " ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 3.653857995294305e-05 ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995295246e-05 ESMF and NCO weight-generators produce nearly identical S results to double-precision: ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 2.181999640069480e-03, 1.309571213636605e-02 ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 2.181999640069454e-03, 1.309571213636510e-02 Compare first five polygon areas: ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave 3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05, Compare total areas: ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc aave: 1.256637061435867e+01, 1.256637061435973e+01 nco: 1.256637061435857e+01, 1.256637061435955e+01 4*pi: 1.25663706143591729538e+01 Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ? 
(1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333 No, normalization alone does not explain differences between ESMF and NCO It does not appear that ESMF does a global normalization of areas/weights */ /* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees The cosine of small angles changes slowly, and this leads to precision loss Use haversine formula instead of spherical law of cosines formula https://en.wikipedia.org/wiki/Great-circle_distance */ /* Interior angle/great circle arc a, spherical law of cosines formula (loses precision): cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+ lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+ lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */ /* Interior angle/great circle arc a, haversine formula: */ // 20160918: Use branch cut rules for longitude lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b])); lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2)); ngl_a=2.0*asin(sin_hlf_tht); }else{ /* !tri_idx == 0 */ ngl_a=ngl_c; } /* !tri_idx == 0 */ /* Interior angle/great circle arc b */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c])); lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2)); ngl_b=2.0*asin(sin_hlf_tht); /* Interior angle/great circle arc c */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a])); lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2)); ngl_c=2.0*asin(sin_hlf_tht); /* Semi-perimeter */ prm_smi=0.5*(ngl_a+ngl_b+ngl_c); /* L'Huilier's formula results in NaN if any side exceeds semi-perimeter This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c 20200203: Problematic needles occur a few dozen times in ne120pg2 -> cmip6 maps Problematic isoceles triangles are much rarer than problematic needles Therefore look for needle-issues first, then, if none found, look for isoceles issues Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula https://en.wikipedia.org/wiki/Spherical_trigonometry Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */ flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False; if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */ else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */ else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True; if(!flg_sas_ndl){ /* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side This occurs for flat, isoceles-shaped triangles Label problematic isoceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */ /* Sensitivity tests on ~20191014 showed that triangular ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd >
1.0e-15 */ const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */ else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */ else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True; } /* !flg_sas_ndl */ if(flg_sas_isc || flg_sas_ndl){ /* Compute area using SAS formula */ double cos_hlf_C; /* [frc] Cosine of half of canonical surface angle C */ //double sin_hlf_C; /* [frc] Sine of half of canonical surface angle C */ double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */ double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canonical sides */ double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */ /* Transform sides into canonical order for formula where C is surface angle between arcs a and b */ if(flg_sas_c){ ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; } /* !flg_sas_c */ if(flg_sas_a){ ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; } /* !flg_sas_a */ if(flg_sas_b){ ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; } /* !flg_sas_b */ if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c); if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is nearly flat isoceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b)); /* Determine canonical surface angle C To find any angle given three spherical triangle sides, Wikipedia opines: "The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred." Half-angle formulae include two applicable variants that yield the sine or cosine of half C Then C is determined as twice the asin() or acos() function, respectively For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5 For flat isoceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For flat isoceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0 Use sin formula since both needle- and isoceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise 20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why?
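A plausible explanation (an inference, not verified against the references above): for a needle flagged above, one canonical side a or b slightly exceeds the semi-perimeter s, so the sine formula's radicand sin(s-a)*sin(s-b) can round to a slightly negative number and sqrt() then returns NaN, whereas the cosine formula's radicand sin(s)*sin(s-c) involves only the short canonical side c and so remains non-negative.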
Adopting cosine formula because it works */ //sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula //ngl_sfc_ltr_C=2.0*asin(sin_hlf_C); ngl_sfc_ltr_C=2.0*acos(cos_hlf_C); /* SAS formula */ tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b); xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C)); assert(fabs(xcs_sph_hlf_tan) != M_PI_2); xcs_sph=2.0*atan(xcs_sph_hlf_tan); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph); // Single-line version // xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b))))))); /* Above procedure for problematic needle-shaped and isoceles-shaped triangles degrades statistics For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?) Set spherical excess to zero for problematic needle-shaped and isoceles-shaped triangles */ /* fxm: Make zeroing skinny needles/isoceles-shaped triangle-areas a command-line option? */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get()); xcs_sph=0.0; /* !flg_sas */ }else{ double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */ xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c))); assert(fabs(xcs_sph_qtr_tan) != M_PI_2); xcs_sph=4.0*atan(xcs_sph_qtr_tan); /* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */ // xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c)))); } /* !flg_sas */ if(isnan(xcs_sph)){ const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */ /* Categorize reason for NaN */ (void)fprintf(stdout,"%s: WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\nUnexpected NaN polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); if( /* Side exceeds semi-perimeter */ (ngl_a > prm_smi) || (ngl_b > prm_smi) || (ngl_c > prm_smi) ){ (void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */ (ngl_a < eps_ngl_skn) || (ngl_b < eps_ngl_skn) || (ngl_c < eps_ngl_skn) ){ (void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angle < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */ ((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) || ((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) || ((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a])) ){ (void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); }else{ (void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); } /* !co-linear */ xcs_sph=0.0; } /* !NaN */ area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */ area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */ area_ttl+=xcs_sph; /* Accumulate spherical triangle area into total spherical area */ area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total small-circle area and adjust below */ /* 20160918 from here to end of loop is non-spherical work 20170217: Temporarily turn-off latitude circle diagnostics because Sungduk's POP case breaks them Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */ flg_tri_crr_smc=False; if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] || lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] || lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ /* Set flag only if triangle is not degenerate.
Degenerate triangles (3 points on a geodesic) have zero area */ if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); } /* endif */ if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){ double ngl_plr; /* [rdn] Polar angle (co-latitude) */ long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */ /* Rotate labels to standard position with vertex A, equi-latitude points B and C */ if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){ idx_ltr_a=idx_c; idx_ltr_b=idx_a; idx_ltr_c=idx_b; ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]); }else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){ idx_ltr_a=idx_a; idx_ltr_b=idx_b; idx_ltr_c=idx_c; ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]); }else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ idx_ltr_a=idx_b; idx_ltr_b=idx_c; idx_ltr_c=idx_a; ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]); }else{ /* NB: idx_ltr_a/b/c are never assigned in this (unreachable) branch, so print the original vertex indices rather than the uninitialized canonical indices */ (void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); abort(); } /* endif */ /* 20160918: Compute exact area of latitude triangle wedge */ double xpn_x; /* [frc] Expansion parameter */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c])); assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes if(lon_dlt != M_PI){ /* Normal clause executed for small-circle triangles */ /* Numeric conditioning uncertain. Approaches divide-by-zero when lon_dlt << 1 */ xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt); assert(fabs(xpn_x) != M_PI_2); area_smc_crc=2.0*atan(xpn_x); /* 20170217: Sungduk's POP regrid triggers following abort(): ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */ //assert(xpn_x >= 0.0); //if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b]; area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; }else{ /* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole: CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole Solution candidates: 1. Divide such quadrilaterals using centroid technique Just realized current implementation of centroid decomposition fails on polar caps Failure occurs because centroid latitude is +/- ~90 not mean of vertices' latitudes Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes Requires testing each polygon to determine if it contains pole <- Too difficult/expensive 2.
Assume latitude triangles whose base is 180 degrees are at pole Compute area exactly using analytic formula for annular lune */ (void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); (void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph); for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]); (void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get()); /* Compute small circle correction as difference between spherical triangle area and standard annular lune formula Small circle correction is positive-definite for polar triangles so use fabs(lat_bnd_sin[idx_ltr_b]) */ area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc; } /* !lon_dlt */ // Adjust diagnostic areas by small-circle area correction area_smc+=area_smc_crc; area_smc_ttl+=area_smc_crc; area_smc_crc_ttl+=area_smc_crc; area_smc_crc_abs_ttl+=fabs(area_smc_crc); // 20200109: Adjust area reported to calling code by small-circle area correction area[col_idx]+=area_smc_crc; if(0){ /* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. */ double xpn_x_sqr; /* [frc] Expansion parameter squared */ double xpn_sum=0.0; /* [frc] Expansion sum (initialize to avoid accumulating into an indeterminate value below) */ double xpn_nmr; /* [frc] Expansion term numerator */ double xpn_trm; /* [frc] Expansion term */ double xpn_dnm; /* [frc] Expansion term denominator */ const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */ unsigned short int idx_xpn; /* [idx] Index in series expansion */ xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt); xpn_x_sqr=xpn_x*xpn_x; xpn_nmr=xpn_x; xpn_dnm=1.0; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){ xpn_nmr*=xpn_x_sqr; xpn_dnm*=(idx_xpn-1)*idx_xpn; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; } /* !idx_xpn */ (void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get()); } /* !0 */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph); if(fabs(area_smc_crc/xcs_sph) > 0.1){ (void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); } /* !fabs */ } /* !dbg */ } /* !edg_typ && flg_tri_crr_smc */ } /* !tri_idx */ if(edg_typ == nco_edg_smc && flg_ply_has_smc){ /* Current gridcell contained at least one latitude-triangle */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]); } /* !edg_typ && !flg_ply_has_smc */ } /*
!col_idx */ if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl); if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld); if(a_idx) a_idx=(long *)nco_free(a_idx); if(b_idx) b_idx=(long *)nco_free(b_idx); if(c_idx) c_idx=(long *)nco_free(c_idx); if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn); if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn); if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos); if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos); if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin); if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin); } /* !nco_sph_plg_area() */ int /* O [enm] Return code */ nco_rgr_tps /* [fnc] Regrid using TempestRemap library */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables) Routine was originally written to call Tempest executables However, that functionality was all placed into the ncremap shell script Thus this C-interface is currently unused TempestRemap2 has a library that may be accessed on-line Test Tempest library: no way to activate yet export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_tps()"; const int fmt_chr_nbr=6; const char *cmd_rgr_fmt; char *cmd_rgr; char fl_grd_dst[]="/tmp/foo_outRLLMesh.g"; char *fl_grd_dst_cdl; int rcd_sys; int lat_nbr_rqs=180; int lon_nbr_rqs=360; nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */ char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ nvr_DATA_TEMPEST=getenv("DATA_TEMPEST"); rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? 
rgr->drc_tps : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Allow for whitespace characters in fl_grd_dst Assume CDL translation results in acceptable name for shell commands */ fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst); /* Construct and execute regridding command */ nco_tps_cmd=nco_rgr_GenerateRLLMesh; cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd); cmd_rgr=(char *)nco_malloc((strlen(cmd_rgr_fmt)+strlen(fl_grd_dst_cdl)-fmt_chr_nbr+1UL)*sizeof(char)); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst); (void)sprintf(cmd_rgr,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl); rcd_sys=system(cmd_rgr); if(rcd_sys == -1){ (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n"); /* Clean-up memory */ if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl); if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr); return NCO_NOERR; } /* end nco_rgr_tps() */ const char * /* O [sng] String describing two-dimensional grid-type */ nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */ (const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */ { /* Purpose: Convert two-dimensional grid-type enum to string */ switch(nco_grd_2D_typ){ case nco_grd_2D_unk: return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)"; case nco_grd_2D_gss: return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_2D_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_2D_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). 
Used by CIESIN/SEDAC, IGBP-DIS, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_2D_sng() */ const char * /* O [sng] String describing latitude grid-type */ nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */ (const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */ { /* Purpose: Convert latitude grid-type enum to string */ switch(nco_grd_lat_typ){ case nco_grd_lat_unk: return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)"; case nco_grd_lat_gss: return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_lat_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_lat_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lat_sng() */ const char * /* O [sng] String describing longitude grid-type */ nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */ (const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */ { /* Purpose: Convert longitude grid-type enum to string */ switch(nco_grd_lon_typ){ case nco_grd_lon_unk: return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)"; case nco_grd_lon_180_wst: return "Date line at west edge of first longitude cell"; case nco_grd_lon_180_ctr: return "Date line at center of first longitude cell"; case nco_grd_lon_Grn_wst: return "Greenwich at west edge of first longitude cell"; case nco_grd_lon_Grn_ctr: return "Greenwich at center of first longitude cell"; case nco_grd_lon_bb: return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lon_sng() */ const char * /* O [sng] String describing grid extent */ nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */ (const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */ { /* Purpose: Convert grid-extent enum to string */ switch(nco_grd_xtn){ case nco_grd_xtn_nil: return "Unknown"; case nco_grd_xtn_glb: return "Global"; case nco_grd_xtn_rgn: return "Regional"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., 
SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_xtn_sng() */ const char * /* O [sng] String describing grid conversion */ nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */ (const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */ { /* Purpose: Convert grid conversion enum to string */ switch(nco_rgr_typ){ case nco_rgr_grd_1D_to_1D: return "1D_to_1D"; case nco_rgr_grd_1D_to_2D: return "1D_to_2D"; case nco_rgr_grd_2D_to_1D: return "2D_to_1D"; case nco_rgr_grd_2D_to_2D: return "2D_to_2D"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_grd_sng() */ const char * /* O [sng] String describing regridding method */ nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */ (const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */ { /* Purpose: Convert regridding method enum to string */ switch(nco_rgr_mth_typ){ case nco_rgr_mth_conservative: return "Conservative remapping"; case nco_rgr_mth_bilinear: return "Bilinear remapping"; case nco_rgr_mth_none: return "none"; case nco_rgr_mth_unknown: return "Unknown (TempestRemap or ESMF_weight_only)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mth_sng() */ const char * /* O [sng] String describing mapfile generator */ nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */ (const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */ { /* Purpose: Convert mapfile generator enum to string */ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL"; case nco_rgr_mpf_SCRIP: return "SCRIP (original LANL package)"; case nco_rgr_mpf_Tempest: return "TempestRemap (GenerateOfflineMap)"; case nco_rgr_mpf_ESMF_weight_only: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+"; case nco_rgr_mpf_NCO: return "netCDF Operators (NCO) Offline Regridding Weight Generator"; case nco_rgr_mpf_unknown: return "Unknown Weight Generator"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mpf_sng() */ const char * /* O [sng] String describing regridding normalization */ nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */ (const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */ { /* Purpose: Convert regridding normalization enum to string */ switch(nco_rgr_nrm_typ){ case nco_rgr_nrm_fracarea: return "fracarea"; case nco_rgr_nrm_destarea: return "destarea"; case nco_rgr_nrm_none: return "none"; case nco_rgr_nrm_unknown: return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_nrm_sng() */ const char * /* O [sng] String containing regridding command and format */ nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */ (const nco_rgr_tps_cmd 
nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string and format */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh --res %d --file %s"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh --a %s --b %s --out %s"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh --lat %d --lon %d --file %s"; case nco_rgr_GenerateTestData: return "GenerateTestData --mesh %s --np %d --test %d --out %s"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_fmt_sng() */ const char * /* O [sng] String containing regridding command name */ nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh"; case nco_rgr_GenerateTestData: return "GenerateTestData"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_sng() */ int /* O [enm] Return code */ nco_grd_mk /* [fnc] Create SCRIP-format grid file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information to create SCRIP-format grid file Spherical geometry terminology: spherical cap = spherical dome = volume cut-off by plane spherical lune = digon = area bounded by two half-great circles = base of spherical wedge spherical segment = volume defined by cutting sphere with pair parallel planes spherical sector = volume subtended by lat1 spherical wedge = ungula = volume subtended by lon2-lon1 spherical zone = area of spherical segment excluding bases spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of a latitude-longitude rectangle on the sphere) bearing = angle from true north geodesic = shortest path between points on a surface great circle = orthodrome = "straight path" = geodesic of the sphere convergency = difference (in azimuth?)
between great circle tracks at two different positions conversion angle = angle between geodesic and rhumb line rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth Formulae: http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation http://williams.best.vwh.net/avform.htm ACME: https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps NCAR: yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids Global RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Maps for global RLL grids: ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve ACME grids: ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ACME maps: ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal 
resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc AMWG maps old method (no provenance archived): ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear AMWG maps with ncremap (preferred method): ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear MPAS grids: NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps MPAS maps: ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear Regional RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc Global RLL skeleton: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Curvilinear grids: ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. 
Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc 1-D Latitude (no longitude) grids: ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */ const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const nc_type crd_typ=NC_DOUBLE; char *fl_out_tmp=NULL_CEWI; char *fl_out; char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ double lat_crv; /* [dgr] Latitudinal curvilinearity */ double lon_crv; /* [dgr] Longitudinal curvilinearity */ double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude 
increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double *wgt_Gss=NULL; // [frc] Gaussian weights double precision int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int msk_id; /* [id] Mask variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_nbr; /* [nbr] Number of bounds in gridcell */ long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ dfl_lvl=rgr->dfl_lvl; grd_typ=rgr->grd_typ; /* [enm] Grid type */ fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */ lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */ lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */ lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */ lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */ lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */ lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */ lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */ lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid */ lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */ lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */ /* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv 
already set or if lat_crv or lon_crv is set */ if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True; if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False; /* Assume 2D grid */ flg_grd_2D=True; grd_rnk_nbr=dmn_nbr_2D; /* Assume quadrilaterals */ grd_crn_nbr=4; /* Assume rectangles */ bnd_nbr=2; col_nbr=lat_nbr*lon_nbr; grd_sz_nbr=lat_nbr*lon_nbr; /* Allocate space for output data */ area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Define variable values */ int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */ int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_2D){ lon_psn=0; /* SCRIP introduced [lon,lat] convention because it is more natural for Fortran */ lat_psn=1; } /* !flg_grd_in_2D */ dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; /* Compute rectangular arrays NB: Much of this is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */ /* 20150827: Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token Such bounds ALWAYS refer to bounding box interface edges, NEVER to centers of first and last gridcells Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors) Hence, grid-type tokens may be used as short-hand to specify grids but may not be required to exist later (because regional grids would not have specified them) Grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds 1x1 degree global grid with first longitude centered at Greenwich: --lon_nbr=360 --lon_typ Grn_ctr --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5 1x1 degree global grid with Greenwich at west edge of first longitude: --lon_nbr=360 --lon_typ Grn_wst --lon_nbr=360 --lon_wst=0.0 --lon_est=360.0 1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell: --lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5 1x1 degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells --lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */ /* Were east/west longitude bounds set explicitly or
implicitly? NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */ if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb; if(lon_wst == NC_MAX_DOUBLE){ /* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_wst=0.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_wst=-180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ if(lon_est == NC_MAX_DOUBLE){ /* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_est=360.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_est=180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ /* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */ lon_spn=lon_est-lon_wst; lon_ncr=lon_spn/lon_nbr; /* Centering: If user did not set explicit longitude bounds then... */ if(lon_typ != nco_grd_lon_bb) /* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */ if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0); /* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */ lon_est=lon_wst+lon_ncr*lon_nbr; /* lon_wst and lon_est have been set and will not change */ assert(lon_wst < lon_est); lon_ntf[0L]=lon_wst; lon_ntf[lon_nbr]=lon_est; for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++) lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr; /* Ensure rounding errors do not produce unphysical grid */ lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn; /* Finished with longitude, now tackle latitude */ /* Were south/north latitude bounds set explicitly or implicitly? */ // if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb; if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0; if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0; /* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */ lat_spn=lat_nrt-lat_sth; lat_ncr=lat_spn/lat_nbr; const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision /* Create S->N grid. 
If user requested N->S, flip grid at end */ // if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt; lat_ntf[0L]=lat_sth; switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; break; case nco_grd_lat_gss: lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); /* 20190613: n2s latitudes are constructed s2n and flipped to n2s later Hence next line is commented-out in construction mode but used in infer mode */ // if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: += (not -=) because fofx_at_x0=gw-lat_wgt_gss is the negative of f(x) in the notation above, so the minus sign in the Newton-Raphson update cancels */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only to %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(nco_dbg_lvl_get() > nco_dbg_old){ (void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n"); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ (void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]); } /* !lat_idx */ } /* !dbg */ /* Always define longitude
centers midway between interfaces */ for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++) lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]); /* Many grids have center latitude equally spaced between interfaces */ if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); } /* !lat_typ */ /* Cap grids excepted---they place centers of first/last gridcells at poles */ if(lat_typ == nco_grd_lat_fv){ lat_ctr[0L]=lat_ntf[0L]; for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr]; } /* !cap */ /* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */ if(lat_typ == nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); } /* !Gaussian */ for(idx=0L;idx<lon_nbr;idx++){ lon_bnd[2*idx]=lon_ntf[idx]; lon_bnd[2*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ } /* endif dbg */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Fuzzy test of latitude weight normalization 20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids somewhere lat_nbr >~ 150 20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280 Newton-Raphson method of interface determination may need improvement to fix that Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && 1.0-lat_wgt_ttl/lat_wgt_ttl_xpc > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ /* 20180831 Code above assumes grids run S->N User can request N->S grids with --rgr lat_drc=n2s If so, flip grid before unrolling into output arrays */ if(!flg_s2n){ double *lat_ctr_tmp=NULL_CEWI; /* [dgr] Temporary Latitude centers of rectangular grid */ double *lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */ 
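/* Flip strategy used below: copy centers, weights, and interfaces to temporaries, write them back in reverse order (centers and weights via index lat_nbr-1L-idx, interfaces via index lat_nbr-idx), then rebuild lat_bnd from the reversed interfaces so bounds remain consistent with the n2s grid */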
double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */ lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); long tmp_idx; /* [idx] Temporary index for swapping values */ for(idx=0L;idx<lat_nbr;idx++){ lat_ctr_tmp[idx]=lat_ctr[idx]; lat_wgt_tmp[idx]=lat_wgt[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ tmp_idx=lat_nbr-idx-1L; lat_ctr[idx]=lat_ctr_tmp[tmp_idx]; lat_wgt[idx]=lat_wgt_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ lat_ntf_tmp[idx]=lat_ntf[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */ lat_ntf[idx]=lat_ntf_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp); if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp); if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp); } /* !flg_s2n */ assert(grd_crn_nbr == 4); for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; lon_crn[idx+3L]=lon_ntf[lon_idx]; } /* !lon_idx */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; lat_crn[idx+1L]=lat_ntf[lat_idx]; lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; } /* !lat_idx */ /* Stuff rectangular arrays into unrolled arrays */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ if(flg_grd_crv){ /* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]+=lon_idx*lat_crv; grd_ctr_lon[idx]+=lat_idx*lon_crv; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; if(crn_idx == 0L || crn_idx == 1L){ grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */ grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */ }else if(crn_idx == 2L || crn_idx == 3L){ grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UR, UL */ grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UR, UL */ } /* !crn */ } /* !crn */ } /* !lon */ } /* !lat */ } /* !flg_grd_crv */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */ const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail of side AB) */ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx;
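/* idx2 indexes the first corner (vertex A) of gridcell idx; nco_ccw_chk() begins its winding test at vertex idx_ccw and converts cells it diagnoses as clockwise to CCW order (cf. the debug message below) */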
flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %lu from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx); } /* !idx */ } /* !flg_s2n */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0L; idx_crn_lr=grd_crn_nbr*idx_dbg+1L; idx_crn_ur=grd_crn_nbr*idx_dbg+2L; idx_crn_ul=grd_crn_nbr*idx_dbg+3L; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]); } /* !dbg */ if(flg_grd_crv){ /* Area of arbitrary curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else{ /* Area of rectangular spherical zones from elementary calculus results 20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15 area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx])); Gain not worth the extra complexity */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) /* fabs() ensures positive area in n2s grids */ area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); } /* !flg_grd_2D */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) 
(void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl);
  (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl);
  (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl);
  dmn_ids[0]=dmn_id_grd_sz;
  dmn_ids[1]=dmn_id_grd_crn;
  (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl);
  dmn_ids[0]=dmn_id_grd_sz;
  dmn_ids[1]=dmn_id_grd_crn;
  (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl);

  /* Define global and "units" attributes */
  char *att_val;
  rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl);
  rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP");
  const char usr_cpp[]=TKN2SNG(USER); /* [sng] Username from C pre-processor */
  rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
  rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO");
  (void)nco_hst_att_cat(out_id,rgr->cmd_ln);
  (void)nco_vrs_att_cat(out_id);
  rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
  rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
  rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid");
  rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle");
  rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian");
  rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers");
  rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude");
  if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
  rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm);
  rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers");
  rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude");
  if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
  rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm);
  rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices");
  rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude");
  if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
  rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices");
  rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude");
  if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
  rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid");
  rcd=nco_char_att_put(out_id,msk_nm,"units","none");

  /* Begin data mode */
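  /* For reference, an illustrative CDL sketch (sizes assume an example 1x1 degree global grid, i.e., 180*360=64800 cells) of the SCRIP gridfile populated below:
     dimensions:
       grid_rank = 2 ; grid_size = 64800 ; grid_corners = 4 ;
     variables:
       int grid_dims(grid_rank) ; int grid_imask(grid_size) ;
       double grid_area(grid_size) ;
       double grid_center_lat(grid_size) ; double grid_center_lon(grid_size) ;
       double grid_corner_lat(grid_size, grid_corners) ; double grid_corner_lon(grid_size, grid_corners) ; */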
(void)nco_enddef(out_id);

  /* Write variables */
  dmn_srt[0]=0L;
  dmn_cnt[0]=grd_rnk_nbr;
  rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT);
  dmn_srt[0]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
  dmn_srt[0]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT);
  dmn_srt[0]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
  dmn_srt[0]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
  dmn_srt[0]=0L;
  dmn_srt[1]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  dmn_cnt[1]=grd_crn_nbr;
  rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
  dmn_srt[0]=0L;
  dmn_srt[1]=0L;
  dmn_cnt[0]=grd_sz_nbr;
  dmn_cnt[1]=grd_crn_nbr;
  rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);

  /* Close output file and move it from temporary to permanent location */
  (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);

  fl_out=rgr->fl_skl;
  if(fl_out){
    /* Write skeleton data file on requested grid
       Skeleton file can then be populated with data for testing */
    char *area_nm;
    char *bnd_nm;
    // char *bnd_tm_nm;
    char *col_nm_out;
    char *lat_nm_out; /* [sng] Name of output dimension for latitude */
    char *lat_wgt_nm;
    char *lon_nm_out; /* [sng] Name of output dimension for longitude */
    char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */
    char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */
    // int area_id; /* [id] Variable ID for area */
    int dmn_id_bnd; /* [id] Dimension ID */
    //int dmn_id_bnd_tm; /* [id] Dimension ID */
    int dmn_id_col; /* [id] Dimension ID */
    int dmn_id_lat; /* [id] Dimension ID */
    int dmn_id_lon; /* [id] Dimension ID */
    int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */
    int lat_id; /* [id] Variable ID for latitude */
    int lat_wgt_id; /* [id] Variable ID for latitude weight */
    int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */
    int lon_id; /* [id] Variable ID for longitude */

    /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */
    if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat");
    if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon");
    if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol");

    /* Name output dimensions */
    area_nm=rgr->area_nm;
    bnd_nm=rgr->bnd_nm;
    //bnd_tm_nm=rgr->bnd_tm_nm;
    lat_bnd_nm=rgr->lat_bnd_nm;
    lat_wgt_nm=rgr->lat_wgt_nm;
    lon_bnd_nm=rgr->lon_bnd_nm;
    /* Use names discovered by fuzzing */
    if(flg_grd_1D){
      bnd_nm=rgr->vrt_nm;
      lat_bnd_nm=rgr->lat_vrt_nm;
      lon_bnd_nm=rgr->lon_vrt_nm;
    } /* !flg_grd_1D */
    if(flg_grd_2D){
      bnd_nm=rgr->bnd_nm;
      lat_bnd_nm=rgr->lat_bnd_nm;
      lon_bnd_nm=rgr->lon_bnd_nm;
    } /* !flg_grd_2D */

    /* Open grid file */
    fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);

    /* Define dimensions */
    if(flg_grd_crv){
      rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd);
    }else{
      rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd);
    } /* !flg_grd_crv */
    if(flg_grd_1D){
      rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col);
    } /* !flg_grd_1D */
    if(flg_grd_2D){
      rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat);
      rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon);
    } /* !flg_grd_2D */

    /* Define new coordinates and variables in regridded file */
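    /* Illustrative sketch (default output names, generic dimension names) of the skeleton-file layouts defined below:
       flg_grd_1D : lat(ncol), lon(ncol), lat_bnds(ncol,bnd), lon_bnds(ncol,bnd), area(ncol)
       flg_grd_crv: lat(lat,lon), lon(lat,lon), lat_bnds(lat,lon,bnd=4), lon_bnds(lat,lon,bnd=4), area(lat,lon)
       flg_grd_2D : lat(lat), lon(lon), lat_bnds(lat,bnd=2), lon_bnds(lon,bnd=2), lat_wgt(lat), area(lat,lon) */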
if(flg_grd_1D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; dmn_ids[2]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); }else if(flg_grd_2D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lon; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_2D */ /* Define attributes */ rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell"); 
rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val); if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=dmn_srt[1]=0L;dmn_srt[2]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); }else if(flg_grd_2D){ dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lon_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_2D */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); } /* !fl_out */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) 
msk=(int *)nco_free(msk);
  if(area) area=(double *)nco_free(area);
  if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
  if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
  if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
  if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
  if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
  if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
  if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
  if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
  if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
  if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
  if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
  if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
  if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
  if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
  if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss);

  return rcd;
} /* !nco_grd_mk() */

int /* O [enm] Return code */
nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file

     Test curvilinear grids:
     ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc
     ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */

  const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */

  const double rdn2dgr=180.0/M_PI;
  const double dgr2rdn=M_PI/180.0;

  const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */
  const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
  const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
  const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */
  const int itr_nbr_max=20; // [nbr] Maximum number of iterations
  const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */
  const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */

  const nc_type crd_typ=NC_DOUBLE;

  char *area_nm_in=NULL;
  char *fl_in;
  char *fl_out;
  char *fl_out_tmp=NULL_CEWI;
  char *fl_pth_lcl=NULL;
  char *msk_nm_in=NULL;

  char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */

  /* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */
  char area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */
  char dmn_sz_nm[]="grid_dims";
  char grd_crn_lat_nm[]="grid_corner_lat";
  char grd_crn_lon_nm[]="grid_corner_lon";
  char grd_crn_nm[]="grid_corners";
  char grd_ctr_lat_nm[]="grid_center_lat";
  char grd_ctr_lon_nm[]="grid_center_lon";
  char grd_rnk_nm[]="grid_rank";
  char grd_sz_nm[]="grid_size";
  char msk_nm[]="grid_imask";
  char unt_sng[]="units"; /* netCDF-standard units attribute name */

  double *grd_ctr_lat; /* [dgr] Latitude centers of grid */
  double *grd_ctr_lon; /* [dgr] Longitude centers of grid */
  double *grd_crn_lat; /* [dgr] Latitude corners of grid */
  double *grd_crn_lon; /* [dgr] Longitude corners of grid */
  double *area; /* [sr] Area of grid */
  double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */
  double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */
  double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */
  double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */
  double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */
  double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */
  double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */
  double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */
  double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */

  double area_ttl=0.0; /* [frc] Exact sum of area */
  double lat_nrt; /* [dgr] Latitude of northern edge of grid */
  double lat_sth; /* [dgr] Latitude of southern edge of grid */
  double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */
  double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */
  // double lon_est; /* [dgr] Longitude of eastern edge of grid */
  double lon_wst; /* [dgr] Longitude of western edge of grid */
  double lon_ncr; /* [dgr] Longitude increment */
  double lat_ncr; /* [dgr] Latitude increment */
  double lon_spn; /* [dgr] Longitude span */
  double lat_spn; /* [dgr] Latitude span */
  double mss_val_area_dbl;
  double mss_val_ctr_dbl;
  double mss_val_msk_dbl;

  int *msk=NULL; /* [flg] Mask of grid */
  int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */

  int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */

  int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
  int dmn_idx; /* [idx] Dimension index */
  int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */
  int in_id; /* I [id] Input netCDF file ID */
  int md_open; /* [enm] Mode flag for nc_open() call */
  int out_id; /* I [id] Output netCDF file ID */
  int rcd=NC_NOERR;

  int area_id=NC_MIN_INT; /* [id] Area variable ID */
  int dmn_id_grd_crn; /* [id] Grid corners dimension ID */
  int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */
  int dmn_id_grd_sz; /* [id] Grid size dimension ID */
  int dmn_sz_int_id; /* [id] Grid dimension sizes ID */
  int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */
  int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */
  int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */
  int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */
  int itr_cnt; /* Iteration counter */
  int lat_rnk; /* [nbr] Rank of latitude coordinate */
  int lon_rnk; /* [nbr] Rank of longitude coordinate */
  int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */
  int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */
  int lat_bnd_id=NC_MIN_INT; /* [id] Latitude bounds of rectangular grid variable ID */
  int lon_bnd_id=NC_MIN_INT; /* [id] Longitude bounds of rectangular grid variable ID */
  int msk_id=NC_MIN_INT; /* [id] Mask variable ID */
  int msk_rnk_nbr; /* [nbr] Mask rank */
  int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */
  int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */
  int val_zero=0; /* [nbr] Value that can be non-erroneously pointed to */
  int var_id; /* [id] Current variable ID */

  long dmn_srt[dmn_nbr_grd_max];
  long dmn_cnt[dmn_nbr_grd_max];

  long bnd_idx;
  long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */
  long col_idx;
  long col_nbr; /* [nbr] Number of columns in grid */
  long crn_idx; /* [idx] Counting index for corners */
  long dmn_sz; /* [nbr] Size of current dimension */
  long grd_crn_nbr; /* [nbr] Number of corners in gridcell */
  long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */
  long grd_sz_nbr; /* [nbr] Number of gridcells in grid */
  long idx2; /* [idx] Counting index for unrolled grids */
  long idx; /* [idx] Counting index for unrolled grids */
  long idx_crn;
  long idx_ctr;
  long lat_idx2; /* [idx] Counting index for unrolled latitude */
  long lat_idx;
  long lat_nbr; /* [nbr] Number of latitudes in grid */
  long lon_idx2; /* [idx] Counting index for unrolled longitude */
  long lon_idx;
  long lon_nbr; /* [nbr] Number of longitudes in grid */
  long int idx_crn_ll;
  long int idx_crn_lr;
  long int idx_crn_ur;
  long int idx_crn_ul;

  nco_bool FL_RTR_RMT_LCN;
  nco_bool FORCE_APPEND=False; /* Option A */
  nco_bool FORCE_OVERWRITE=True; /* Option O */
  nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
  nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
  nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
  nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
  nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */
  nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */
  nco_bool flg_ccw; /* [flg] Gridcell is CCW */
  nco_bool flg_grd_1D=False;
  nco_bool flg_grd_2D=False;
  nco_bool flg_grd_crv=False;
  nco_bool flg_s2n=True; /* [flg] Latitude grid-direction is South-to-North */
  nco_bool flg_wrt_crn=True;
  nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */
  nco_bool use_mss_val_area=False;
  nco_bool has_mss_val_area=False;
  nco_bool has_mss_val_bnd=False;
  nco_bool has_mss_val_ctr=False;
  nco_bool has_mss_val_msk=False;

  nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */
  nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */
  nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */
  nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */

  nc_type msk_typ;

  ptr_unn msk_unn;

  size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */

  /* Algorithm:
     Read grid information from input data file (aka *_in)
     Close input file
     Once grid dimensions known, allocate output grid arrays (aka *_out)
     Open output file (aka grid-file)
     Use guesswork and standard algorithms to fill-in output arrays */

  /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */
  fl_in=(char *)strdup(rgr->fl_in);
  /* Make sure file is on local system and is readable or die trying */
  fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
  /* Open file using appropriate buffer size hints and verbosity */
  if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
  rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);

  char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */
  char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */
  char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */
  char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */
  char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */
  char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */
  char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */
  char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude boundary variable */
  int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */
  int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */
  int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */
  int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */

  /* Begin CF-coordinates block */
  cf_crd_sct *cf=NULL;
  char *rgr_var; /* [sng] Variable for special regridding treatment */
  nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */
  rgr_var=rgr->var_nm;
  if(rgr_var){
    /* Infer grid from special variable
       Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g.,
       ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates
       4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ;
       Usage:
       ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */
    char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */
    cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct));
    cf->crd=False; /* [flg] CF coordinates information is complete */
    cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */
    cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */
    cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */
    cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */
    cf->crd_sng=NULL; /* [sng] Coordinates attribute value */
    cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */
    cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */
    cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */
    cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */
    cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */
    cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */
    cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */
    cf->var_nm=NULL; /* [sng] Coordinates variable name */
    cf->var_type=NC_NAT; /* [enm] Coordinates variable type */
    if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){
      (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
      goto skp_cf;
    } /* !rcd */
    cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng);
    if(cf->crd_sng){
      cf->crd=True;
    }else{ /* !rcd && att_typ */
      (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
      goto skp_cf;
    } /* !rcd && att_typ */

    /* Valid coordinates attribute requires two coordinate names separated by space character */
    char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */
    char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */
    char *spc_ptr; /* [sng] Pointer to space character (' ') */
    int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */
    int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */
    int crd_idx=0; /* [idx] Counter for coordinate names */
    for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL;
    crd_dpl=(char *)strdup(cf->crd_sng);
    /* Search for spaces starting from end of string */
    while((spc_ptr=strrchr(crd_dpl,' '))){
      crd_nm[crd_nbr]=spc_ptr+1L;
      crd_nbr++;
      /* NUL-terminate so next search ends here */
      *spc_ptr='\0';
    } /* !spc_ptr */
    /* Final coordinate name begins where coordinate string starts */
    crd_nm[crd_nbr]=crd_dpl;
    /* Change crd_nbr from 0-based index to actual coordinate number */
    crd_nbr++;
    if(crd_nbr < 2){
      (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng);
      goto skp_cf;
    } /* !crd_nbr */
    /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */
    crd_idx=crd_spt=0;
    while(crd_spt < 2 && crd_idx < crd_nbr){
      cf->crd_nm[crd_spt]=crd_nm[crd_idx];
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){
	cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng);
	if(cf->unt_sng[crd_spt]){
	  if(strcasestr(cf->unt_sng[crd_spt],"degree")){
	    /* Increment count of spatial-like coordinates... */
	    crd_spt++;
	  }else{
	    /* ...or free() memory allocated during search */
	    cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]);
	  } /* !strcasestr() */
	} /* !unt_sng */
      } /* !rcd */
      /* Always advance to next candidate name, even when variable or units attribute is missing, lest loop never terminate */
      crd_idx++;
    } /* !crd_spt */
    /* If while()-loop above was successful, our search is over. Otherwise, use first two coordinate names regardless of units, and print more diagnostics */
    if(crd_spt < 2){
      cf->crd_nm[0]=crd_nm[0];
      cf->crd_nm[1]=crd_nm[1];
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){
	(void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]);
	goto skp_cf;
      } /* !rcd */
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){
	(void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]);
	goto skp_cf;
      } /* !rcd */
      cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng);
      if(cf->unt_sng[0]){
	if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]);
      } /* !rcd && att_typ */
      cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng);
      if(cf->unt_sng[1]){
	if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]);
      } /* !rcd && att_typ */
    } /* !crd_spt */

    int crd_rnk; /* [nbr] Coordinate rank */
    rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk);
    if(crd_rnk != 2){
      (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk);
      goto skp_cf;
    } /* !crd_rnk */
    rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id);
    cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
    cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
    rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]);
    rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]);

    /* "coordinates" convention does not guarantee lat, lon are specified in that order
       Use "units" values, if any, to determine order
       In absence of "units", assume order is lat, lon */
    nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */
    nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */
    nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */
    nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */
    if(cf->unt_sng[0]){
      if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True;
      if(!strcasecmp(cf->unt_sng[0],"degrees_east") || !strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True;
    } /* endif */
    if(cf->unt_sng[1]){
      if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True;
      if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True;
    } /* endif */
    assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat));
    int idx_lat;
    int idx_lon;
    if(crd0_is_lat && crd1_is_lon){
      idx_lat=0;
      idx_lon=1;
    }else{
      idx_lat=1;
      idx_lon=0;
    } /* endif */

    /* Dimensions and coordinates have been vetted. Store as primary lookup names.
       Dimensions are always returned in order [LRV,MRV]=[0,1]
       LRV is along-track direction, and MRV is across-track (at least in NASA data)
       Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */
    dmn_id_lat=cf->dmn_id[0];
    dmn_id_lon=cf->dmn_id[1];
    /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and vice-versa.
*/ //lat_nm_in=strdup(cf->dmn_nm[0]); //lon_nm_in=strdup(cf->dmn_nm[1]); lat_nm_in=strdup(cf->crd_nm[idx_lat]); lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */ lat_ctr_id=cf->crd_id[idx_lat]; lon_ctr_id=cf->crd_id[idx_lon]; lat_dmn_nm=strdup(cf->dmn_nm[0]); lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ /* Locate fields that must be present in input file Required variables are usually latitude and longitude Currently these variables must be in root group This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude] fxm: Generalize with traversal table so usual suspect coordinates may be in any group */ if(lat_ctr_id == NC_MIN_INT){ if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == 
NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */ } /* !lat_ctr_id */ if(lon_ctr_id == NC_MIN_INT){ if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */ } /* !lon_ctr_id */ if(!lat_nm_in || !lon_nm_in){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. Coordinate variables must be in the root directory (not in a group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. 
Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat_nm_in */ /* Rank of coordinates determines whether grid is curvilinear */ rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk); rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk); /* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */ if(lat_rnk*lon_rnk == 1){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat); rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon); if(dmn_id_lat == dmn_id_lon){ dmn_id_col=dmn_id_lat; dmn_id_lat=NC_MIN_INT; dmn_id_lon=NC_MIN_INT; rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm); col_dmn_nm=(char *)strdup(dmn_nm); flg_grd_1D=True; } /* !unstructured */ } /* lat_rnk == lon_rnk == 1 */ if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){ flg_grd_crv=False; flg_grd_2D=True; } /* !lat_rnk */ if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){ flg_grd_crv=True; flg_grd_2D=False; } /* !lat_rnk */ if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){ (void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk); nco_exit(EXIT_FAILURE); } /* !3D */ if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False); /* Scrutinize coordinates for their dimensions NB: Unstructure already known */ if(flg_grd_2D){ rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_2D */ if(flg_grd_crv){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids); /* fxm: use cf struct and match with units name, if any? 
normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */ dmn_id_lat=dmn_ids[0]; dmn_id_lon=dmn_ids[1]; rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_crv */ if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */ /* Locate spatial dimensions that may be present NB: bounds dimensions may present a special problem CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing Hence search for nvertices before nbnd to ensure spatial bound is found first */ if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */ /* Use dimension IDs to get dimension sizes and grid size */ if(flg_grd_1D){ rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr); lat_nbr=lon_nbr=col_nbr; }else{ rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr); rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr); col_nbr=NC_MIN_INT; } /* !flg_grd_1D */ if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr); if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr); if(flg_grd_1D){ /* Unstructured grid (e.g., CAM-SE) */ grd_rnk_nbr=dmn_nbr_1D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* 1D grids without their own boundaries are at the mercy of the weight generator */ if(dmn_id_bnd == NC_MIN_INT){ (void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its black but fragile magic.\n",nco_prg_nm_get()); flg_wrt_crn=False; /* Input could actually be from grid with no polygonal definition, e.g., CAM-SE Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc. 
Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4
	 However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file
	 So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values)
	 By default do not write grid corner values */
      grd_crn_nbr=4;
    } /* !dmn_id_bnd */
    if(bnd_nbr == 2){
      /* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving latitude and longitude bounds as 2xN (rather than 4xN) arrays even though cells have four corners
	 The "convention" is that two latitudes and two longitudes suffice to specify a rectangular boundary cell
	 In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently
	 Set number of corners to the rectangular value (4) and leave bnd_nbr as-is */
      grd_crn_nbr=4;
      flg_1D_psd_rct_bnd=True;
    } /* !bnd_nbr */
  }else if(flg_grd_2D){ /* !flg_grd_1D */
    /* Assume 2D grid of uninitialized type */
    grd_rnk_nbr=dmn_nbr_2D;
    grd_typ=nco_grd_2D_nil;
    lat_typ=nco_grd_lat_nil;
    lon_typ=nco_grd_lon_nil;
    /* Assume rectangular grids that do not specify otherwise use quadrilaterals */
    if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
    /* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2
       This signals rectangular gridcell bounds are interfaces not vertices (to save half the space)
       These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */
    if(grd_crn_nbr == 2) grd_crn_nbr=4;
    /* Convention is to archive only two bounds for rectangular grids (since sides are identical)
       Non-quadrilateral rectangular grids are untested */
    if(grd_crn_nbr == 4) bnd_nbr=2;
  }else if(flg_grd_crv){ /* !flg_grd_2D */
    /* Assume curvilinear grid (e.g., WRF) */
    flg_grd_2D=False;
    grd_rnk_nbr=dmn_nbr_2D;
    grd_typ=nco_grd_2D_unk;
    lat_typ=nco_grd_lat_unk;
    lon_typ=nco_grd_lon_unk;
    /* Assume curvilinear grids that do not specify otherwise use quadrilaterals */
    if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
    /* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles
       Non-quadrilateral curvilinear grids are untested */
    if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False);
  } /* !flg_grd_crv */

  /* Allocate space for output data */
  if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr;
  dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT));
  area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
  msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT));
  if(flg_grd_1D){
    if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
    lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
  }else if(flg_grd_2D){ /* !flg_grd_1D */
    lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
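    /* Aside, an illustrative sketch (comment only, not executed here): rectangular grids store only bnd_nbr=2 interfaces per cell yet allocate grd_crn_nbr=4 corners per cell, because each interface pair later expands CCW into four vertices, e.g.,
       lat_crn[4*lat_idx+0L]=lat_ntf[lat_idx]; lat_crn[4*lat_idx+1L]=lat_ntf[lat_idx];
       lat_crn[4*lat_idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[4*lat_idx+3L]=lat_ntf[lat_idx+1L];
       with longitude corners ordered W,E,E,W, mirroring the corner-stuffing logic in nco_grd_mk() above */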
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_crv){ /* !flg_grd_2D */ lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); } /* !flg_grd_crv */ grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Locate fields that may be present in input file */ if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lonu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area"); else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area"); else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area"); // else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */ else 
if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */ else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */ msk_nm_in=rgr->msk_var; if(msk_nm_in){ if(!strcasecmp(msk_nm_in,"none")){ /* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */ msk_nm_in=(char *)nco_free(msk_nm_in); }else{ /* User-supplied name overrides database */ rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id); } /* !msk_nm_in */ }else{ /* Otherwise search database */ if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask"); else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask"); else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask"); else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */ else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */ } /* !msk_nm_in */ /* Mask field requires special handling for non-conformant models */ if(msk_id != NC_MIN_INT){ /* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT 20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence, almost better to ignore AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. */ rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr); if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr); rcd=nco_inq_vartype(in_id,msk_id,&msk_typ); msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ)); } /* !msk */ /* All grids: Some real-world datasets violate convention that coordinates ought never have missing values CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */ has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl); if(flg_grd_1D){ /* Obtain fields that must be present in unstructured input file */ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* Obtain fields that may be present in unstructured input file */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=col_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; if(flg_1D_psd_rct_bnd){ dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); }else{ dmn_cnt[1]=grd_crn_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); } /* !flg_1D_psd_rct_bnd */ } /* !flg_grd_1D */ if(flg_grd_crv){ /* Obtain fields that must be present in curvilinear input file */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* 20150923: Also input, if present in curvilinear file, corners, area, and mask area and mask are same size as lat and lon */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx]=lat_nbr; dmn_cnt[dmn_idx+1]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ /* Corners are on curvilinear corner grid Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr Sigh... */ dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L; if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids); if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[2]; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; }else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[0]; dmn_cnt[0]=grd_crn_nbr; dmn_cnt[1]=lat_nbr; dmn_cnt[2]=lon_nbr; flg_crn_grd_lat_lon=True; }else{ (void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s\". Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); lat_bnd_id=NC_MIN_INT; lon_bnd_id=NC_MIN_INT; } /* !dmn_ids */ /* Read corner arrays only if dimension-ordering was understood (IDs were reset to NC_MIN_INT above when it was not, in which case the extrapolation branch below infers vertices instead) */ if(lat_bnd_id != NC_MIN_INT){ rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); } /* !NC_MIN_INT */ if(flg_crn_grd_lat_lon){ /* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */ double *lat_crn_tmp=NULL; double *lon_crn_tmp=NULL; lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; /* NB: Variables differ (lat vs. lon) but indexes are identical in next two lines */ lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx]; lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx]; } /* !idx */ } /* !crn_idx */ if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp); if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp); /* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]); lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]); } /* !idx */ } /* !flg_crn_grd_lat_lon */ } /* !lat_bnd_id */ } /* !flg_grd_crv */ if(flg_grd_2D){ int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */ int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */ int tpl_id=NC_MIN_INT; /* [id] ID of template field */ /* Obtain fields that must be present in input file */ dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr; rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False; /* Use fields that may be present in input file to override, if necessary, default lon/lat order area and mask are both suitable templates for determining input lat/lon ordering NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */ if(area_id != NC_MIN_INT) tpl_id=area_id; else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id; if(tpl_id != NC_MIN_INT){ int tpl_rnk_nbr; var_id=tpl_id; /* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */ rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr); rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); /* fxm: Optimize discovery of lat/lon ordering */ for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm); rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz); if(!strcmp(dmn_nm,lat_dmn_nm)){ assert(dmn_sz == lat_nbr); assert(dmn_idx == 0); lat_psn_in=dmn_idx; }
/* !lat */ if(!strcmp(dmn_nm,lon_dmn_nm)){ assert(dmn_sz == lon_nbr); assert(dmn_idx == 1); lon_psn_in=dmn_idx; } /* !lon */ } /* !dmn_idx */ } /* !tpl */ /* Obtain fields that may be present in input file */ if(area_id != NC_MIN_INT){ var_id=area_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !area */ if(msk_id != NC_MIN_INT){ var_id=msk_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; if(msk_rnk_nbr != grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr; dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk */ /* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center) Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); } /* !flg_grd_2D */ /* Additional information that may be required for any input grid */ if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl); if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl); /* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */ int flg_pck; /* [flg] Variable is packed on disk */ rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to input file to be regridded Below this line, fl_out and out_id refer to grid-file to be output */ dfl_lvl=rgr->dfl_lvl; fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; if(!fl_out){ (void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. 
Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !fl_out */ /* Define output variable values */ int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */ int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_1D){ dmn_sz_int[0]=col_nbr; }else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */ /* SCRIP introduced [lon,lat] convention because more natural for Fortran NB: This [lon,lat] convention applies ONLY to grid_dims variable Write all other SCRIP variables as [lat,lon] Nonsensical? Yes, but backwards compatibility is priceless */ lon_psn=0; lat_psn=1; dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; } /* !dmn_nbr_2D */ if(flg_grd_crv){ /* For curvilinear grids first, if necessary, infer corner boundaries Then perform sanity check using same code on inferred and copied grids */ if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ /* Only CESM CICE is known to fit these constraints Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole Nominal centers do not agree with true centers computed from corners CICE may run in decomposed/unstructured mode, each column writes separately to output buffer? This could explain missing coordinates in non-ocean gridcells However, land points are completely masked (grid centers and corners are missing) Oversight? Why not write coordinates for land-masked cells? Regridder needs corners so we fill-in missing boundaries with derived grid Gave up on inferring 20170521 once tri-pole grid complexity became apparent */ const long idx_dbg=rgr->idx_dbg; double lat_ctr_drv; /* [dgr] Latitude center, derived */ double lon_ctr_drv; /* [dgr] Longitude center, derived */ double lat_crn_drv; /* [dgr] Latitude corner, derived */ double lon_crn_drv; /* [dgr] Longitude corner, derived */ long idx_ctr_sth; /* [idx] Index of southern neighbor */ long idx_ctr_nrt; /* [idx] Index of northern neighbor */ long idx_crn_sth; /* [idx] Index of southern neighbor */ long idx_crn_nrt; /* [idx] Index of northern neighbor */ long lon_idx_crr; /* [idx] Current longitude index */ long lon_vld_frs; /* [idx] First valid longitude in latitude row */ long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */ long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */ lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long)); lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long)); /* First valid gridcell sets west and south bounds of entire grid */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); idx_crn=idx_ctr*grd_crn_nbr; lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. 
Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx_ctr=lat_idx*lon_nbr; /* Find first valid longitude at this latitude */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; lon_vld_frs=lon_idx; /* 20170519: Verified all tri-pole grid latitudes have at least one valid point. NB: sentinel for "no valid longitude found" is lon_nbr (loop fell through), not -1L */ if(lon_vld_frs == lon_nbr) abort(); for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx_crr++){ /* Find previous and next valid longitude for all longitudes at this latitude Cells can be their own previous/next valid longitude */ lon_vld_prv[lon_idx_crr]=-1L; lon_vld_nxt[lon_idx_crr]=-1L; /* Start from current longitude and move left (west)... */ for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx; /* Start from current longitude and move right (east)... */ for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx; /* Wrap west if previous valid cell not found */ if(lon_vld_prv[lon_idx_crr] == -1L) lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L]; /* Wrap east if next valid cell not found */ if(lon_vld_nxt[lon_idx_crr] == -1L) lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0]; /* NB: wrapped values may themselves still be sentinels mid-scan; this branch is disabled (if(False && ...) above) and was abandoned before completion */ } /* !lon_idx_crr */ /* Derive centers and corners for each missing point */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx_ctr=lat_idx*lon_nbr+lon_idx; idx_crn=idx_ctr*grd_crn_nbr; if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){ lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lat_ctr_drv=lat_sth+0.5*lat_ncr; lat_crn_drv=lat_sth; lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr); } /* !idx_ctr */ if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){ if(lat_idx != 0L){ /* Not bottom row */ idx_ctr_sth=idx_ctr-lon_nbr; if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){ /* Copy southern corners from northern corners of southern neighbor */ idx_crn_sth=idx_ctr_sth*grd_crn_nbr; lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L]; lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L]; lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L]; lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L]; } /* !mss_val */ } /* !lat_idx */ if(lat_idx != lat_nbr-1L){ /* Not top row */ idx_ctr_nrt=idx_ctr+lon_nbr; if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){ /* Copy northern corners from southern corners of northern neighbor */ idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr; lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L]; lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L]; lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L]; lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L]; } /* !mss_val */ } /* !lat_idx */ /* Got to here before giving up Idea was to interpolate missing cell corners between previous and next valid cell */ /* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); lon_crn_drv=lon_wst+lon_ncr*lon_idx; if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0; lat_ctr[idx_ctr]=lat_ctr_drv; lon_ctr[idx_ctr]=lon_ctr_drv; lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv;
lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr; lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr; /* Branch-cut rule */ if(lon_crn_drv+lon_ncr >= 360.0){ lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0; } /* !brnch */ } /* !mss_val */ } /* !lon_idx */ } /* !lat_idx */ if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt); if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv); } /* !CICE */ if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){ /* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array Diagnose grid corners from midpoints Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid Always use input order to write skeleton file Change that order, if necessary, to write SCRIP grid file In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point These are the three points above the point, the three points at the same latitude, and the three points beneath the point In other words, a nine-point stencil is required to define the four corners inferred around each gridcell center It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class NB: implementation is not robust to missing value points in interior of grid. 
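     A minimal sketch of the index arithmetic implemented below (the helper name fake_idx() is invented for this comment only, not NCO API): the fake grid is dimensioned (lat_nbr+2) x (lon_nbr+2), and
       // 1D offset of real center (lat_idx,lon_idx) within the unrolled fake grid
       long fake_idx(long lat_idx,long lon_idx,long lon_nbr){return (lat_idx+1L)*(lon_nbr+2L)+lon_idx+1L;}
       // Lower-left corner of real cell (i,j) is the centroid of the four fake
       // centers that straddle it: (i-1,j-1), (i-1,j), (i,j), and (i,j-1)
       lat_crn_ll=0.25*(lat_ctr_fk[fake_idx(i-1,j-1,lon_nbr)]+lat_ctr_fk[fake_idx(i-1,j,lon_nbr)]+lat_ctr_fk[fake_idx(i,j,lon_nbr)]+lat_ctr_fk[fake_idx(i,j-1,lon_nbr)]);
     Shifting this stencil by one fake row and/or column yields the other three corners; longitude corners use branch-cut-safe averages rather than raw means (see below).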
Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */ double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); long int idx_rl; /* [idx] Index into real unrolled array */ long int idx_fk; /* [idx] Index into fake unrolled array */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */ idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1; /* Copy real grid to interior of fake grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]; lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]; } /* !lon */ } /* !lat */ /* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR 20160111: Use macros/functions to determine longitude adjustments that are always less than 180 This ensures all longitudes contributing to extrapolated longitude are from same branch cut */ /* Bottom row */ lat_idx=0; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */ idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]); } /* !lon */ /* Top row */ lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */ idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]); } /* !lon */ /* Left side */ lon_idx=0; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]); } /* !lat */ /* Right side */ lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring point on right side of real grid */ 
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]); } /* !lat */ /* LL */ lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]); lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]); /* LR */ lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]); lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]); /* UR */ lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]); lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]); /* UL */ lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]); lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]); if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]); } /* !dbg */ long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */ long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */ long int idx_fk_crn_ll_ctr_ll; long int idx_fk_crn_ll_ctr_lr; long int idx_fk_crn_ll_ctr_ur; long int idx_fk_crn_ll_ctr_ul; long int idx_fk_crn_lr_ctr_ll; long int idx_fk_crn_lr_ctr_lr; long int idx_fk_crn_lr_ctr_ur; long int idx_fk_crn_lr_ctr_ul; long int idx_fk_crn_ur_ctr_ll; long int idx_fk_crn_ur_ctr_lr; long int idx_fk_crn_ur_ctr_ur; long int idx_fk_crn_ur_ctr_ul; long int idx_fk_crn_ul_ctr_ll; long int idx_fk_crn_ul_ctr_lr; long int idx_fk_crn_ul_ctr_ur; long int idx_fk_crn_ul_ctr_ul; double *crn_lat; double *crn_lon; crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); size_t wrn_nbr_max=20; size_t wrn_nbr=0; for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner" Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */ lon_idx_fk=lon_idx+1; lat_idx_fk=lat_idx+1; idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk; /* Determine index into fake array (valid everywhere it is applied) Comments after each equation are formula for real index (valid only at interior gridcells) */ idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1 idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1; idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // 
(lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx; idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // (lat_idx+1)*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx; idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1 idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1; /* 20160111: Algorithm requires that all longitudes in template be on same "branch cut" If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */ idx_crn_ll=grd_crn_nbr*idx_rl+0; lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]); lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]); idx_crn_lr=grd_crn_nbr*idx_rl+1; lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]); lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]); idx_crn_ur=grd_crn_nbr*idx_rl+2; lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]); lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]); idx_crn_ul=grd_crn_nbr*idx_rl+3; lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]); lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]); crn_lat[0]=lat_crn[idx_crn_ll]; crn_lat[1]=lat_crn[idx_crn_lr]; crn_lat[2]=lat_crn[idx_crn_ur]; crn_lat[3]=lat_crn[idx_crn_ul]; crn_lon[0]=lon_crn[idx_crn_ll]; crn_lon[1]=lon_crn[idx_crn_lr]; crn_lon[2]=lon_crn[idx_crn_ur]; crn_lon[3]=lon_crn[idx_crn_ul]; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && wrn_nbr < wrn_nbr_max){ (void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[lat_idx],lon_ctr[lon_idx]); wrn_nbr++; if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max); } /* endif */ lat_crn[idx_crn_ll]=crn_lat[0]; lat_crn[idx_crn_lr]=crn_lat[1]; lat_crn[idx_crn_ur]=crn_lat[2]; lat_crn[idx_crn_ul]=crn_lat[3]; lon_crn[idx_crn_ll]=crn_lon[0]; lon_crn[idx_crn_lr]=crn_lon[1]; lon_crn[idx_crn_ur]=crn_lon[2]; lon_crn[idx_crn_ul]=crn_lon[3]; } /* 
!lon */ } /* !lat */ if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk); if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk); if(crn_lon) crn_lon=(double *)nco_free(crn_lon); if(crn_lat) crn_lat=(double *)nco_free(crn_lat); } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_crv */ if(flg_1D_psd_rct_bnd){ double lon_brnch_min; double lon_brnch_max; double lon_dff; assert(grd_crn_nbr == 4); /* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */ for(col_idx=0;col_idx<col_nbr;col_idx++){ lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_dff=lon_brnch_max-lon_brnch_min; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff); lon_brnch_max-=360.0; }else if(lon_dff <= -180.0){ lon_brnch_max+=360.0; } /* !lon_dff */ /* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */ if(lon_brnch_min <= lon_brnch_max){ lon_bnd[2*col_idx]=lon_brnch_min; lon_bnd[2*col_idx+1]=lon_brnch_max; }else{ lon_bnd[2*col_idx]=lon_brnch_max; lon_bnd[2*col_idx+1]=lon_brnch_min; } /* end else */ } /* !col_idx */ /* Convert boundaries that were provided as pseudo-rectangular to corners */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=grd_crn_nbr*col_idx; /* fxm: OCO2 provides boundaries in CW not CCW orientation */ lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */ lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */ lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */ lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */ lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */ lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */ lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */ lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */ /* fxm: OCO2 provides boundaries in CW not CCW orientation */ } /* !col_idx */ } /* flg_1D_psd_rct_bnd */ if(flg_grd_crv || flg_1D_psd_rct_bnd){ /* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut) Starting 20151205, use same sanity check for both inferred and copied curvilinear grids 20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0] Also, it may assume input is ascending swath and fail for descending swaths Complications not fully addressed: Swaths may (verify this) turn from ascending to descending, or visa-versa, when satellite crosses latitude extrema Swaths may cross the date-line (and back!) 
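     For reference, the branch-cut-safe differencing idea that the nco_lon_dff_brnch_dgr()/nco_lon_crn_avg_brnch() helpers used above implement, re-derived as a minimal sketch for this comment (the actual NCO helpers may differ in detail):
       // Map lon_a-lon_b into [-180.0,180.0] so both operands act as if on the same branch
       double lon_dff_brnch_dgr_sketch(double lon_a,double lon_b){
         double dff=lon_a-lon_b;
         if(dff > 180.0) dff-=360.0; else if(dff < -180.0) dff+=360.0;
         return dff;
       }
       // Example: lon_a=179.0, lon_b=-179.0 yields dff=-2.0 (not +358.0), so the
       // implied midpoint lon_b+0.5*dff=-180.0 sits on the dateline, as expected
     Helpers like this keep extrapolated and averaged longitudes on a single branch; the clean-up loop below then folds any remaining out-of-range vertices back into the [0,360] or [-180,180] system detected from the input centers.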
*/ /* Determine numeric bounds of input coordinate system */ double lon_min_min; double lon_max_max; nco_bool NCO_LON_0_TO_360=True; if(has_mss_val_ctr){ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] < 0.0) break; } /* !has_mss_val_ctr */ if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False; if(NCO_LON_0_TO_360){ lon_min_min=0.0; lon_max_max=360.0; }else{ lon_min_min=-180.0; lon_max_max=180.0; } /* !NCO_LON_0_TO_360 */ /* Correct for extrapolation outside boundaries */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(lat_ctr[idx_ctr] == mss_val_ctr_dbl) continue; if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){ idx_crn_ll=grd_crn_nbr*idx_ctr+0; idx_crn_lr=grd_crn_nbr*idx_ctr+1; idx_crn_ur=grd_crn_nbr*idx_ctr+2; idx_crn_ul=grd_crn_nbr*idx_ctr+3; if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? "inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); /* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */ if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0; if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0; if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0; if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0; } /* !sanity */ } /* !idx */ /* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system Curvilinear inferred grid are, by construction, branch-cut compliant fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0; idx_crn_lr=grd_crn_nbr*idx_dbg+1; idx_crn_ur=grd_crn_nbr*idx_dbg+2; idx_crn_ul=grd_crn_nbr*idx_dbg+3; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); } /* !dbg */ } /* !flg_grd_crv || flg_1D_psd_rct_bnd */ if(flg_grd_crv){ /* Copy centers into empty output array */ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; } /* !idx */ /* Copy inferred or copied (from input) sanity-checked corners into empty output array */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ grd_crn_lat[idx]=lat_crn[idx]; grd_crn_lon[idx]=lon_crn[idx]; } /* !idx */ } /* !flg_grd_crv */ /* 20150512 Many 2D datasets have bad bounds Primary example is Gaussian grids archived by CESM models that use midpoint rule rather than iterate to compute interfaces from quadrature points Such files have correct gw arrays and incorrect cell bounds flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */ const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */ 
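/* Editorial sketch of the midpoint-rule pitfall described above (standalone illustration under stated assumptions, not NCO API; assumes a global, ascending, south-to-north latitude grid): midpoint interfaces are adequate for equal-angle grids, but on Gaussian grids the weights they imply differ from the quadrature weights, which is why the code below iterates interfaces instead of trusting archived bounds */
#if 0 /* Illustrative only, excluded from compilation */
#include <math.h> /* sin(), M_PI (POSIX) */
/* Midpoint-rule interfaces ntf[0..lat_nbr] and their implied weights, from centers ctr[0..lat_nbr-1] in degrees */
void ntf_mdp(const double * const ctr,double * const ntf,double * const wgt_mdp,const long lat_nbr)
{
  const double dgr2rdn=M_PI/180.0;
  ntf[0]=-90.0;
  ntf[lat_nbr]=90.0;
  for(long lat_idx=1;lat_idx<lat_nbr;lat_idx++) ntf[lat_idx]=0.5*(ctr[lat_idx-1]+ctr[lat_idx]);
  /* Implied weights: these match true Gaussian quadrature weights only when interfaces are iterated, not midpointed */
  for(long lat_idx=0;lat_idx<lat_nbr;lat_idx++) wgt_mdp[lat_idx]=sin(dgr2rdn*ntf[lat_idx+1])-sin(dgr2rdn*ntf[lat_idx]);
} /* !ntf_mdp */
#endif /* !sketch */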
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) if(flg_grd_2D){ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm); /* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids) These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */ if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]); if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */ if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0; for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++) lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]); if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]); if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* NB: assign unconditionally, lat_spn is used below for both grid directions; fabs() ensures positive-definite span for N->S grids */ lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]); for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++) lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]); lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]); lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; for(idx=0;idx<lon_nbr;idx++){ lon_bnd[2L*idx]=lon_ntf[idx]; lon_bnd[2L*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ }else{ /* !(lat_bnd_id && lon_bnd_id) */ /* Derive interfaces (ntf) from bounds (bnd) data on disk */ for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx]; lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L]; for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx]; lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_2D */ if(flg_grd_2D){ /* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr; if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement with input to single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value
lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */ if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm); if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa; if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv; double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision double *wgt_Gss=NULL; // [frc] Gaussian weights double precision if(lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!) if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))); if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss; } /* !Gaussian */ if(lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk; else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss; else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv; else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa; else assert(False); /* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr; for(lat_idx=2;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr; break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess 
for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ // 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid // Probably because latitude iterations assume s2n grid and ECMWF is n2s // Possibly also because latitude coordinates are stored in single precision // Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively? const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: += is the Newton-Raphson update x1=x0-f(x0)/f'(x0) in disguise: fofx_at_x0 stores -f(x0) (gw minus the current weight) while dfdx_at_x0 stores f'(x0), so x0+fofx_at_x0/dfdx_at_x0 equals x0-f(x0)/f'(x0) */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports Newton-Raphson residual %g still exceeds convergence criterion after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ if(lat_sin) lat_sin=(double *)nco_free(lat_sin); break; case nco_grd_lat_unk: /* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(lat_typ == nco_grd_lat_gss){ /* 20170510: First approximation above to exterior interfaces for Gaussian grid are ~ +/-89 degrees Loops below recompute interior interfaces only Southern- and northern-most interfaces must be explicitly assigned Inferral test for Gaussian grid _assumes_ global grid Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */ if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0; if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0; } /* !nco_grd_lat_gss */ /* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ } /* !(lat_bnd_id &&
lon_bnd_id) */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; case nco_grd_lat_unk: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown input latitude grid-type. Guessing that weights for grid of rectangles is OK.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Diagnose type of longitude grid by testing second longitude center against formulae */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; if(lon_typ == nco_grd_lon_nil){ if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb; else lon_typ=nco_grd_lon_unk; } /* !lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ)); } /* !flg_grd_2D */ if(flg_grd_2D){ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ /* Fuzzy test of latitude weight normalization */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && 1.0-lat_wgt_ttl/lat_wgt_ttl_xpc > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !flg_grd_2D */ if(flg_grd_2D){ assert(grd_crn_nbr == 4); if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ /* If interfaces were diagnosed from centers, copy corners from interfaces */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; /* LL */ lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; /* LL */ lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */ lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; /* UL */ } /* !lat_idx */ }else{ /* !lat_bnd_id */ /* If boundaries were provided in input dataset, copy corners from boundaries */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */ lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */ lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */ lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */ } /* !lat_idx */ } /* !lat_bnd_id */ } /* !flg_grd_2D */ /* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */ if(flg_grd_1D){ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; if(flg_wrt_crn){ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=lat_crn[idx2]; grd_crn_lon[idx2]=lon_crn[idx2]; } /* !crn */ }else{ /* !flg_wrt_crn */ /* Defaults for ERWG when corners are unknown */ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=0.0; grd_crn_lon[idx2]=0.0; } /* !crn */ } /* !flg_wrt_crn */ } /* !col */ } /* !flg_grd_1D */ if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ /* 20190613: Convert CW 
quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); } /* !idx */ } /* !flg_s2n */ } /* !flg_grd_2D */ /* Find span of all grids */ double lat_max; /* [dgr] Maximum latitude */ double lat_min; /* [dgr] Minimum latitude */ double lon_max; /* [dgr] Maximum longitude */ double lon_min; /* [dgr] Minimum longitude */ idx_ctr=0; if(has_mss_val_ctr){ /* Find first non-missing value center and thus corners */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); } /* !has_mss_val_ctr */ if(flg_wrt_crn){ /* Grids with corner boundaries supplied or inferred */ lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr]; lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr]; for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max; lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max; lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min; lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min; } /* !idx */ }else{ /* !flg_wrt_crn */ /* 20170424: Diagnose grid-extent when corners were not provided or inferred This is usually (always?) for 1d unstructured grids with only centers provided */ lon_max=grd_ctr_lon[idx_ctr]; lat_max=grd_ctr_lat[idx_ctr]; lon_min=grd_ctr_lon[idx_ctr]; lat_min=grd_ctr_lat[idx_ctr]; for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){ if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max; lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max; lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? grd_ctr_lat[idx_ctr] : lat_min; lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? 
grd_ctr_lon[idx_ctr] : lon_min; } /* !idx_ctr */ } /* flg_wrt_crn */ lat_spn=lat_max-lat_min; lon_spn=lon_max-lon_min; /* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */ if(flg_grd_2D){ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; }else{ /* !flg_grd_2D */ if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; } /* flg_wrt_crn */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); /* Write ERWG hints if filenames provided and grid is regional */ char *fl_hnt=NULL; char *fl_hnt_dst=NULL; char *fl_hnt_src=NULL; if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst; if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src; if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){ const char *fl_mode="w"; FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt); /* Open output file */ if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){ (void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt); if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional"); if(fl_hnt_dst) (void)fprintf(fp_hnt,"--dst_regional"); rcd=fclose(fp_hnt); if(rcd != 0){ (void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt); } /* !nco_grd_xtn */ /* Diagnose area if necessary 20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type) On the other hand CAM "area" variable is exactly what we want for gridfile Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute Re-diagnose areas considered untrustworthy so output area array does not contain missing values */ if(flg_wrt_crn && has_mss_val_area){ const double mss_val_dbl=mss_val_area_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(area[idx] == mss_val_dbl) break; if(idx < grd_sz_nbr) use_mss_val_area=True; if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in); } /* !has_mss_val_area */ /* 20170511: There remain a handful of cases when input area should be diagnosed not copied These include using ncremap in SGS mode when inferred grids must use sensible area units Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with 
area [sr] from NCO This would bias ERWG --user_areas produced values by ~10^10 Setting flg_dgn_area ensures inferred area uses [sr] */ const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */ if(flg_wrt_crn && /* If bounds are available to compute area and ... */ (area_id == NC_MIN_INT || /* Area is not in input file ... */ use_mss_val_area || /* Area is untrustworthy */ flg_dgn_area)){ /* User/application explicitly requests diagnostic area */ /* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_crv || flg_grd_1D){ /* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !flg_grd_2D */ } /* !area_id */ /* ERWG will fail unless grid file has mask variable Use nul-mask (all points included) whenever input mask variable not supplied/detected Define nul-mask true everywhere and overwrite with false below Input mask can be any type and output mask will always be NC_INT */ for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; if(msk_id != NC_MIN_INT){ /* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention) Input mask can be any type and output mask will always be NC_INT Applications: ALM/CLM mask (landmask) is NC_FLOAT and defines but does not use NC_FLOAT missing value CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value AMSR mask is NC_SHORT and has no missing value GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */ if(msk_typ != NC_INT){ if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ)); if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this pleasant informational warning. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. Mask treatment is subtle, and NCO tries to \"do the right thing\". 
Whether it does is often easiest to discern by visual inspection of the regridded results.\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ),msk_nm,msk_nm); } /* msk_typ */ switch(msk_typ){ case NC_FLOAT: if(has_mss_val_msk){ const float mss_val_flt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == 0.0f) msk[idx]=0; } /* !mss_val */ break; case NC_DOUBLE: if(has_mss_val_msk){ const double mss_val_dbl=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == 0.0) msk[idx]=0; } /* !mss_val */ break; case NC_INT: if(has_mss_val_msk){ const int mss_val_int=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == 0) msk[idx]=0; } /* !mss_val */ break; case NC_SHORT: /* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */ if(has_mss_val_msk){ const short mss_val_sht=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0; /* 20160111: AMSR kludge fxm */ // for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0; } /* !mss_val */ break; case NC_BYTE: if(has_mss_val_msk){ const nco_byte mss_val_byt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; /* 20170811: GHRSST kludge? */ } /* !mss_val */ break; default: (void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ)); nco_dfl_case_generic_err(); return NCO_ERR; break; } /* !msk_typ */ if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp); } /* !msk_id */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ /* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear) But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. 
*/ /* flg_wrt_crn=True; */ if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); if(flg_wrt_crn){ dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); } /* !flg_wrt_crn */ /* Define attributes */ aed_sct aed_mtd; char *att_nm; char *att_val; if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO inferred this grid from input file %s"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt,rgr->fl_in); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(flg_wrt_crn){ 
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); } /* !flg_wrt_crn */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); if(flg_wrt_crn){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); } /* !flg_wrt_crn */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_ugrid; if(fl_out){ /* Test UGRID: Documentation: https://github.com/ugrid-conventions/ugrid-conventions Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc ncks --cdl -v mesh_node_y ~/grd_ugrid.nc ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc ncks --cdl -v grid_center_lat,grid_corner_lat -d grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc ncks --cdl -m -M ~/grd_ugrid.nc */ char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */ char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */ char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */ char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */ char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */ char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */ char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */ char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */ char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */ char *nd_dmn_nm=NULL_CEWI; 
/* [sng] Name of dimension to recognize as nodes */ char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */ char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */ char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */ char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */ double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */ double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */ double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */ double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */ double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */ double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */ int *dg_nd; /* [idx] edge_node_connectivity variable */ int *fc_nd; /* [idx] face_node_connectivity variable */ int dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */ int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */ int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */ int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */ int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */ int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */ int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */ int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */ int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */ int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */ int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */ int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */ int msh_val=42; /* [id] Mesh topology variable value from Monty Python */ int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */ int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */ const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */ const long npe_nbr=2; /* [nbr] Number of nodes per edge */ const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */ long dg_idx; /* [idx] Counting index for edges */ long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */ long fc_idx; /* [idx] Counting index for faces */ long nd_idx; /* [idx] Counting index for nodes */ long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */ long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */ if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x"); if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y"); if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges"); if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x"); if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y"); if(!fc_dmn_nm) fc_dmn_nm=(char *)strdup("nFaces"); if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes"); if(!fc_nd_nm) fc_nd_nm=(char *)strdup("mesh_face_nodes"); if(!msh_nm) msh_nm=(char *)strdup("mesh"); if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes"); if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x"); if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y"); if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two"); if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace"); if(flg_grd_1D){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); }else if(flg_grd_2D){ /* Assume 2D grids are global and comprised of quadrilaterals */ switch(lat_typ){ case nco_grd_lat_fv: /* Currently all 2D grids are converted to the same UGRID representation fxm: 
Cap grids (e.g., FV) should eventually be written with a real cap, rather than as the "polar teeth" representation currently used. Polar teeth convention allows cap grid to be represented as rectangular on disk. However, cap grids are better suited to non-rectangular UGRID meshes */ case nco_grd_lat_eqa: case nco_grd_lat_gss: /* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */ dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */ (lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */ lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */ nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */ (lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */ 1; /* NP: NP */ break; case nco_grd_lat_unk: case nco_grd_lat_nil: default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ }else if(flg_grd_crv){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd */ dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT)); dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT)); fcx=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); const long int idx_fst_crn_ll=0; const long int idx_fst_crn_lr=1; const long int idx_fst_crn_ur=2; const long int idx_fst_crn_ul=3; /* Node Ordering: Each interior face requires one new node Node 0 at SP New latitude row moves next node North Add nodes to run West->East */ /* SP */ ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */ ndy[0]=lat_crn[0]; /* Mid */ for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){ fc_idx=nd_idx-1L; lat_idx=fc_idx/lon_nbr; lon_idx=fc_idx%lon_nbr; ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul]; } /* !nd_idx */ /* NP */ ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; /* Edge Ordering: epf_nbr is number of distinct edges-per-face (incremental, for interior cells) Each additional interior rectangular gridcell requires two new edges: Edge 0 runs South->North for all cells Edge 1 runs West->East for all cells NP row requires only one new edge per face */ /* SP */ const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ /* Only one new edge per face in last row, easiest to count backwards from last edge */ dg_idx=dg_nbr-(fc_nbr-fc_idx); /* NP faces require only one new edge, Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L; } /* !fc_idx */ /* SP */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L; fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L; fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Characteristic coordinates */ for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){ idx=dg_idx*npe_nbr; dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]); dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]); } /* !dg_idx */ /* Degenerate longitude at SP, NP, causes weird characteristic longitude unless special care taken */ for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){ idx=fc_idx*npf_nbr; if(fc_idx < lon_nbr){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]); }else if(fc_idx >= fc_nbr-lon_nbr-1){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]); }else if(fc_nd[idx+3L] != mss_val_int_out){ /* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */ fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]); }else{ abort(); } /* !fc_idx */ if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]); } /* !fc_idx */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id); rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg); rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc); rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd); rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe); rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf); dmn_ids[0]=dmn_id_dg; dmn_ids[1]=dmn_id_npe; rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_fc; dmn_ids[1]=dmn_id_npf; rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id); if(dfl_lvl > 0)
(void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl); if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO constructed this UGRID grid from scratch"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data"); att_nm=strdup("topology_dimension"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=msh_nm; aed_mtd.id=msh_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_two; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,msh_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm); rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm); aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm); rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm); aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm); rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity"); rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=dg_nd_nm; aed_mtd.id=dg_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,dg_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity"); rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; 
(void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); att_nm=strdup("_FillValue"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&mss_val_int_out; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh face"); rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh face"); rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north"); /* Begin data mode */ (void)nco_enddef(out_id); (void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=dg_nbr; dmn_cnt[1]=npe_nbr; (void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=fc_nbr; dmn_cnt[1]=npf_nbr; (void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=dg_nbr; (void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ); (void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=fc_nbr; (void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ); (void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Free memory associated with output file */ if(dgx) dgx=(double *)nco_free(dgx); if(dgy) dgy=(double *)nco_free(dgy); if(dg_nd) dg_nd=(int *)nco_free(dg_nd); if(fcx) fcx=(double *)nco_free(fcx); if(fcy) fcy=(double *)nco_free(fcy); if(fc_nd) fc_nd=(int *)nco_free(fc_nd); if(ndx) ndx=(double *)nco_free(ndx); if(ndy) ndy=(double *)nco_free(ndy); /* Free strings */ if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm); if(dgy_nm) dgy_nm=(char *)nco_free(dgy_nm); if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm); if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm); if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm); if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm); if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm); if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm); if(msh_nm) msh_nm=(char *)nco_free(msh_nm); if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm); if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm); if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm); if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm); if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm); } /* !fl_ugrid */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); /* Free strings */ if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm); if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm); if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm); if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm); if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in); if(lon_nm_in) lon_nm_in=(char *)nco_free(lon_nm_in); if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm); if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm); if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in); if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in); return rcd; } /* !nco_grd_nfr() */ double /* O [dgr] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in degrees Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_dgr()"; const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */ if(lon_dff >= 180.0){ (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-360.0; }else if(lon_dff <= -180.0){ return lon_dff+360.0; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_dgr() */ double /* O [rdn] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [rdn] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in radians Assume longitudes are within pi radians of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_rdn()"; const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */ //nco_bool dbg_prn=False; /* [flg] Print warning when longitude difference is suspicious */ /* Longitudes on different branch cuts are expected when computing polygon area, so warn only if requested with high debugging level */ if(lon_dff >= M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-M_PI-M_PI; }else if(lon_dff <= -M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff+M_PI+M_PI; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_rdn() */ double /* O [dgr] Longitude average */ nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */ (double lon_ll, /* I [dgr] Longitude at lower left of gridcell */ double lon_lr, /* I [dgr] Longitude at lower right of gridcell */ double lon_ur, /* I [dgr] Longitude at upper right of gridcell */ double lon_ul) /* I [dgr] Longitude at upper left of gridcell */ { /* Purpose: Return average of four corner longitudes of quadrilateral Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ const char fnc_nm[]="nco_lon_crn_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ lon_dff=lon_lr-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_lr, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_lr,lon_ll,lon_dff); lon_lr-=360.0; }else if(lon_dff <= -180.0){ lon_lr+=360.0; } /* !lon_dff */ lon_dff=lon_ur-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ur, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ur,lon_ll,lon_dff); lon_ur-=360.0; }else if(lon_dff <= -180.0){ lon_ur+=360.0; } /* !lon_dff */ lon_dff=lon_ul-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ul, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ul,lon_ll,lon_dff); lon_ul-=360.0; }else if(lon_dff <= -180.0){ lon_ul+=360.0; } /* !lon_dff */ return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul); } /* !nco_lon_crn_avg_brnch() */ double /* O [dgr] Longitude average */ nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */ (double *lon_crn, /* I [dgr] Longitude of gridcell corners */ long lon_nbr) /* I [nbr] Number of vertices in polygon */ { /* Purpose: Return average longitude of polygon vertices, i.e., centroid longitude Assume longitudes are within 180 degrees of one another Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_crn[0] */ // const char fnc_nm[]="nco_lon_ply_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ double lon_avg; /* [dgr] Longitude average */ int lon_idx; /* [idx] Polygon vertex index */ assert(lon_nbr != 0); lon_avg=lon_crn[0]; for(lon_idx=1;lon_idx<lon_nbr;lon_idx++){ lon_avg+=lon_crn[lon_idx]; lon_dff=lon_crn[lon_idx]-lon_crn[0]; if(lon_dff >= 180.0){ lon_avg-=360.0; }else if(lon_dff <= -180.0){ lon_avg+=360.0; } /* !lon_dff */ } /* !lon_idx */ return lon_avg/lon_nbr; } /* !nco_lon_ply_avg_brnch() */ nco_bool /* O [flg] Input corners were CCW */ nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */ (double * const crn_lat, /* [dgr] Latitude corners of gridcell */ double * const crn_lon, /* [dgr] Longitude corners of gridcell */ const int crn_nbr, /* [nbr] Number of corners per gridcell */ int idx_ccw, /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */ const int rcr_lvl) /* [nbr] Recursion level */ { /* Purpose: Determine whether corner vertices are oriented CCW If not, alter order so they are returned in CCW order Function can call itself, and rcr_lvl indicates recursion level: rcr_lvl=1: Called by host code, i.e., nco_grd_nfr() rcr_lvl=2: Called by itself, i.e., nco_ccw_chk() Assumptions: Quadrilateral vertices are already corrected to obey branch-cut rules, i.e., all vertices are on "same side" of dateline or Greenwich as appropriate Algorithm: Start crn_idx=0, i.e., quadrilateral LL corner Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR Compute cross-product A x B = C C is normal to plane containing A and B Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW if(ABC is CCW){ if(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else(ABC is not CCW){ Assume entire quadrilateral is CW Take mirror image of quadrilateral by switching B with D If(new ABC is CCW){ If(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else{ Fail (return False, meaning point should be masked) } All cases return True (i.e., CCW) from rcr_lvl=1 except last Last case returns False, and calling code should mask such an aberrant point */ const char fnc_nm[]="nco_ccw_chk()"; /* MSVC compiler chokes unless array size is compile-time constant */ const int CRN_NBR_MSVC=4; double sin_lat[CRN_NBR_MSVC]; double sin_lon[CRN_NBR_MSVC]; double cos_lat[CRN_NBR_MSVC]; double cos_lon[CRN_NBR_MSVC]; double A_tail_x,A_tail_y,A_tail_z; double A_head_x,A_head_y,A_head_z; double A_x,A_y,A_z; double B_tail_x,B_tail_y,B_tail_z; double B_head_x,B_head_y,B_head_z; double B_x,B_y,B_z; double C_x,C_y,C_z; double R_x,R_y,R_z; double lat_rdn; double lon_rdn; double dot_prd; int crn_idx; /* [idx] Corner idx */ int A_tail_idx,A_head_idx; int B_tail_idx,B_head_idx; nco_bool flg_ccw; /* [flg] Input is CCW */ assert(crn_nbr == CRN_NBR_MSVC); for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){ lat_rdn=crn_lat[crn_idx]*M_PI/180.0; lon_rdn=crn_lon[crn_idx]*M_PI/180.0; sin_lat[crn_idx]=sin(lat_rdn); cos_lat[crn_idx]=cos(lat_rdn); sin_lon[crn_idx]=sin(lon_rdn); cos_lon[crn_idx]=cos(lon_rdn); } /* !crn_idx */ /* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0 Calls from self can start from quadrilateral Point A or C To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */ A_tail_idx=idx_ccw; A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr; B_head_idx=(B_tail_idx+1)%crn_nbr; A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx]; A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx]; A_tail_z=sin_lat[A_tail_idx]; A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx]; A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx]; A_head_z=B_tail_z=R_z=sin_lat[A_head_idx]; B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx]; B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx]; B_head_z=sin_lat[B_head_idx]; A_x=A_head_x-A_tail_x; A_y=A_head_y-A_tail_y; A_z=A_head_z-A_tail_z; B_x=B_head_x-B_tail_x; B_y=B_head_y-B_tail_y; B_z=B_head_z-B_tail_z; /* Cross-Product C = A x B */ C_x=A_y*B_z-B_y*A_z; C_y=-A_x*B_z+B_x*A_z; C_z=A_x*B_y-B_x*A_y; /* Dot-Product R dot C */ dot_prd=C_x*R_x+C_y*R_y+C_z*R_z; if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False; if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is CCW but triangle CDA is not in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g.
Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; }else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is not CCW 20160124: Simplistic fix: reverse gridpoint order This only works for quadrilaterals without degenerate points */ double crn_tmp; if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); crn_tmp=crn_lat[1]; crn_lat[1]=crn_lat[3]; crn_lat[3]=crn_tmp; crn_tmp=crn_lon[1]; crn_lon[1]=crn_lon[3]; crn_lon[3]=crn_tmp; /* Check new triangle ABC */ idx_ccw=0; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ /* Inverted ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ return True; }else{ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* flg_ccw */ }else{ /* Original and Inverted ABC are not CCW */ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm); return False; } /* !flg_ccw */ } /* flg_ccw */ return flg_ccw; } /* !nco_ccw_chk() */
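The branch-cut subtraction and the CCW-orientation test above are the two error-prone cores of this file. The following standalone sketch (a minimal illustration assuming only C99; helper names are not part of the NCO API) mirrors the wrap rule of nco_lon_dff_brnch_dgr() and shows a planar analogue of the spherical cross/dot-product test in nco_ccw_chk():

/* Minimal standalone sketch of the two idioms above (illustrative names) */
#include <assert.h>
#include <stdio.h>

/* Same wrap rule as nco_lon_dff_brnch_dgr(): assume the true difference
   is within 180 degrees, so any raw difference past the branch cut wraps */
static double lon_dff_brnch(double lon_r,double lon_l)
{
  double dff=lon_r-lon_l; /* [dgr] Raw difference */
  if(dff >= 180.0) return dff-360.0;
  if(dff <= -180.0) return dff+360.0;
  return dff;
}

/* Planar analogue of the orientation test: positive z-component of
   (P1-P0) x (P2-P1) means the path P0->P1->P2 turns counter-clockwise */
static int ccw_2d(const double x[3],const double y[3])
{
  double crs=(x[1]-x[0])*(y[2]-y[1])-(x[2]-x[1])*(y[1]-y[0]);
  return crs > 0.0;
}

int main(void)
{
  double tri_x[3]={0.0,1.0,0.0};
  double tri_y[3]={0.0,0.0,1.0};
  /* Cell straddling Greenwich on a [0,360) grid: 359 -> 1 spans +2 degrees, not -358 */
  assert(lon_dff_brnch(1.0,359.0) == 2.0);
  /* Cell straddling the dateline on a [-180,180) grid */
  assert(lon_dff_brnch(-179.0,179.0) == 2.0);
  assert(ccw_2d(tri_x,tri_y)); /* Unit right triangle wound CCW */
  (void)fprintf(stdout,"branch-cut and CCW sketches OK\n");
  return 0;
}

nco_ccw_chk() performs the same sign test with 3D Cartesian vectors and a radial reference vector because gridcell corners live on the sphere, but the planar version conveys the geometry.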
ChConstraintRigidRigid.h
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2016 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Hammad Mazhar // ============================================================================= // // Description: This class handles rigid contact and computes corrections // and jacobians // ============================================================================= #pragma once #include "chrono_parallel/ChDataManager.h" #include "chrono_parallel/math/ChParallelMath.h" namespace chrono { class CH_PARALLEL_API ChConstraintRigidRigid { public: ChConstraintRigidRigid() { data_manager = 0; offset = 3; inv_h = inv_hpa = inv_hhpa = 0; } ~ChConstraintRigidRigid() {} void Setup(ChParallelDataManager* data_container_) { data_manager = data_container_; uint num_contacts = data_manager->num_rigid_contacts; inv_h = 1 / data_manager->settings.step_size; inv_hpa = 1 / (data_manager->settings.step_size + data_manager->settings.solver.alpha); inv_hhpa = inv_h * inv_hpa; if (num_contacts > 0) { contact_active_pairs.resize(int(num_contacts)); data_manager->host_data.coh_rigid_rigid.resize(num_contacts); data_manager->host_data.fric_rigid_rigid.resize(num_contacts); rotated_point_a.resize(num_contacts); rotated_point_b.resize(num_contacts); quat_a.resize(num_contacts); quat_b.resize(num_contacts); #pragma omp parallel for for (int i = 0; i < (signed)num_contacts; i++) { vec2 body = data_manager->host_data.bids_rigid_rigid[i]; uint b1 = body.x; uint b2 = body.y; contact_active_pairs[i] = bool2(data_manager->host_data.active_rigid[b1] != 0, data_manager->host_data.active_rigid[b2] != 0); ////real coh = Max( //// (data_manager->host_data.cohesion_data[b1] + data_manager->host_data.cohesion_data[b2]) * .5, 0.0); real coh = Min(data_manager->host_data.cohesion_data[b1], data_manager->host_data.cohesion_data[b2]); data_manager->host_data.coh_rigid_rigid[i] = coh; real3 f_a = data_manager->host_data.fric_data[b1]; real3 f_b = data_manager->host_data.fric_data[b2]; real3 mu; mu.x = (f_a.x == 0 || f_b.x == 0) ? 0 : (f_a.x + f_b.x) * .5; mu.y = (f_a.y == 0 || f_b.y == 0) ? 0 : (f_a.y + f_b.y) * .5; mu.z = (f_a.z == 0 || f_b.z == 0) ?
0 : (f_a.z + f_b.z) * .5; data_manager->host_data.fric_rigid_rigid[i] = mu; { quaternion quaternion_conjugate = ~data_manager->host_data.rot_rigid[b1]; real3 sbar = Rotate(data_manager->host_data.cpta_rigid_rigid[i] - data_manager->host_data.pos_rigid[b1], quaternion_conjugate); rotated_point_a[i] = real3_int(sbar, b1); quat_a[i] = quaternion_conjugate; } { quaternion quaternion_conjugate = ~data_manager->host_data.rot_rigid[b2]; real3 sbar = Rotate(data_manager->host_data.cptb_rigid_rigid[i] - data_manager->host_data.pos_rigid[b2], quaternion_conjugate); rotated_point_b[i] = real3_int(sbar, b2); quat_b[i] = quaternion_conjugate; } } } } void Project(real* gamma); void Project_Single(int index, real* gamma); void host_Project_single(int index, vec2* ids, real3* friction, real* cohesion, real* gamma); void func_Project_normal(int index, const vec2* ids, const real* cohesion, real* gam); void func_Project_sliding(int index, const vec2* ids, const real3* fric, const real* cohesion, real* gam); void func_Project_spinning(int index, const vec2* ids, const real3* fric, real* gam); void Dx(const DynamicVector<real>& x, DynamicVector<real>& output); void D_Tx(const DynamicVector<real>& x, DynamicVector<real>& output); // Compute the vector of corrections void Build_b(); // Compute the diagonal compliance matrix void Build_E(); // Compute the jacobian matrix; no allocation is performed here, // GenerateSparsity should take care of that void Build_D(); void Build_s(); // Fill in the nonzero entries in the bilateral jacobian with ones. // This operation is sequential. void GenerateSparsity(); int offset; protected: custom_vector<bool2> contact_active_pairs; real inv_h; real inv_hpa; real inv_hhpa; custom_vector<real3_int> rotated_point_a, rotated_point_b; custom_vector<quaternion> quat_a, quat_b; // Pointer to the system's data manager ChParallelDataManager* data_manager; }; }
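Setup() above reduces the two bodies' material properties to per-contact values with two rules: cohesion takes the minimum of the pair (the commented-out alternative averaged the two), and each friction component is the average of the two bodies unless either is frictionless, which zeroes the pair. A minimal standalone sketch of these rules, with plain doubles standing in for Chrono's real/real3 types:

/* Standalone sketch of the pairwise material-combination rules in Setup() */
#include <assert.h>

/* Weaker cohesion governs the pair */
static double combine_cohesion(double coh_a, double coh_b) {
    return (coh_a < coh_b) ? coh_a : coh_b;
}

/* A frictionless body makes the contact frictionless; otherwise average */
static double combine_friction(double mu_a, double mu_b) {
    return (mu_a == 0.0 || mu_b == 0.0) ? 0.0 : 0.5 * (mu_a + mu_b);
}

int main(void) {
    assert(combine_friction(0.25, 0.0) == 0.0);  /* frictionless body wins */
    assert(combine_friction(0.25, 0.75) == 0.5); /* otherwise simple average */
    assert(combine_cohesion(10.0, 3.0) == 3.0);  /* weaker cohesion governs */
    return 0;
}

The zero-dominant friction rule guarantees that pairing any body with a frictionless one never manufactures tangential resistance at the contact.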
project.c
//----------------------------------------------------------------------------- // project.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/19/14 (Build 5.1.000) // 04/14/14 (Build 5.1.004) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 04/30/15 (Build 5.1.009) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // 05/10/18 (Build 5.1.013) // Author: L. Rossman // // Project management functions. // // This module provides project-related services such as: // o opening a new project and reading its input data // o allocating and freeing memory for project objects // o setting default values for object properties and options // o initializing the internal state of all objects // o managing hash tables for identifying objects by ID name // // Build 5.1.004: // - Ignore RDII option added. // // Build 5.1.007: // - Default monthly adjustments for climate variables included. // - User-supplied GW flow equations initialized to NULL. // - Storage node exfiltration object initialized to NULL. // - Freeing of memory used for storage node exfiltration included. // // Build 5.1.008: // - Constants used for dynamic wave routing moved to dynwave.c. // - Input processing of minimum time step & number of // parallel threads for dynamic wave routing added. // - Default values of hyd. conductivity adjustments added. // - Freeing of memory used for outfall pollutant load added. // // Build 5.1.009: // - Fixed bug in computing total duration introduced in 5.1.008. // // Build 5.1.011: // - Memory management of hydraulic event dates array added. // // Build 5.1.012: // - Minimum conduit slope option initialized to 0 (none). // - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED. // // Build 5.1.013: // - omp_get_num_threads function protected against lack of compiler // support for OpenMP. // - Rain gage validation now performed after subcatchment validation. // - More robust parsing of MinSurfarea option provided. // - Support added for new RuleStep analysis option. 
// //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <string.h> #include <math.h> #if defined(_OPENMP) //(5.1.013) #include <omp.h> // #else // int omp_get_num_threads(void) { return 1;} // #endif // #include "headers.h" #include "lid.h" #include "hash.h" #include "mempool.h" //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names static char MemPoolAllocated; // TRUE if memory pool allocated //----------------------------------------------------------------------------- // External Functions (declared in funcs.h) //----------------------------------------------------------------------------- // project_open (called from swmm_open in swmm5.c) // project_close (called from swmm_close in swmm5.c) // project_readInput (called from swmm_open in swmm5.c) // project_readOption (called from readOption in input.c) // project_validate (called from swmm_open in swmm5.c) // project_init (called from swmm_start in swmm5.c) // project_addObject (called from addObject in input.c) // project_createMatrix (called from openFileForInput in iface.c) // project_freeMatrix (called from iface_closeRoutingFiles) // project_findObject // project_findID //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initPointers(void); static void setDefaults(void); static void openFiles(char *f1, char *f2, char *f3); static void createObjects(void); static void deleteObjects(void); static void createHashTables(void); static void deleteHashTables(void); //============================================================================= void project_open(char *f1, char *f2, char *f3) // // Input: f1 = pointer to name of input file // f2 = pointer to name of report file // f3 = pointer to name of binary output file // Output: none // Purpose: opens a new SWMM project. // { initPointers(); setDefaults(); openFiles(f1, f2, f3); } //============================================================================= void project_readInput() // // Input: none // Output: none // Purpose: retrieves project data from input file.
// { // --- create hash tables for fast retrieval of objects by ID names createHashTables(); // --- count number of objects in input file and create them input_countObjects(); createObjects(); // --- read project data from input file input_readData(); if ( ErrorCode ) return; // --- establish starting & ending date/time StartDateTime = StartDate + StartTime; EndDateTime = EndDate + EndTime; ReportStart = ReportStartDate + ReportStartTime; ReportStart = MAX(ReportStart, StartDateTime); // --- check for valid starting & ending date/times if ( EndDateTime <= StartDateTime ) { report_writeErrorMsg(ERR_START_DATE, ""); } else if ( EndDateTime <= ReportStart ) { report_writeErrorMsg(ERR_REPORT_DATE, ""); } else { // --- compute total duration of simulation in seconds double durationDate = EndDate - StartDate; double durationTime = EndTime - StartTime; TotalDuration = floor(durationDate * SECperDAY + durationTime * SECperDAY); // --- reporting step must be <= total duration if ( (double)ReportStep > TotalDuration ) { ReportStep = (int)(TotalDuration); } // --- reporting step can't be < routing step if ( (double)ReportStep < RouteStep ) { report_writeErrorMsg(ERR_REPORT_STEP, ""); } // --- convert total duration to milliseconds TotalDuration *= 1000.0; } } //============================================================================= void project_validate() // // Input: none // Output: none // Purpose: checks validity of project data. // { int i; int j; int err; // --- validate Curves and TimeSeries for ( i=0; i<Nobjects[CURVE]; i++ ) { err = table_validate(&Curve[i]); if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); } for ( i=0; i<Nobjects[TSERIES]; i++ ) { err = table_validate(&Tseries[i]); if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]); } // --- validate hydrology objects // (NOTE: order is important !!!!) 
climate_validate(); lid_validate(); if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE; if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE; for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i); for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i); for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i); //(5.1.013) for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i); // --- compute geometry tables for each shape curve j = 0; for ( i=0; i<Nobjects[CURVE]; i++ ) { if ( Curve[i].curveType == SHAPE_CURVE ) { Curve[i].refersTo = j; Shape[j].curve = i; if ( !shape_validate(&Shape[j], &Curve[i]) ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); j++; } } // --- validate links before nodes, since the latter can // result in adjustment of node depths for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth; for ( i=0; i<Nobjects[LINK]; i++) link_validate(i); for ( i=0; i<Nobjects[NODE]; i++) node_validate(i); // --- adjust time steps if necessary if ( DryStep < WetStep ) { report_writeWarningMsg(WARN06, ""); DryStep = WetStep; } if ( RouteStep > (double)WetStep ) { report_writeWarningMsg(WARN07, ""); RouteStep = WetStep; } // --- adjust individual reporting flags to match global reporting flag if ( RptFlags.subcatchments == ALL ) for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE; if ( RptFlags.nodes == ALL ) for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE; if ( RptFlags.links == ALL ) for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE; // --- validate dynamic wave options if ( RouteModel == DW ) dynwave_validate(); // --- adjust number of parallel threads to be used //(5.1.013) #pragma omp parallel //(5.1.008) { if ( NumThreads == 0 ) NumThreads = omp_get_num_threads(); //(5.1.008) else NumThreads = MIN(NumThreads, omp_get_num_threads()); //(5.1.008) } if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1; //(5.1.008) } //============================================================================= void project_close() // // Input: none // Output: none // Purpose: closes a SWMM project. // { deleteObjects(); deleteHashTables(); } //============================================================================= int project_init(void) // // Input: none // Output: returns an error code // Purpose: initializes the internal state of all objects. 
//
{
    int j;
    climate_initState();
    lid_initState();
    for (j=0; j<Nobjects[TSERIES]; j++)  table_tseriesInit(&Tseries[j]);
    for (j=0; j<Nobjects[GAGE]; j++)     gage_initState(j);
    for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j);
    for (j=0; j<Nobjects[NODE]; j++)     node_initState(j);
    for (j=0; j<Nobjects[LINK]; j++)     link_initState(j);
    return ErrorCode;
}

//=============================================================================

int project_addObject(int type, char *id, int n)
//
//  Input:   type = object type
//           id   = object ID string
//           n    = object index
//  Output:  returns 0 if object already added, 1 if not, -1 if hashing fails
//  Purpose: adds an object ID to a hash table
//
{
    int  result;
    int  len;
    char *newID;

    // --- do nothing if object already placed in hash table
    if ( project_findObject(type, id) >= 0 ) return 0;

    // --- use memory from the hash tables' common memory pool to store
    //     a copy of the object's ID string
    len = strlen(id) + 1;
    newID = (char *) Alloc(len*sizeof(char));
    strcpy(newID, id);

    // --- insert object's ID into the hash table for that type of object
    result = HTinsert(Htable[type], newID, n);
    if ( result == 0 ) result = -1;
    return result;
}

//=============================================================================

int project_findObject(int type, char *id)
//
//  Input:   type = object type
//           id   = object ID
//  Output:  returns index of object with given ID, or -1 if ID not found
//  Purpose: uses hash table to find index of an object with a given ID.
//
{
    return HTfind(Htable[type], id);
}

//=============================================================================

char *project_findID(int type, char *id)
//
//  Input:   type = object type
//           id   = ID name being sought
//  Output:  returns pointer to location where object's ID string is stored
//  Purpose: uses hash table to find address of given string entry.
//
{
    return HTfindKey(Htable[type], id);
}

//=============================================================================

double ** project_createMatrix(int nrows, int ncols)
//
//  Input:   nrows = number of rows (0-based)
//           ncols = number of columns (0-based)
//  Output:  returns a pointer to a matrix
//  Purpose: allocates memory for a matrix of doubles.
//
{
    int i,j;
    double **a;

    // --- allocate pointers to rows
    a = (double **) malloc(nrows * sizeof(double *));
    if ( !a ) return NULL;

    // --- allocate rows and set pointers to them
    //     (release the row pointers if this allocation fails)
    a[0] = (double *) malloc (nrows * ncols * sizeof(double));
    if ( !a[0] )
    {
        free(a);
        return NULL;
    }
    for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols;

    for ( i = 0; i < nrows; i++)
    {
        for ( j = 0; j < ncols; j++) a[i][j] = 0.0;
    }

    // --- return pointer to array of pointers to rows
    return a;
}

//=============================================================================

void project_freeMatrix(double **a)
//
//  Input:   a = matrix of doubles
//  Output:  none
//  Purpose: frees memory allocated for a matrix of doubles.
//
{
    if ( a != NULL )
    {
        if ( a[0] != NULL ) free( a[0] );
        free( a );
    }
}

//=============================================================================

int project_readOption(char* s1, char* s2)
//
//  Input:   s1 = option keyword
//           s2 = string representation of option's value
//  Output:  returns error code
//  Purpose: reads a project option from a pair of string tokens.
//
//  NOTE:    all project options have default values assigned in setDefaults().
//
{
    int      k, m, h, s;
    double   tStep;
    char     strDate[25];
    DateTime aTime;
    DateTime aDate;

    // --- determine which option is being read
    k = findmatch(s1, OptionWords);
    if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1);
    switch ( k )
    {
      // --- choice of flow units
      case FLOW_UNITS:
        m = findmatch(s2, FlowUnitWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        FlowUnits = m;
        if ( FlowUnits <= MGD ) UnitSystem = US;
        else                    UnitSystem = SI;
        break;

      // --- choice of infiltration modeling method
      case INFIL_MODEL:
        m = findmatch(s2, InfilModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        InfilModel = m;
        break;

      // --- choice of flow routing method
      case ROUTE_MODEL:
        m = findmatch(s2, RouteModelWords);
        if ( m < 0 ) m = findmatch(s2, OldRouteModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        if ( m == NO_ROUTING ) IgnoreRouting = TRUE;
        else RouteModel = m;
        if ( RouteModel == EKW ) RouteModel = KW;
        break;

      // --- simulation start date
      case START_DATE:
        if ( !datetime_strToDate(s2, &StartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation start time of day
      case START_TIME:
        if ( !datetime_strToTime(s2, &StartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending date
      case END_DATE:
        if ( !datetime_strToDate(s2, &EndDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending time of day
      case END_TIME:
        if ( !datetime_strToTime(s2, &EndTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start date
      case REPORT_START_DATE:
        if ( !datetime_strToDate(s2, &ReportStartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start time of day
      case REPORT_START_TIME:
        if ( !datetime_strToTime(s2, &ReportStartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- day of year when street sweeping begins or when it ends
      //     (year is arbitrarily set to 1947 so that the dayOfYear
      //      function can be applied)
      case SWEEP_START:
      case SWEEP_END:
        strcpy(strDate, s2);
        strcat(strDate, "/1947");
        if ( !datetime_strToDate(strDate, &aDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        m = datetime_dayOfYear(aDate);
        if ( k == SWEEP_START ) SweepStart = m;
        else SweepEnd = m;
        break;

      // --- number of antecedent dry days
      case START_DRY_DAYS:
        StartDryDays = atof(s2);
        if ( StartDryDays < 0.0 )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        break;

      // --- runoff or reporting time steps
      //     (input is in hrs:min:sec format, time step saved as seconds)
      case WET_STEP:
      case DRY_STEP:
      case REPORT_STEP:
      case RULE_STEP:                                                          //(5.1.013)
        if ( !datetime_strToTime(s2, &aTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        datetime_decodeTime(aTime, &h, &m, &s);
        h += 24*(int)aTime;
        s = s + 60*m + 3600*h;

        // --- RuleStep allowed to be 0 while other time steps must be > 0    //(5.1.013)
        if (k == RULE_STEP)                                                   //
        {                                                                     //
            if (s < 0) return error_setInpError(ERR_NUMBER, s2);              //
        }                                                                     //
        else if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2);          //

        switch ( k )
        {
          case WET_STEP:     WetStep = s;     break;
          case DRY_STEP:     DryStep = s;     break;
          case REPORT_STEP:  ReportStep = s;  break;
          case RULE_STEP:    RuleStep = s;    break;                          //(5.1.013)
        }
        break;

      // --- type of damping applied to inertial terms of dynamic wave routing
      case INERT_DAMPING:
        m = findmatch(s2, InertDampingWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        else InertDamping = m;
        break;

      // --- Yes/No options (NO = 0, YES = 1)
      case ALLOW_PONDING:
      case SLOPE_WEIGHTING:
      case SKIP_STEADY_STATE:
      case IGNORE_RAINFALL:
      case
IGNORE_SNOWMELT: case IGNORE_GWATER: case IGNORE_ROUTING: case IGNORE_QUALITY: case IGNORE_RDII: m = findmatch(s2, NoYesWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); switch ( k ) { case ALLOW_PONDING: AllowPonding = m; break; case SLOPE_WEIGHTING: SlopeWeighting = m; break; case SKIP_STEADY_STATE: SkipSteadyState = m; break; case IGNORE_RAINFALL: IgnoreRainfall = m; break; case IGNORE_SNOWMELT: IgnoreSnowmelt = m; break; case IGNORE_GWATER: IgnoreGwater = m; break; case IGNORE_ROUTING: IgnoreRouting = m; break; case IGNORE_QUALITY: IgnoreQuality = m; break; case IGNORE_RDII: IgnoreRDII = m; break; } break; case NORMAL_FLOW_LTD: m = findmatch(s2, NormalFlowWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); NormalFlowLtd = m; break; case FORCE_MAIN_EQN: m = findmatch(s2, ForceMainEqnWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); ForceMainEqn = m; break; case LINK_OFFSETS: m = findmatch(s2, LinkOffsetWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); LinkOffsets = m; break; // --- compatibility option for selecting solution method for // dynamic wave flow routing (NOT CURRENTLY USED) case COMPATIBILITY: if ( strcomp(s2, "3") ) Compatibility = SWMM3; else if ( strcomp(s2, "4") ) Compatibility = SWMM4; else if ( strcomp(s2, "5") ) Compatibility = SWMM5; else return error_setInpError(ERR_KEYWORD, s2); break; // --- routing or lengthening time step (in decimal seconds) // (lengthening time step is used in Courant stability formula // to artificially lengthen conduits for dynamic wave flow routing // (a value of 0 means that no lengthening is used)) case ROUTE_STEP: case LENGTHENING_STEP: if ( !getDouble(s2, &tStep) ) { if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_NUMBER, s2); } else { datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; tStep = s; } } if ( k == ROUTE_STEP ) { if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2); RouteStep = tStep; } else LengtheningStep = MAX(0.0, tStep); break; // --- minimum variable time step for dynamic wave routing case MIN_ROUTE_STEP: if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 ) return error_setInpError(ERR_NUMBER, s2); break; case NUM_THREADS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); NumThreads = m; break; // --- safety factor applied to variable time step estimates under // dynamic wave flow routing (value of 0 indicates that variable // time step option not used) case VARIABLE_STEP: if ( !getDouble(s2, &CourantFactor) ) return error_setInpError(ERR_NUMBER, s2); if ( CourantFactor < 0.0 || CourantFactor > 2.0 ) return error_setInpError(ERR_NUMBER, s2); break; // --- minimum surface area (ft2 or sq. 
meters) associated with nodes // under dynamic wave flow routing case MIN_SURFAREA: if (!getDouble(s2, &MinSurfArea)) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) if (MinSurfArea < 0.0) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) break; // --- minimum conduit slope (%) case MIN_SLOPE: if ( !getDouble(s2, &MinSlope) ) return error_setInpError(ERR_NUMBER, s2); if ( MinSlope < 0.0 || MinSlope >= 100 ) return error_setInpError(ERR_NUMBER, s2); MinSlope /= 100.0; break; // --- maximum trials / time step for dynamic wave routing case MAX_TRIALS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); MaxTrials = m; break; // --- head convergence tolerance for dynamic wave routing case HEAD_TOL: if ( !getDouble(s2, &HeadTol) ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- steady state tolerance on system inflow - outflow case SYS_FLOW_TOL: if ( !getDouble(s2, &SysFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } SysFlowTol /= 100.0; break; // --- steady state tolerance on nodal lateral inflow case LAT_FLOW_TOL: if ( !getDouble(s2, &LatFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } LatFlowTol /= 100.0; break; // --- method used for surcharging in dynamic wave flow routing //(5.1.013) case SURCHARGE_METHOD: m = findmatch(s2, SurchargeWords); if (m < 0) return error_setInpError(ERR_KEYWORD, s2); SurchargeMethod = m; break; case TEMPDIR: // Temporary Directory sstrncpy(TempDir, s2, MAXFNAME); break; } return 0; } //============================================================================= void initPointers() // // Input: none // Output: none // Purpose: assigns NULL to all dynamic arrays for a new project. // { Gage = NULL; Subcatch = NULL; Node = NULL; Outfall = NULL; Divider = NULL; Storage = NULL; Link = NULL; Conduit = NULL; Pump = NULL; Orifice = NULL; Weir = NULL; Outlet = NULL; Pollut = NULL; Landuse = NULL; Pattern = NULL; Curve = NULL; Tseries = NULL; Transect = NULL; Shape = NULL; Aquifer = NULL; UnitHyd = NULL; Snowmelt = NULL; Event = NULL; MemPoolAllocated = FALSE; } //============================================================================= void setDefaults() // // Input: none // Output: none // Purpose: assigns default values to project variables. // { int i, j; // Project title & temp. file path for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], ""); strcpy(TempDir, ""); // Interface files Frain.mode = SCRATCH_FILE; // Use scratch rainfall file Fclimate.mode = NO_FILE; Frunoff.mode = NO_FILE; Frdii.mode = NO_FILE; Fhotstart1.mode = NO_FILE; Fhotstart2.mode = NO_FILE; Finflows.mode = NO_FILE; Foutflows.mode = NO_FILE; Frain.file = NULL; Fclimate.file = NULL; Frunoff.file = NULL; Frdii.file = NULL; Fhotstart1.file = NULL; Fhotstart2.file = NULL; Finflows.file = NULL; Foutflows.file = NULL; Fout.file = NULL; Fout.mode = NO_FILE; // Analysis options UnitSystem = US; // US unit system FlowUnits = CFS; // CFS flow units InfilModel = HORTON; // Horton infiltration method RouteModel = KW; // Kin. wave flow routing method SurchargeMethod = EXTRAN; // Use EXTRAN method for surcharging //(5.1.013) CrownCutoff = 0.96; //(5.1.013) AllowPonding = FALSE; // No ponding at nodes InertDamping = SOME; // Partial inertial damping NormalFlowLtd = BOTH; // Default normal flow limitation ForceMainEqn = H_W; // Hazen-Williams eqn. 
for force mains LinkOffsets = DEPTH_OFFSET; // Use depth for link offsets LengtheningStep = 0; // No lengthening of conduits CourantFactor = 0.0; // No variable time step MinSurfArea = 0.0; // Force use of default min. surface area MinSlope = 0.0; // No user supplied minimum conduit slope SkipSteadyState = FALSE; // Do flow routing in steady state periods IgnoreRainfall = FALSE; // Analyze rainfall/runoff IgnoreRDII = FALSE; // Analyze RDII IgnoreSnowmelt = FALSE; // Analyze snowmelt IgnoreGwater = FALSE; // Analyze groundwater IgnoreRouting = FALSE; // Analyze flow routing IgnoreQuality = FALSE; // Analyze water quality WetStep = 300; // Runoff wet time step (secs) DryStep = 3600; // Runoff dry time step (secs) RuleStep = 0; // Rules evaluated at each routing step RouteStep = 300.0; // Routing time step (secs) MinRouteStep = 0.5; // Minimum variable time step (sec) ReportStep = 900; // Reporting time step (secs) StartDryDays = 0.0; // Antecedent dry days MaxTrials = 0; // Force use of default max. trials HeadTol = 0.0; // Force use of default head tolerance SysFlowTol = 0.05; // System flow tolerance for steady state LatFlowTol = 0.05; // Lateral flow tolerance for steady state NumThreads = 0; // Number of parallel threads to use NumEvents = 0; // Number of detailed routing events // Deprecated options SlopeWeighting = TRUE; // Use slope weighting Compatibility = SWMM4; // Use SWMM 4 up/dn weighting method // Starting & ending date/time StartDate = datetime_encodeDate(2004, 1, 1); StartTime = datetime_encodeTime(0,0,0); StartDateTime = StartDate + StartTime; EndDate = StartDate; EndTime = 0.0; ReportStartDate = NO_DATE; ReportStartTime = NO_DATE; SweepStart = 1; SweepEnd = 365; // Reporting options RptFlags.input = FALSE; RptFlags.continuity = TRUE; RptFlags.flowStats = TRUE; RptFlags.controls = FALSE; RptFlags.subcatchments = FALSE; RptFlags.nodes = FALSE; RptFlags.links = FALSE; RptFlags.nodeStats = FALSE; RptFlags.averages = FALSE; // Temperature data Temp.dataSource = NO_TEMP; Temp.tSeries = -1; Temp.ta = 70.0; Temp.elev = 0.0; Temp.anglat = 40.0; Temp.dtlong = 0.0; Temp.tmax = MISSING; // Wind speed data Wind.type = MONTHLY_WIND; for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0; // Snowmelt parameters Snow.snotmp = 34.0; Snow.tipm = 0.5; Snow.rnm = 0.6; // Snow areal depletion curves for pervious and impervious surfaces for ( i=0; i<2; i++ ) { for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0; } // Evaporation rates Evap.type = CONSTANT_EVAP; for (i=0; i<12; i++) { Evap.monthlyEvap[i] = 0.0; Evap.panCoeff[i] = 1.0; } Evap.recoveryPattern = -1; Evap.recoveryFactor = 1.0; Evap.tSeries = -1; Evap.dryOnly = FALSE; // Climate adjustments for (i = 0; i < 12; i++) { Adjust.temp[i] = 0.0; // additive adjustments Adjust.evap[i] = 0.0; // additive adjustments Adjust.rain[i] = 1.0; // multiplicative adjustments Adjust.hydcon[i] = 1.0; // hyd. conductivity adjustments } Adjust.rainFactor = 1.0; Adjust.hydconFactor = 1.0; } //============================================================================= void openFiles(char *f1, char *f2, char *f3) // // Input: f1 = name of input file // f2 = name of report file // f3 = name of binary output file // Output: none // Purpose: opens a project's input and report files. 
// { // --- initialize file pointers to NULL Finp.file = NULL; Frpt.file = NULL; Fout.file = NULL; // --- save file names sstrncpy(Finp.name, f1, MAXFNAME); sstrncpy(Frpt.name, f2, MAXFNAME); sstrncpy(Fout.name, f3, MAXFNAME); // --- check that file names are not identical if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3)) { writecon(FMT11); ErrorCode = ERR_FILE_NAME; return; } // --- open input and report files if ((Finp.file = fopen_cached(f1,"rt")) == NULL) { writecon(FMT12); writecon(f1); ErrorCode = ERR_INP_FILE; return; } if ((Frpt.file = fopen_cached(f2,"wt")) == NULL) { writecon(FMT13); ErrorCode = ERR_RPT_FILE; return; } } //============================================================================= void createObjects() // // Input: none // Output: none // Purpose: allocates memory for project's objects. // // NOTE: number of each type of object has already been determined in // project_readInput(). // { int j, k; // --- allocate memory for each category of object if ( ErrorCode ) return; Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage)); Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch)); Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode)); Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall)); Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider)); Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage)); Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink)); Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit)); Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump)); Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice)); Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir)); Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet)); Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut)); Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse)); Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern)); Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable)); Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable)); Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer)); UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd)); Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt)); Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape)); // --- create array of detailed routing event periods Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent)); Event[NumEvents].start = BIG; Event[NumEvents].end = BIG + 1.0; // --- create LID objects lid_create(Nobjects[LID], Nobjects[SUBCATCH]); // --- create control rules ErrorCode = controls_create(Nobjects[CONTROL]); if ( ErrorCode ) return; // --- create cross section transects ErrorCode = transect_create(Nobjects[TRANSECT]); if ( ErrorCode ) return; // --- allocate memory for infiltration data infil_create(Nobjects[SUBCATCH], InfilModel); // --- allocate memory for water quality state variables for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].initBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].concPonded = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].surfaceBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); 
} for (j = 0; j < Nobjects[NODE]; j++) { Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].extQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].inQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].reactorQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].extPollutFlag = (int *) calloc(Nobjects[POLLUT], sizeof(int)); Node[j].extInflow = NULL; Node[j].dwfInflow = NULL; Node[j].rdiiInflow = NULL; Node[j].treatment = NULL; } for (j = 0; j < Nobjects[LINK]; j++) { Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].extQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].reactorQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].extPollutFlag = (int *) calloc(Nobjects[POLLUT], sizeof(int)); } // --- allocate memory for land use buildup/washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { Landuse[j].buildupFunc = (TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup)); Landuse[j].washoffFunc = (TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff)); } // --- allocate memory for subcatchment landuse factors for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].landFactor = (TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor)); for (k = 0; k < Nobjects[LANDUSE]; k++) { Subcatch[j].landFactor[k].buildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } } // --- initialize buildup & washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { for (k = 0; k < Nobjects[POLLUT]; k++) { Landuse[j].buildupFunc[k].funcType = NO_BUILDUP; Landuse[j].buildupFunc[k].normalizer = PER_AREA; Landuse[j].washoffFunc[k].funcType = NO_WASHOFF; } } // --- initialize rain gage properties for (j = 0; j < Nobjects[GAGE]; j++) { Gage[j].tSeries = -1; strcpy(Gage[j].fname, ""); } // --- initialize subcatchment properties for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].outSubcatch = -1; Subcatch[j].outNode = -1; Subcatch[j].infil = -1; Subcatch[j].groundwater = NULL; Subcatch[j].gwLatFlowExpr = NULL; Subcatch[j].gwDeepFlowExpr = NULL; Subcatch[j].snowpack = NULL; Subcatch[j].lidArea = 0.0; for (k = 0; k < Nobjects[POLLUT]; k++) { Subcatch[j].initBuildup[k] = 0.0; } } // --- initialize RDII unit hydrograph properties for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j); // --- initialize snowmelt properties for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j); // --- initialize storage node exfiltration for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL; // --- initialize link properties for (j = 0; j < Nobjects[LINK]; j++) { Link[j].xsect.type = -1; Link[j].cLossInlet = 0.0; Link[j].cLossOutlet = 0.0; Link[j].cLossAvg = 0.0; Link[j].hasFlapGate = FALSE; } for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1; // --- initialize reporting flags for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE; for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE; for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE; // --- initialize curves, time series, and time patterns for (j = 0; j < Nobjects[CURVE]; j++) table_init(&Curve[j]); for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]); for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j); } 
//=============================================================================

void deleteObjects()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for a project's objects.
//
//  NOTE:    care is taken to first free objects that are properties of another
//           object before the latter is freed (e.g., we must free a
//           subcatchment's land use factors before freeing the subcatchment).
//
{
    int j, k;

    // --- free memory for landuse factors & groundwater
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            FREE(Subcatch[j].landFactor[k].buildup);
        }
        FREE(Subcatch[j].landFactor);
        FREE(Subcatch[j].groundwater);
        gwater_deleteFlowExpression(j);
        FREE(Subcatch[j].snowpack);
    }

    // --- free memory for buildup/washoff functions
    if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        FREE(Landuse[j].buildupFunc);
        FREE(Landuse[j].washoffFunc);
    }

    // --- free memory for water quality state variables
    //     (every per-pollutant array allocated in createObjects()
    //      is released here)
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        FREE(Subcatch[j].initBuildup);
        FREE(Subcatch[j].oldQual);
        FREE(Subcatch[j].newQual);
        FREE(Subcatch[j].pondedQual);
        FREE(Subcatch[j].concPonded);
        FREE(Subcatch[j].totalLoad);
        FREE(Subcatch[j].surfaceBuildup);
    }
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        FREE(Node[j].oldQual);
        FREE(Node[j].newQual);
        FREE(Node[j].extQual);
        FREE(Node[j].inQual);
        FREE(Node[j].reactorQual);
        FREE(Node[j].extPollutFlag);
    }
    if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
    {
        FREE(Link[j].oldQual);
        FREE(Link[j].newQual);
        FREE(Link[j].extQual);
        FREE(Link[j].totalLoad);
        FREE(Link[j].reactorQual);
        FREE(Link[j].extPollutFlag);
    }

    // --- free memory used for rainfall infiltration
    infil_delete();

    // --- free memory used for storage exfiltration
    if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++)
    {
        if ( Storage[j].exfil )
        {
            FREE(Storage[j].exfil->btmExfil);
            FREE(Storage[j].exfil->bankExfil);
            FREE(Storage[j].exfil);
        }
    }

    // --- free memory used for outfall pollutants loads
    if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++)
        FREE(Outfall[j].wRouted);

    // --- free memory used for nodal inflows & treatment functions
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        inflow_deleteExtInflows(j);
        inflow_deleteDwfInflows(j);
        rdii_deleteRdiiInflow(j);
        treatmnt_delete(j);
    }

    // --- delete table entries for curves and time series
    if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++)
        table_deleteEntries(&Tseries[j]);
    if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++)
        table_deleteEntries(&Curve[j]);

    // --- delete cross section transects
    transect_delete();

    // --- delete control rules
    controls_delete();

    // --- delete LIDs
    lid_delete();

    // --- now free each major category of object
    FREE(Gage);
    FREE(Subcatch);
    FREE(Node);
    FREE(Outfall);
    FREE(Divider);
    FREE(Storage);
    FREE(Link);
    FREE(Conduit);
    FREE(Pump);
    FREE(Orifice);
    FREE(Weir);
    FREE(Outlet);
    FREE(Pollut);
    FREE(Landuse);
    FREE(Pattern);
    FREE(Curve);
    FREE(Tseries);
    FREE(Aquifer);
    FREE(UnitHyd);
    FREE(Snowmelt);
    FREE(Shape);
    FREE(Event);
}

//=============================================================================

void createHashTables()
//
//  Input:   none
//  Output:  none
//  Purpose: allocates memory for object ID hash tables
//
{
    int j;
    MemPoolAllocated = FALSE;
    for (j = 0; j < MAX_OBJ_TYPES ; j++)
    {
        Htable[j] = HTcreate();
        if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
    }

    // --- initialize memory pool used to store object ID's
    if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
    else MemPoolAllocated = TRUE;
}

//=============================================================================

void deleteHashTables()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for object ID hash tables
//
{
    int j;
    for (j = 0; j < MAX_OBJ_TYPES; j++)
    {
        if ( Htable[j] != NULL )
HTfree(Htable[j]); } // --- free object ID memory pool if ( MemPoolAllocated ) AllocFreePool(); } //=============================================================================
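
/* A note on the matrix helpers above: project_createMatrix() stores all
   elements in one contiguous block and derives each row pointer from a[0],
   so normal a[i][j] indexing works while the whole matrix can be released
   with exactly two free() calls. Below is a minimal standalone sketch of
   the same idiom, independent of SWMM's globals; names are illustrative. */

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int nrows = 3, ncols = 4, i, j;

    double **a = (double **) malloc(nrows * sizeof(double *));
    if ( !a ) return 1;
    a[0] = (double *) calloc((size_t)nrows * ncols, sizeof(double));
    if ( !a[0] ) { free(a); return 1; }
    for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols;  /* rows point into one block */

    a[2][3] = 42.0;                       /* ordinary 2-D indexing still works */
    for ( i = 0; i < nrows; i++ )
    {
        for ( j = 0; j < ncols; j++ ) printf("%4.1f ", a[i][j]);
        printf("\n");
    }

    free(a[0]);                           /* one free for all elements ...      */
    free(a);                              /* ... and one for the row pointers   */
    return 0;
}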
vhcc_matrix.h
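// vhcc_matrix: builds a VHCC-style (vectorized hybrid COO+CSR) sparse-matrix
// layout for SpMV on wide-SIMD processors. Entries are sorted into vertical
// panels, each panel is sliced evenly across its threads, and per-slice scan
// masks record where row boundaries fall inside each SIMD vector of VECLEN
// entries. Note that the Cilk Plus array notation used below (e.g. a[0:n],
// __sec_reduce_add) requires the Intel compiler.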
#pragma once

#include "util.h"
#include "mem.h"
#include <cstdlib>
#include <cstdint>
#include <vector>
#include <map>
#include <time.h>
#include <sys/time.h>

#define MICRO_IN_SEC 1000000.00

// wall-clock time in seconds; static inline so that including this header
// from several translation units does not produce duplicate definitions
static inline double microtime()
{
    struct timeval tv;
    struct timezone tz;
    gettimeofday(&tv, &tz);
    return tv.tv_sec + tv.tv_usec/MICRO_IN_SEC;
}

using namespace std;

const int MAX_CORES = 60;
const int MAX_THREADS_PER_CORE = 4;
//const int MAX_THREADS_PER_CORE = 1;

typedef struct {
    int vbase;
    int rbase;
    int sbase;
    int last_row;
    int overflow_row;
    int start_vec;
    int end_vec;
    int panel_id;
    int merge_start;
    int merge_end;
} thr_info_t;

typedef struct {
    double *tmp_result;
} panel_info_t;

template<typename IndexType = int, typename ValueType = double>
class vhcc_matrix
{
public:
    const static int VECLEN = 512/8/sizeof(ValueType);

    typedef IndexType index_t;
    typedef ValueType value_t;
    typedef uint8_t   mask_t;

    vhcc_matrix(index_t num_rows, index_t num_cols, index_t num_entries,
                index_t *row_idx, index_t *col_idx, value_t *vals)
        : _num_rows(num_rows), _num_cols(num_cols), _num_entries(num_entries),
          _row_idx(row_idx), _col_idx(col_idx), _vals(vals)
    {
        _thr_info   = NULL;
        _veceor_ptr = NULL;
        _scan_mask  = NULL;   // must be NULLed here: the destructor tests it
        _row_arr    = NULL;
        _col_arr    = NULL;
        _vals_arr   = NULL;
    }

    ~vhcc_matrix()
    {
        FREE(_row_idx);
        FREE(_col_idx);
        FREE(_vals);
        if (_thr_info   != NULL) FREE(_thr_info);
        if (_veceor_ptr != NULL) FREE(_veceor_ptr);
        if (_scan_mask  != NULL) FREE(_scan_mask);
        if (_row_arr    != NULL) FREE(_row_arr);
        if (_col_arr    != NULL) FREE(_col_arr);
        if (_vals_arr   != NULL) FREE(_vals_arr);
    }

    index_t get_num_rows()    { return _num_rows; }
    index_t get_num_cols()    { return _num_cols; }
    index_t get_num_entries() { return _num_entries; }
    index_t *get_row_idx()    { return _row_idx; }
    index_t *get_col_idx()    { return _col_idx; }
    value_t *get_vals()       { return _vals; }

    int get_num_panels()           { return _num_panels; }
    panel_info_t *get_panel_info() { return _panel_info; }
    int get_num_threads()          { return _num_threads; }
    int get_num_vectors()          { return _num_vectors; }
    index_t get_pad_entries()      { return _pad_entries; }
    index_t get_pad_rows()         { return _pad_rows; }
    index_t get_pad_cols()         { return _pad_cols; }
    thr_info_t *get_thr_info()     { return _thr_info; }
    index_t get_thr_info_size()    { return _thr_info_size; }
    index_t *get_veceor_ptr()      { return _veceor_ptr; }
    index_t get_veceor_size()      { return _veceor_size; }
    uint8_t *get_scan_mask()       { return _scan_mask; }
    index_t get_scan_mask_size()   { return _scan_mask_size; }
    index_t *get_row_arr()         { return _row_arr; }
    index_t get_row_arr_size()     { return _row_arr_size; }
    index_t *get_col_arr()         { return _col_arr; }
    index_t get_col_arr_size()     { return _col_arr_size; }
    value_t *get_vals_arr()        { return _vals_arr; }
    index_t get_vals_arr_size()    { return _vals_arr_size; }

    void convert(int num_threads, int threads_per_core, int num_panel,
                 int rlength, int clength, char* filename);

private:
    index_t _num_rows;
    index_t _num_cols;
    index_t _num_entries;
    index_t *_row_idx;
    index_t *_col_idx;
    value_t *_vals;

    int _rlength;
    int _clength;
    int _num_cores;
    int _num_panels;
    panel_info_t *_panel_info;
    int _num_threads;
    index_t _num_vectors;
    index_t _nnz_per_panel;
    index_t _pad_entries;
    index_t _extended_rows;
    index_t _pad_rows;
    index_t _pad_cols;
    thr_info_t *_thr_info;
    index_t _thr_info_size;
    index_t *_veceor_ptr;
    index_t _veceor_size;
    uint8_t *_scan_mask;
    index_t _scan_mask_size;
    index_t *_tmp_row_arr;
    index_t *_row_arr;
    index_t _row_arr_size;
    index_t *_col_arr;
    index_t _col_arr_size;
    value_t *_vals_arr;
    index_t _vals_arr_size;

    typedef struct {
        index_t row;
        index_t col;
        value_t val;
    } coo_tuple_t;

    typedef vector<index_t>     index_1d_t;
    typedef vector<index_1d_t>  index_2d_t;
    typedef vector<index_2d_t>  index_3d_t;
    typedef vector<value_t>     value_1d_t;
    typedef vector<value_1d_t>  value_2d_t;
    typedef vector<value_2d_t>  value_3d_t;
    typedef vector<coo_tuple_t> tuple_1d_t;
    typedef vector<tuple_1d_t>  tuple_2d_t;
    typedef vector<tuple_2d_t>  tuple_3d_t;

    struct slice_t_tag {
        int global_tid;
        int panel_id;
        index_t nnz;
        index_t n_vec;
        index_t start_nnz;
        index_t end_nnz;
        index_t vec_write_base;
        index_t start_vec;
        index_t end_vec;
        index_t first_row;
        index_t last_row;
        index_t overflow_row;
        index_t n_colblk;
        index_t n_rowblk;
        index_1d_t row_arr;
        index_1d_t col_arr;
        value_1d_t val_arr;
        index_3d_t row_blocks;
        index_3d_t col_blocks;
        value_3d_t val_blocks;
        tuple_3d_t blocks;
        index_2d_t block_nnz_cnt;
        index_1d_t tmp_veceor;
        index_1d_t tmp_row_arr;
        vector<uint8_t> tmp_scan_mask;
    };
    typedef struct slice_t_tag slice_t;

    template<typename T>
    struct reverse_weight_sorter {
    public:
        reverse_weight_sorter(vector<T>& weights) : _weights(weights) { }
        bool operator()(const int i, const int j) {
            if (_weights[i] > _weights[j]) return true;
            return false;
        }
    private:
        vector<T>& _weights;
    };

    struct rowcol_sorter {
    public:
        bool operator()(const coo_tuple_t& i, const coo_tuple_t& j) {
            if (i.row < j.row) return true;
            if (i.row > j.row) return false;
            if (i.col < j.col) return true;
            if (i.col > j.col) return false;
            return false;
        }
    };

    struct colrow_sorter {
    public:
        bool operator()(const coo_tuple_t& i, const coo_tuple_t& j) {
            if (i.col < j.col) return true;
            if (i.col > j.col) return false;
            if (i.row < j.row) return true;
            if (i.row > j.row) return false;
            return false;
        }
    };

    typedef struct {
        int id;
        int base_tid;
        int num_threads;
        index_t nnz;
        index_t start_nnz;
        index_t end_nnz;
        vector<coo_tuple_t> coo;
        vector<slice_t> slices;
    } panel_t;

    void partition(panel_t& panel);
    void sort_vertical_partition(panel_t& panel);

    colrow_sorter _panel_sorter;
    rowcol_sorter _slice_sorter;
};

// implementation

template<typename IndexType, typename ValueType>
void vhcc_matrix<IndexType, ValueType>
::partition(panel_t& panel)
{
    vector<slice_t>& slices = panel.slices;
    vector<coo_tuple_t>& coo = panel.coo;
    int nnz = panel.nnz;
    int num_threads = panel.num_threads;

    // sort the panel's entries by (row, col) before slicing it horizontally
    stable_sort(panel.coo.begin(), panel.coo.end(), _slice_sorter);

    slices.resize(panel.num_threads);
    if (panel.num_threads == 0) return;

    // split the panel's nonzeros evenly among its threads
    index_t nnz_per_thread = (nnz+num_threads-1) / num_threads;
    index_t nnz_remain = nnz;
    index_t cur_index = 0;
    for (int i = 0; i < num_threads; ++i) {
        slice_t& slice = slices[i];
        slice.global_tid = panel.base_tid + i;
        slice.panel_id = panel.id;
        if (nnz_remain >= nnz_per_thread) {
            slice.nnz = nnz_per_thread;
            nnz_remain -= nnz_per_thread;
        } else {
            slice.nnz = nnz_remain;
            nnz_remain = 0;
        }
        slice.n_vec = (slice.nnz+VECLEN-1) / VECLEN;
        slice.start_nnz = cur_index;
        slice.end_nnz = cur_index + slice.nnz;
        cur_index += slice.nnz;
    }

#pragma omp parallel for
    for (int i = 0; i < num_threads; ++i) {
        slice_t& slice = slices[i];
        slice.row_arr = index_1d_t(slice.n_vec*VECLEN);
        slice.col_arr = index_1d_t(slice.n_vec*VECLEN);
        slice.val_arr = value_1d_t(slice.n_vec*VECLEN);
        index_t tstart = slice.start_nnz;
        index_t tend = slice.end_nnz;
        slice.first_row = coo[tstart].row;
        slice.last_row = tend == tstart ? slice.first_row : coo[tend-1].row;
        slice.overflow_row = _extended_rows + slice.global_tid * VECLEN;
        index_t drows = slice.last_row - slice.first_row;
        index_t n_colblk = (_clength != -1) ?
(_num_cols+_clength-1)/_clength : 1; index_t n_rowblk = (_rlength != -1) ? (drows+_rlength-1)/_rlength + 1 : 1; slice.n_colblk = n_colblk; slice.n_rowblk = n_rowblk; slice.blocks = tuple_3d_t(n_rowblk, tuple_2d_t(n_colblk, tuple_1d_t())); slice.block_nnz_cnt = index_2d_t(n_rowblk, index_1d_t(n_colblk, 0)); for (index_t j = tstart; j < tend; ++j) { index_t row = coo[j].row; index_t col = coo[j].col; index_t rblk = (_rlength != -1) ? (row-slice.first_row)/_rlength : 0; index_t cblk = (_clength != -1) ? col/_clength : 0; slice.block_nnz_cnt[rblk][cblk] += 1; } for (int p = 0; p < n_rowblk; ++p) { for (int q = 0; q < n_colblk; ++q) { int reserve = slice.block_nnz_cnt[p][q]; slice.blocks[p][q].reserve(reserve); } } for (index_t j = tstart; j < tend; ++j) { index_t row = coo[j].row; index_t col = coo[j].col; value_t val = coo[j].val; index_t shifted_row; if (row == slice.last_row) shifted_row = slice.overflow_row; else shifted_row = row; index_t rblk = (_rlength != -1) ? (row-slice.first_row)/_rlength : 0; index_t cblk = (_clength != -1) ? col/_clength : 0; coo_tuple_t tuple = { shifted_row, col, val }; slice.blocks[rblk][cblk].push_back(tuple); } index_t ptr = 0; for (index_t c = 0; c < slice.n_colblk; ++c) { for (index_t r = 0; r < slice.n_rowblk; ++r) { rowcol_sorter sorter; stable_sort(slice.blocks[r][c].begin(), slice.blocks[r][c].end(), sorter); for (index_t k = 0; k < slice.blocks[r][c].size(); ++k) { slice.row_arr[ptr] = slice.blocks[r][c][k].row; slice.col_arr[ptr] = slice.blocks[r][c][k].col; slice.val_arr[ptr] = slice.blocks[r][c][k].val; ptr++; } } } int pad = slice.n_vec * VECLEN - slice.nnz; for (int p = 0; p < pad; ++p) { slice.row_arr[ptr + p] = slice.row_arr[ptr-1]; slice.col_arr[ptr + p] = slice.col_arr[ptr-1]; slice.val_arr[ptr + p] = 0; } } #pragma omp parallel for for (int i = 0; i < num_threads; ++i) { slice_t& slice = slices[i]; slice.tmp_veceor.reserve(_num_rows); slice.tmp_row_arr.reserve(_num_rows); slice.tmp_scan_mask.reserve(_num_rows * 4); for (index_t v = 0; v < slice.n_vec; ++v) { index_t vstart = v*VECLEN; index_t vend = (v+1)*VECLEN; index_t vlen = vend - vstart; index_t tmprow1[VECLEN], tmprow2[VECLEN], eor[VECLEN]; tmprow1[0:vlen] = slice.row_arr.data()[vstart:vlen]; if (vend == slice.n_vec*VECLEN) { tmprow2[0:vlen-1] = slice.row_arr.data()[vstart+1:vlen-1]; tmprow2[vlen-1] = slice.row_arr[vend-1] + 1; } else { tmprow2[0:vlen] = slice.row_arr.data()[vstart+1:vlen]; } eor[:] = (tmprow2[:] - tmprow1[:]) != 0; int cnt = __sec_reduce_add(eor[:]); bool is_eor = cnt > 0; if (is_eor) { slice.tmp_veceor.push_back(v); for (int i = 0; i < VECLEN; ++i) if (eor[i] == 1) slice.tmp_row_arr.push_back(tmprow1[i]); mask_t mask1, mask2, mask3, maskwr, tmask, m; mask1 = mask2 = mask3 = maskwr = 0; for (int i = 0; i < VECLEN; ++i) maskwr |= (eor[i] << i); tmask = maskwr << 1; mask1 = (~tmask) & 0xAA; m = tmask & 0xCC; mask2 = (~(m | m<<1)) & 0xCC; m = tmask & 0xF0; mask3 = (~(m | m<<1 | m <<2 | m << 3)) & 0xF0; slice.tmp_scan_mask.push_back(mask1); slice.tmp_scan_mask.push_back(mask2); slice.tmp_scan_mask.push_back(mask3); slice.tmp_scan_mask.push_back(maskwr); } } } } template<typename IndexType, typename ValueType> void vhcc_matrix<IndexType, ValueType> ::convert(int num_threads, int threads_per_core, int num_panels, int rlength, int clength, char* filename) { vector<index_t> row_ptr(_num_rows+1, 0); index_t r = 0; row_ptr[0] = 0; // do the row of CSR format for (index_t i = 0; i < _num_entries; ++i) { index_t ridx = _row_idx[i] + 1 - 1; if (r != ridx) { for (index_t p = r+1; p 
< ridx; ++p) row_ptr[p] = i;
            r = ridx;
            row_ptr[r] = i;
        }
    }
    // fill row_ptr entries for the trailing rows that have no nonzeros
    for (index_t p = r+1; p < _num_rows; ++p) row_ptr[p] = _num_entries;
    row_ptr[_num_rows] = _num_entries;

    double t_pre = microtime();

    // nnz-per-row statistics (min / max / standard deviation)
    index_t max_nnzpr = -1;
    index_t min_nnzpr = _num_entries;
    double std_nnzpr = 0;
    double ave_nnzpr = double(_num_entries)/_num_rows;
#pragma omp parallel for reduction(max: max_nnzpr) reduction(min: min_nnzpr) reduction(+: std_nnzpr)
    for (index_t i = 0; i < _num_rows; ++i) {
        index_t ncol = row_ptr[i+1] - row_ptr[i];
        min_nnzpr = ncol < min_nnzpr ? ncol : min_nnzpr;
        max_nnzpr = ncol > max_nnzpr ? ncol : max_nnzpr;
        std_nnzpr += (ncol - ave_nnzpr)*(ncol - ave_nnzpr);
    }
    std_nnzpr = sqrt(std_nnzpr/_num_rows);

    vector<coo_tuple_t> coo(_num_entries);
#pragma omp parallel for
    for (int i = 0; i < _num_entries; ++i) {
        coo_tuple_t tmp_tuple;
        tmp_tuple.row = _row_idx[i];
        tmp_tuple.col = _col_idx[i];
        tmp_tuple.val = _vals[i];
        coo[i] = tmp_tuple;
    }

    if (rlength < -1 || clength < -1) {
        printf("negative number invalid: rlength/clength must be -1 or non-negative.\n");
        exit(1);
    }
    _rlength = rlength;
    _clength = clength;

    _num_threads = num_threads;
    if (num_threads % threads_per_core != 0) {
        printf("Expect num_threads to be divisible by %d\n", threads_per_core);
        exit(1);
    }
    _num_cores = (num_threads + threads_per_core-1) / threads_per_core;
    _num_panels = num_panels;
    _nnz_per_panel = (_num_entries+_num_panels-1) / _num_panels;
    _extended_rows = (_num_rows + VECLEN-1) / VECLEN * VECLEN;
    if (_num_threads % _num_panels != 0) {
        printf("num_threads (%d) is not divisible by num_panels (%d)\n",
               _num_threads, _num_panels);
        exit(1);
    }
    int threads_per_panel = _num_threads / _num_panels;

    std::cout<<"sorting............."<<endl;
    stable_sort(coo.begin(), coo.end(), _panel_sorter);
    std::cout<<"sorting.................end "<<endl;

    // split the entries into vertical panels and assign threads to each
    vector<panel_t> panels(_num_panels);
    index_t thr_remain = _num_threads;
    index_t nnz_remain = _num_entries;
    index_t cur_index = 0;
    index_t base_tid = 0;
    for (int i = 0; i < _num_panels; ++i) {
        panel_t& panel = panels[i];
        panel.id = i;
        if (thr_remain >= threads_per_panel) {
            panel.num_threads = threads_per_panel;
            thr_remain -= threads_per_panel;
        } else {
            panel.num_threads = thr_remain;
            thr_remain = 0;
        }
        panel.base_tid = base_tid;
        base_tid += panel.num_threads;
        if (nnz_remain >= _nnz_per_panel) {
            panel.nnz = _nnz_per_panel;
            nnz_remain -= _nnz_per_panel;
        } else {
            panel.nnz = nnz_remain;
            nnz_remain = 0;
        }
        panel.start_nnz = cur_index;
        panel.end_nnz = cur_index + panel.nnz;
        cur_index += panel.nnz;
    }

#pragma omp parallel for
    for (int c = 0; c < _num_panels; ++c) {
        panel_t& panel = panels[c];
        panel.coo.resize(panel.nnz);
        for (int k = 0; k < panel.nnz; ++k) {
            panel.coo[k] = coo[panel.start_nnz + k];
        }
        partition(panel);
    }

    index_t veceor_size = 0;
    index_t row_arr_size = 0;
    index_t scan_mask_size = 0;
#pragma omp parallel for reduction(+: veceor_size, row_arr_size, scan_mask_size)
    for (int j = 0; j < _num_panels; ++j) {
        for (int i = 0; i < panels[j].num_threads; ++i) {
            slice_t& slice = panels[j].slices[i];
            veceor_size += slice.tmp_veceor.size();
            row_arr_size += slice.tmp_row_arr.size();
            scan_mask_size += slice.tmp_scan_mask.size();
        }
    }

    index_t num_vectors = 0;
    for (int j = 0; j < _num_panels; ++j) {
        for (int i = 0; i < panels[j].num_threads; ++i) {
            slice_t& slice = panels[j].slices[i];
            slice.vec_write_base = num_vectors * VECLEN;
            slice.start_vec = num_vectors;
            slice.end_vec = num_vectors + slice.n_vec;
            num_vectors += slice.n_vec;
        }
    }
    _num_vectors = num_vectors;
    _pad_entries = _num_vectors * VECLEN;
    _pad_rows = _extended_rows + _num_threads *
VECLEN; _pad_cols = _num_cols; _thr_info_size = _num_threads; _thr_info = (thr_info_t *)MALLOC(_thr_info_size * sizeof(thr_info_t)); _col_arr_size = _num_vectors * VECLEN; _vals_arr_size = _num_vectors * VECLEN; _col_arr = (index_t *)MALLOC(_col_arr_size * sizeof(index_t)); _vals_arr = (value_t *)MALLOC(_vals_arr_size * sizeof(value_t)); _tmp_row_arr = (index_t *)MALLOC(_num_vectors*VECLEN * sizeof(index_t)); _veceor_size = veceor_size; _row_arr_size = row_arr_size; _scan_mask_size = scan_mask_size; _veceor_ptr = (index_t *)MALLOC(_veceor_size * sizeof(index_t)); _row_arr = (index_t *)MALLOC(_row_arr_size * sizeof(index_t)); _scan_mask = (uint8_t *)MALLOC(_scan_mask_size * sizeof(uint8_t)); #pragma omp parallel for for (int j = 0; j < _num_panels; ++j) { for (int i = 0; i < panels[j].num_threads; ++i) { slice_t& slice = panels[j].slices[i]; std::copy(slice.col_arr.begin(), slice.col_arr.end(), &_col_arr[slice.vec_write_base]); std::copy(slice.val_arr.begin(), slice.val_arr.end(), &_vals_arr[slice.vec_write_base]); // for debugging use std::copy(slice.row_arr.begin(), slice.row_arr.end(), &_tmp_row_arr[slice.vec_write_base]); } } index_t v_accum = 0; index_t vbase = 0; index_t rbase = 0; index_t sbase = 0; for (int j = 0; j < _num_panels; ++j) { for (int i = 0; i < panels[j].num_threads; ++i) { slice_t& slice = panels[j].slices[i]; int tid = slice.global_tid; _thr_info[tid].vbase = vbase; _thr_info[tid].sbase = sbase; _thr_info[tid].rbase = rbase; _thr_info[tid].last_row = slice.last_row; _thr_info[tid].overflow_row = slice.overflow_row; _thr_info[tid].start_vec = slice.start_vec; _thr_info[tid].end_vec = slice.end_vec; _thr_info[tid].panel_id = slice.panel_id; _thr_info[tid].merge_start = 0; _thr_info[tid].merge_end = 0; for (index_t v = 0; v < slice.tmp_veceor.size(); ++v) { _veceor_ptr[vbase + v] = slice.tmp_veceor[v] + v_accum; } v_accum += slice.n_vec; std::copy(slice.tmp_row_arr.begin(), slice.tmp_row_arr.end(), &_row_arr[rbase]); std::copy(slice.tmp_scan_mask.begin(), slice.tmp_scan_mask.end(), &_scan_mask[sbase]); vbase += slice.tmp_veceor.size(); rbase += slice.tmp_row_arr.size(); sbase += slice.tmp_scan_mask.size(); } } _panel_info = (panel_info_t *)MALLOC(_num_panels * sizeof(panel_info_t)); for (int i = 0; i < _num_panels; ++i) { value_t *tmp_result = (value_t *)MALLOC(_pad_rows * sizeof(value_t)); memset(tmp_result, 0, _pad_rows * sizeof(value_t)); _panel_info[i].tmp_result = tmp_result; } int workers_per_core = threads_per_core; int n_workers = _num_cores * workers_per_core; index_t num_yvec = _extended_rows / VECLEN; index_t num_yvec_per_thread = (num_yvec + n_workers-1) / n_workers; for (int i = 0; i < n_workers; ++i) { index_t merge_start = i * num_yvec_per_thread*VECLEN; index_t merge_end = merge_start + num_yvec_per_thread*VECLEN > _extended_rows ? _extended_rows : merge_start + num_yvec_per_thread*VECLEN; int coreid = i / workers_per_core; int lthrid = i % workers_per_core; int globid = i; _thr_info[globid].merge_start = merge_start; _thr_info[globid].merge_end = merge_end; } std::cout<<"============="<<endl; // std::cout<<"The Pre-processing Time of VHCC is "<<filename<<" "<< microtime() - t_pre<<endl; std::cout<<"The Pre-processing(CSR->VHCC) Time of VHCC is "<< microtime() - t_pre<<" seconds. 
[file: "<<filename<<"] [threads: "<<num_threads<<"] [numPanels: "<<num_panels<<"]"<<endl; std::cout<<"============="<<endl; printf("Matrix %d x %d, nnz %d\n", _num_rows, _num_cols, _num_entries); printf("Number of threads: %d\n", _num_threads); printf("Number of cores: %d\n", _num_cores); printf("Number of panels: %d\n", _num_panels); printf("Threads per core: %d\n", threads_per_core); printf("\n"); }
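
// For orientation, a hypothetical driver sketching how the class above is
// meant to be used. It assumes mem.h's MALLOC/FREE take a byte count like
// malloc/free do (the destructor releases the COO arrays with FREE, so
// ownership passes to the object); the toy matrix and all names here are
// illustrative, not part of the library.
#include "vhcc_matrix.h"

int main()
{
    typedef vhcc_matrix<int, double> matrix_t;   // VECLEN = 8 for doubles

    // toy dense 4x4 matrix, 16 nonzeros, in COO form
    // (more nonzeros than threads, so no slice ends up empty)
    int nrows = 4, ncols = 4, nnz = 16;
    int    *rows = (int *)    MALLOC(nnz * sizeof(int));
    int    *cols = (int *)    MALLOC(nnz * sizeof(int));
    double *vals = (double *) MALLOC(nnz * sizeof(double));
    int k = 0;
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncols; ++j, ++k) {
            rows[k] = i; cols[k] = j; vals[k] = 1.0 + k;
        }

    // ownership of rows/cols/vals passes to A; its destructor FREEs them
    matrix_t A(nrows, ncols, nnz, rows, cols, vals);

    // 4 threads at 4 threads/core, 1 panel, no row/col blocking (-1);
    // the filename argument is only echoed in the timing printout
    A.convert(4, 4, 1, -1, -1, (char *)"toy.mtx");
    return 0;
}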
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*25 + q*25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0+4); float32x4_t _k891011 = vld1q_f32(kernel0+8); float32x4_t _k12131415 = vld1q_f32(kernel0+12); float32x4_t _k16171819 = vld1q_f32(kernel0+16); float32x4_t _k20212223 = vld1q_f32(kernel0+20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _sum2 = vld1q_f32(outptr2); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r04 = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r04, 1); float32x4_t _r02 = vextq_f32(_r00, _r04, 2); float32x4_t _r03 = vextq_f32(_r00, _r04, 3); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 3); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r54 = vld1q_f32(r5 + 4); float32x4_t _r51 = vextq_f32(_r50, _r54, 1); float32x4_t _r52 = vextq_f32(_r50, _r54, 2); float32x4_t _r53 = 
vextq_f32(_r50, _r54, 3); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k0123, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r11, _k0123, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r13, _k0123, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r14, _k4567, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r20, _k4567, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k4567, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r22, _k4567, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r23, _k891011, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r24, _k891011, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r30, _k891011, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r31, _k891011, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r32, _k12131415, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r33, _k12131415, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r34, _k12131415, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r40, _k12131415, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r41, _k16171819, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r42, _k16171819, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r43, _k16171819, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r44, _k16171819, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r50, _k20212223, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r51, _k20212223, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r52, _k20212223, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r53, _k20212223, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r54, _k24242424, 0); vst1q_f32(outptr, _sum); vst1q_f32(outptr2, _sum2); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( // "veor q13, q13 \n" // "veor q14, q14 \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = out "0: \n" // q11 = rx1 / rx3 // q12 = rx2 // q13 q14 = intermediate sum register "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n"// q8 = out2 "pld [%4, #256] \n" // r1 "vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14 "add %4, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r11 "vmul.f32 q13, q9, %e19[1] \n" "vmla.f32 q8, q9, %e18[0] \n" "vext.32 q12, q9, q10, #2 \n"// r12 "vmla.f32 q7, q11, %f19[0] \n" "vmul.f32 q14, q11, %e18[1] \n" "vext.32 q11, q9, q10, #3 \n"// r13 "vmla.f32 q13, q12, %f19[1] \n" "vmla.f32 q8, q12, %f18[0] \n" "vmla.f32 q7, q11, %e20[0] \n" "vmla.f32 q14, q11, %f18[1] \n" "pld [%5, #256] \n" "vmla.f32 q13, q10, %e20[1] \n" "vmla.f32 q8, q10, %e19[0] \n" // r2 "vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24 "add 
%5, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r21 "vmla.f32 q7, q9, %f20[0] \n" "vmla.f32 q14, q9, %e19[1] \n" "vext.32 q12, q9, q10, #2 \n"// r22 "vmla.f32 q13, q11, %f20[1] \n" "vmla.f32 q8, q11, %f19[0] \n" "vext.32 q11, q9, q10, #3 \n"// r23 "vmla.f32 q7, q12, %e21[0] \n" "vmla.f32 q14, q12, %f19[1] \n" "vmla.f32 q13, q11, %e21[1] \n" "vmla.f32 q8, q11, %e20[0] \n" "pld [%6, #256] \n" "vmla.f32 q7, q10, %f21[0] \n" "vmla.f32 q14, q10, %e20[1] \n" // r3 "vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34 "add %6, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r31 "vmla.f32 q13, q9, %f21[1] \n" "vmla.f32 q8, q9, %f20[0] \n" "vext.32 q12, q9, q10, #2 \n"// r32 "vmla.f32 q7, q11, %e22[0] \n" "vmla.f32 q14, q11, %f20[1] \n" "vext.32 q11, q9, q10, #3 \n"// r33 "vmla.f32 q13, q12, %e22[1] \n" "vmla.f32 q8, q12, %e21[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q14, q11, %e21[1] \n" "pld [%7, #256] \n" "vmla.f32 q13, q10, %f22[1] \n" "vmla.f32 q8, q10, %f21[0] \n" // r4 "vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44 "add %7, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r41 "vmla.f32 q7, q9, %e23[0] \n" "vmla.f32 q14, q9, %f21[1] \n" "vext.32 q12, q9, q10, #2 \n"// r42 "vmla.f32 q13, q11, %e23[1] \n" "vmla.f32 q8, q11, %e22[0] \n" "vext.32 q11, q9, q10, #3 \n"// r43 "vmla.f32 q7, q12, %f23[0] \n" "vmla.f32 q14, q12, %e22[1] \n" "vmla.f32 q13, q11, %f23[1] \n" "vmla.f32 q8, q11, %f22[0] \n" "pld [%3, #256] \n" "vmla.f32 q7, q10, %e24[0] \n" "vmla.f32 q14, q10, %f22[1] \n" // r0 and r5 "vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r01 "vmla.f32 q13, q11, %e18[1] \n" "vext.32 q12, q9, q10, #2 \n"// r02 "vmla.f32 q7, q12, %f18[0] \n" "vext.32 q11, q9, q10, #3 \n"// r03 "pld [%8, #256] \n" "vmla.f32 q13, q11, %f18[1] \n" // r5 "vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54 "add %8, #16 \n" "vmla.f32 q8, q11, %e23[0] \n" "vmla.f32 q14, q12, %e24[0] \n" "vmla.f32 q7, q9, %e18[0] \n" "vmla.f32 q13, q10, %e19[0] \n" "vext.32 q9, q11, q12, #1 \n"// r51 "vext.32 q10, q11, q12, #2 \n"// r52 "vmla.f32 q14, q9, %e23[1] \n" "vext.32 q9, q11, q12, #3 \n"// r53 "vmla.f32 q8, q10, %f23[0] \n" "vmla.f32 q14, q9, %f23[1] \n" "vadd.f32 q7, q7, q13 \n" // "veor q13, q13 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vadd.f32 q8, q8, q14 \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = out // "veor q14, q14 \n" "vst1.f32 {d16-d17}, [%2]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424) // %24 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; float sum2 = 0; #if __ARM_NEON float32x4_t _r1 = vld1q_f32(r1); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _sum = vmulq_f32(_r1, _k1); float32x4_t _sum2 = vmulq_f32(_r1, _k0123); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _k2 = vld1q_f32(k2); _sum = vmlaq_f32(_sum, _r2, _k2); _sum2 = vmlaq_f32(_sum2, _r2, _k1); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k3 = vld1q_f32(k3); _sum = vmlaq_f32(_sum, _r3, _k3); _sum2 = vmlaq_f32(_sum2, _r3, _k2); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); _sum2 = vmlaq_f32(_sum2, _r4, _k3); float32x4_t _r0 = vld1q_f32(r0); _sum = vmlaq_f32(_sum, _r0, _k0123); float32x4_t _r5 = vld1q_f32(r5); _sum2 = vmlaq_f32(_sum2, _r5, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum = r4[4] * k4[4]; _r_t4 = vextq_f32(_r_t4, _r_t4, 1); _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3); _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4); sum2 = r5[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2); sum += vget_lane_f32(_ss_ss2, 0); sum2 += vget_lane_f32(_ss_ss2, 1); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; #endif // __ARM_NEON *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int 
remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r04 = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r04, 1); float32x4_t _r02 = vextq_f32(_r00, _r04, 2); float32x4_t _r03 = vextq_f32(_r00, _r04, 3); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 3); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); vst1q_f32(outptr, _sum); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( // "veor q15, q15 \n"// _sum3 = 0; "pld [%1, #128] \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j); "add %2, #16 \n" "0: \n" "vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j); // "veor q13, q13 \n"// _sum2 = 0; // "veor q14, q14 \n"// _sum3 = 0; "vext.32 q10, q8, q9, #1 \n"// _r01 "vext.32 q11, q8, q9, #2 \n"// _r02 "vext.32 q12, q8, q9, #3 \n"// _r03 "vmla.f32 q7, q8, %e14[0] \n" "vmul.f32 q13, q10, %e14[1] \n" "pld [%3, #256] \n" "vmul.f32 q14, q11, %f14[0] \n" "vmul.f32 q15, q12, %f14[1] \n" "vmla.f32 q7, q9, %e15[0] \n" "vld1.f32 {d16-d19}, [%3] \n" "add %3, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %e15[1] \n" "vmla.f32 q13, q10, %f15[0] \n" "pld [%4, #256] \n" "vmla.f32 q14, q11, %f15[1] \n" "vmla.f32 q15, q12, %e16[0] \n" "vmla.f32 q7, q9, %e16[1] \n" "vld1.f32 {d16-d19}, [%4] \n" "add %4, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, 
q9, #3 \n" "vmla.f32 q7, q8, %f16[0] \n" "vmla.f32 q13, q10, %f16[1] \n" "pld [%5, #256] \n" "vmla.f32 q14, q11, %e17[0] \n" "vmla.f32 q15, q12, %e17[1] \n" "vmla.f32 q7, q9, %f17[0] \n" "vld1.f32 {d16-d19}, [%5] \n" "add %5, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %f17[1] \n" "vmla.f32 q13, q10, %e18[0] \n" "pld [%6, #256] \n" "vmla.f32 q14, q11, %e18[1] \n" "vmla.f32 q15, q12, %f18[0] \n" "vmla.f32 q7, q9, %f18[1] \n" "vld1.f32 {d16-d19}, [%6] \n" "add %6, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %e19[0] \n" "vmla.f32 q13, q10, %e19[1] \n" "vmla.f32 q14, q11, %f19[0] \n" "vmla.f32 q15, q12, %f19[1] \n" "vmla.f32 q7, q9, %e20[0] \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q7, q7, q13 \n" // "veor q15, q15 \n"// _sum3 = 0; "pld [%2, #256] \n" "vadd.f32 q7, q7, q14 \n" "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j); "add %2, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "pld [%1, #128] \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum = r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma 
omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*25 + q*25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0+4); float32x4_t _k891011 = vld1q_f32(kernel0+8); float32x4_t _k12131415 = vld1q_f32(kernel0+12); float32x4_t _k16171819 = vld1q_f32(kernel0+16); float32x4_t _k20212223 = vld1q_f32(kernel0+20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); #endif // __ARM_NEON for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4x2_t _r00_02461357 = vld2q_f32(r0); float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8); float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14 float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15 float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6 float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8 float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9 float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10 float32x4x2_t _r10_02461357 = vld2q_f32(r1); float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8); float32x4_t _r1_8101214 = _r10nx2.val[0]; float32x4_t _r1_9111315 = _r10nx2.val[1]; float32x4_t _r10 = _r10_02461357.val[0]; float32x4_t _r11 = _r10_02461357.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1); float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1); float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2); float32x4x2_t _r20_02461357 = vld2q_f32(r2); float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8); float32x4_t _r2_8101214 = _r20nx2.val[0]; float32x4_t _r2_9111315 = _r20nx2.val[1]; float32x4_t _r20 = _r20_02461357.val[0]; float32x4_t _r21 = _r20_02461357.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1); float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1); float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2); float32x4x2_t _r30_02461357 = vld2q_f32(r3); float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8); float32x4_t _r3_8101214 = _r30nx2.val[0]; float32x4_t _r3_9111315 = _r30nx2.val[1]; float32x4_t _r30 = _r30_02461357.val[0]; float32x4_t _r31 = _r30_02461357.val[1]; float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1); float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1); float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2); float32x4x2_t _r40_02461357 = vld2q_f32(r4); float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8); float32x4_t _r4_8101214 = _r40nx2.val[0]; float32x4_t _r4_9111315 = _r40nx2.val[1]; float32x4_t _r40 = _r40_02461357.val[0]; float32x4_t _r41 = _r40_02461357.val[1]; float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1); float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1); float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = 
vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); vst1q_f32(outptr, _sum); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( // "veor q15, q15 \n"// _sump3 = 0; // "veor q13, q13 \n"// _sump2 = 0; // "veor q14, q14 \n"// _sump3 = 0; "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15 "pld [%1, #128] \n" "0: \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr "vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8 "vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9 "vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10 "vmla.f32 q7, q8, %e14[0] \n" "vmul.f32 q13, q9, %e14[1] \n" "pld [%3, #256] \n" "vmul.f32 q14, q12, %f14[0] \n" "vmul.f32 q15, q11, %f14[1] \n" "vmla.f32 q7, q10, %e15[0] \n" "vld2.f32 {d16-d19}, [%3]! \n" "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e15[1] \n" "vmla.f32 q13, q9, %f15[0] \n" "pld [%4, #256] \n" "vmla.f32 q14, q12, %f15[1] \n" "vmla.f32 q15, q11, %e16[0] \n" "vmla.f32 q7, q10, %e16[1] \n" "vld2.f32 {d16-d19}, [%4]! \n" "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f16[0] \n" "vmla.f32 q13, q9, %f16[1] \n" "pld [%5, #256] \n" "vmla.f32 q14, q12, %e17[0] \n" "vmla.f32 q15, q11, %e17[1] \n" "vmla.f32 q7, q10, %f17[0] \n" "vld2.f32 {d16-d19}, [%5]! \n" "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f17[1] \n" "vmla.f32 q13, q9, %e18[0] \n" "pld [%6, #256] \n" "vmla.f32 q14, q12, %e18[1] \n" "vmla.f32 q15, q11, %f18[0] \n" "vmla.f32 q7, q10, %f18[1] \n" "vld2.f32 {d16-d19}, [%6]! \n" "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e19[0] \n" "vmla.f32 q13, q9, %e19[1] \n" "vmla.f32 q14, q12, %f19[0] \n" "vmla.f32 q15, q11, %f19[1] \n" "vmla.f32 q7, q10, %e20[0] \n" "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7 "vadd.f32 q14, q14, q15 \n" "vadd.f32 q7, q7, q13 \n" // "veor q15, q15 \n"// _sump3 = 0; // "veor q13, q13 \n"// _sump2 = 0; "pld [%2, #256] \n" "vadd.f32 q7, q7, q14 \n" "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15 // "veor q14, q14 \n"// _sump3 = 0; "vst1.f32 {d14-d15}, [%1]! 
\n" "pld [%1, #128] \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
main.c
#include "omp.h" #include "conf_graphics.h" #include "conf_tree_gfx.h" #include "tree.h" #include "tree_gfx.h" #include <stdlib.h> #include <time.h> #include "evolution.h" #include "conf_evolution.h" #include "misc.h" #include <stdint.h> #define NUM_THREADS 6 #define NUM_GEN 4 int main(int argc, char **argv) { bool run; char *tree_genome[EVO_UNITS_ON_GENERATION]; treenode_t *tree[2][EVO_UNITS_ON_GENERATION]; int i = 0, buffer = 0, generation = 0, num_threads; //float fitness[EVO_UNITS_ON_GENERATION]; float *fitness = malloc(sizeof(float) * EVO_UNITS_ON_GENERATION); float fitness_mean; #ifdef __SDL__ SDL_Event event; #endif // init GFX_init(); srand(0); for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) { tree[0][i] = NULL; tree[1][i] = NULL; } // generate initial population for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) { tree_genome[i] = malloc(sizeof(char) * EVO_INITIAL_NUM_BRANCHES); EVO_get_random_genome(tree_genome[i], EVO_INITIAL_NUM_BRANCHES); tree_init(&tree[0][i]); tree_build(tree[0][i], &tree_genome[i]); } if (argc > 1) { num_threads = atoi(argv[1]); } else { num_threads = 4; } omp_set_num_threads(num_threads); // main loop run = true; #pragma omp parallel firstprivate(generation) { for (generation = 0; generation < NUM_GEN && run; generation++) { #ifdef __SDL__ #pragma omp single { // ======= EVOLVE ====== printf("======= Generation %d\n", generation); while (SDL_PollEvent(&event)) { if (event.type == SDL_QUIT) { run = false; break; } } } #endif // fitness fitness_mean = 0; #pragma omp for private(i) reduction(+:fitness_mean) for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) { if (run) { fitness[i] = EVO_fitness(tree[buffer][i], false); fitness_mean += fitness[i]; } } #pragma omp single { for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) printf("tree[%d] fitness %f\n", i, fitness[i]); // sort by fitness EVO_sort_by_fitness(fitness, &tree[buffer][0]); // crossover of fittest in other buffer EVO_crossover_on_generation(&tree[!buffer][0], &tree[buffer][0]); MISC_gen_rand(); } // mutate trees #pragma omp for private(i) for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) EVO_mutate(tree[!buffer][i], i); #pragma omp single { printf( "Generation mean fitness %f\n", fitness_mean / EVO_UNITS_ON_GENERATION); buffer = !buffer; } } } #ifdef __SDL__ GFX_Clear(GFX_WHITE); TREEGFX_draw( tree[!buffer][0], SCREEN_WIDTH / 2, SCREEN_HEIGHT - 100, 0, 0 ); GFX_Present(); run = true; while (run) while (SDL_PollEvent(&event)) { if (event.type == SDL_QUIT) { run = false; break; } } #endif for (i = 0; i < EVO_UNITS_ON_GENERATION; i++) { if (tree[0][i] != NULL) { tree_free(&tree[0][i]); } if (tree[1][i] != NULL) { tree_free(&tree[1][i]); } } //free(tree_genome); #ifdef __SDL__ SDL_Quit(); #endif return 0; }
GB_unop__atanh_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atanh_fc64_fc64) // op(A') function: GB (_unop_tran__atanh_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = catanh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = catanh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = catanh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATANH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atanh_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = catanh (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = catanh (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atanh_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
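// A standalone sketch of the non-bitmap branch of _unop_apply__atanh_fc64_fc64
// above: an elementwise complex atanh. GxB_FC64_t is GraphBLAS's double-complex
// type; C99 <complex.h> is used here so the sketch compiles without GraphBLAS,
// and the FC64->FC64 "cast" is an identity, as in the generated code.
#include <complex.h>

void apply_atanh_fc64(double complex *Cx, const double complex *Ax,
                      long long anz, int nthreads)
{
    long long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        double complex aij = Ax[p];   // aij = Ax [pA]
        Cx[p] = catanh(aij);          // cij = catanh (aij)
    }
}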
tiger_fmt_plug.c
/* Tiger cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_tiger; #elif FMT_REGISTERS_H john_register_one(&fmt_tiger); #else #include <string.h> #include "arch.h" #include "sph_tiger.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP static int omp_t = 1; #include <omp.h> // OMP_SCALE tuned on core i7 quad core HT // 1 - 235k // 64 - 7723k // 128 - 10311K // 256 - 12043K // 512 - 13543 // 1k - 14256k // 2k - 14860k ** this one chosen // 4k - 15093k // 8k - 14935k // 16k - 14931k #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 128 #else #define OMP_SCALE (1024*2) #endif // __MIC__ #endif // OMP_SCALE #endif // _OPENMP #include "memdbg.h" #define FORMAT_LABEL "Tiger" #define FORMAT_NAME "" #define FORMAT_TAG "$tiger$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "Tiger 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 24 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tiger_tests[] = { {"3293AC630C13F0245F92BBB1766E16167A4E58492DDE73F3", ""}, {"$tiger$D981F8CB78201A950DCF3048751E441C517FCA1AA55A29F6", "message digest"}, {"$tiger$a90197a19d2872ed8a5d508ba5b42deecf08344cc9f42195", "12346789"}, {"$tiger$4a82b9bb5911e1eccfd27d90584903d568e4f96b4ecf0d97", "UPPERCASE"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; if (strlen(p) != BINARY_SIZE * 2) return 0; while(*p) if(atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + BINARY_SIZE*2 + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE*2 + 1); strupr(out + TAG_LENGTH); return out; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = strrchr(ciphertext, '$') + 1; else p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int 
index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { sph_tiger_context ctx; sph_tiger_init(&ctx); sph_tiger(&ctx, saved_key[index], strlen(saved_key[index])); sph_tiger_close(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void tiger_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_tiger = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, tiger_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, tiger_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
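// A minimal sketch of the per-candidate hashing done by crypt_all() above: one
// unkeyed, unsalted Tiger digest per key, using the same sph_tiger calls the
// format already includes. The helper name and hex printing are illustrative.
#include <stdio.h>
#include <string.h>
#include "sph_tiger.h"

static void tiger_digest_demo(const char *key)
{
    unsigned char out[24]; /* BINARY_SIZE */
    sph_tiger_context ctx;
    int i;

    sph_tiger_init(&ctx);
    sph_tiger(&ctx, key, strlen(key));
    sph_tiger_close(&ctx, out);

    for (i = 0; i < 24; i++)
        printf("%02x", out[i]);
    printf("  \"%s\"\n", key);
}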
Example_copyprivate.3.c
/*
 * @@name: copyprivate.3c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

omp_lock_t *new_lock()
{
   omp_lock_t *lock_ptr;

   #pragma omp single copyprivate(lock_ptr)
   {
      lock_ptr = (omp_lock_t *) malloc(sizeof(omp_lock_t));
      omp_init_lock( lock_ptr );
   }
   return lock_ptr;
}
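// A hedged usage sketch (not part of the original example, which is marked
// "linkable: no"): every thread calls new_lock(), exactly one thread executes
// the single region and allocates, and copyprivate broadcasts that pointer to
// the whole team, so all threads end up contending on the same lock.
omp_lock_t *new_lock(void); /* defined in the example above */

int main(void)
{
    #pragma omp parallel
    {
        omp_lock_t *lock = new_lock(); /* same pointer in every thread */
        omp_set_lock(lock);
        printf("thread %d holds the shared lock\n", omp_get_thread_num());
        omp_unset_lock(lock);
    }
    return 0; /* lock deliberately leaked for brevity */
}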
GB_binop__pow_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_fp32 // A.*B function (eWiseMult): GB_AemultB__pow_fp32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__pow_fp32 // C+=b function (dense accum): GB_Cdense_accumb__pow_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fp32 // C=scalar+B GB_bind1st__pow_fp32 // C=scalar+B' GB_bind1st_tran__pow_fp32 // C=A+scalar GB_bind2nd__pow_fp32 // C=A'+scalar GB_bind2nd_tran__pow_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = GB_powf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_powf (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FP32 || GxB_NO_POW_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = GB_powf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = GB_powf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = GB_powf (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = GB_powf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
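// A standalone sketch of the bind2nd kernel above: the scalar y is bound as
// the operator's second argument and applied elementwise. C's powf stands in
// for the internal GB_powf wrapper (which adds GraphBLAS-specific edge-case
// handling), so this is illustrative rather than a drop-in replacement.
#include <math.h>

void bind2nd_pow_fp32(float *Cx, const float *Ax, float y,
                      long long anz, int nthreads)
{
    long long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        float aij = Ax[p];
        Cx[p] = powf(aij, y);   // Cx [p] = GB_powf (aij, y)
    }
}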
fac_restrict2.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Are private static arrays a problem? * ******************************************************************************/ /****************************************************************************** * FAC composite level restriction. * Injection away from the refinement patches; constant restriction * inside patch. ******************************************************************************/ #include "_hypre_sstruct_ls.h" #include "fac.h" #define MapCellRank(i, j , k, rank) \ { \ rank = 4*k + 2*j + i; \ } #define InverseMapCellRank(rank, stencil) \ { \ HYPRE_Int ij,ii,jj,kk; \ ij = (rank%4); \ ii = (ij%2); \ jj = (ij-ii)/2; \ kk = (rank-2*jj-ii)/4; \ hypre_SetIndex3(stencil, ii, jj, kk); \ } /*-------------------------------------------------------------------------- * hypre_FacSemiRestrictData data structure *--------------------------------------------------------------------------*/ typedef struct { HYPRE_Int nvars; hypre_Index stride; hypre_SStructPVector *fgrid_cvectors; /* the grid of this vector may not be on the actual grid */ hypre_BoxArrayArray **identity_arrayboxes; hypre_BoxArrayArray **fullwgt_ownboxes; hypre_BoxArrayArray **fullwgt_sendboxes; HYPRE_Int ***own_cboxnums; /* local crs boxnums of ownboxes */ hypre_CommPkg **interlevel_comm; /* hypre_CommPkg **intralevel_comm;*/ /* may need to build an intra comm so that each processor only fullwts its own fine data- may need to add contrib */ } hypre_FacSemiRestrictData2; /*-------------------------------------------------------------------------- * hypre_FacSemiRestrictCreate *--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacSemiRestrictCreate2( void **fac_restrict_vdata_ptr) { HYPRE_Int ierr = 0; hypre_FacSemiRestrictData2 *fac_restrict_data; fac_restrict_data = hypre_CTAlloc(hypre_FacSemiRestrictData2, 1); *fac_restrict_vdata_ptr = (void *) fac_restrict_data; return ierr; } /*-------------------------------------------------------------------------- * hypre_FacSemiRestrictSetup: * Two types of communication are needed- one for the interlevel coarsened * fine boxes, and the other for the ghostlayer of the restricted vector. * * Approach: Identity away from the patches & fullweighting in a patch. * Since a fbox may not have the desired mapping * fbox= [a_0, a_1, a_2]x [b_0, b_1, b_2], a_i= c_i*rfactor[i] * b_i= f_i*rfactor[i] + g_i * with g_i= (rfactor[i]-1), attention must be paid to what the own_boxes, * send_boxes, and recv_boxes are. These map overlap. The reason: * myproc fullwgts what it can or equivalently, gets the restriction * contributions of its data. Some off_procs can compute the remaining * part of the agglomerate belonging to myproc and communicate it to myproc. * Hence, myproc's own_boxes contains these nodes as well as myproc's * recv_boxes. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacSemiRestrictSetup2( void *fac_restrict_vdata, hypre_SStructVector *r, HYPRE_Int part_crse, HYPRE_Int part_fine, hypre_SStructPVector *rc, hypre_Index rfactors ) { HYPRE_Int ierr = 0; hypre_FacSemiRestrictData2 *fac_restrict_data = (hypre_FacSemiRestrictData2 *)fac_restrict_vdata; MPI_Comm comm= hypre_SStructPVectorComm(rc); hypre_CommInfo *comm_info; hypre_CommPkg **interlevel_comm; hypre_SStructPVector *rf= hypre_SStructVectorPVector(r, part_fine); hypre_StructVector *s_rc, *s_cvector; hypre_SStructPGrid *pgrid; hypre_SStructPVector *fgrid_cvectors; hypre_SStructPGrid *fgrid_coarsen; hypre_BoxArrayArray **identity_arrayboxes; hypre_BoxArrayArray **fullwgt_ownboxes; hypre_BoxArrayArray **fullwgt_sendboxes; hypre_BoxArray *boxarray; hypre_BoxArray *tmp_boxarray, *intersect_boxes; HYPRE_Int ***own_cboxnums; hypre_BoxArrayArray **send_boxes, *send_rboxes; HYPRE_Int ***send_processes; HYPRE_Int ***send_remote_boxnums; hypre_BoxArrayArray **recv_boxes, *recv_rboxes; HYPRE_Int ***recv_processes; HYPRE_Int ***recv_remote_boxnums; hypre_BoxManager *boxman; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_Box box, scaled_box; hypre_Index zero_index, index, ilower, iupper; HYPRE_Int ndim= hypre_SStructVectorNDim(r); HYPRE_Int myproc, proc; HYPRE_Int nvars, vars; HYPRE_Int num_values; HYPRE_Int i, cnt1, cnt2; HYPRE_Int fi, ci; hypre_BoxInit(&box, ndim); hypre_BoxInit(&scaled_box, ndim); hypre_MPI_Comm_rank(comm, &myproc); hypre_ClearIndex(zero_index); nvars= hypre_SStructPVectorNVars(rc); (fac_restrict_data -> nvars)= nvars; hypre_CopyIndex(rfactors, (fac_restrict_data -> stride)); for (i= ndim; i< 3; i++) { rfactors[i]= 1; } /* work vector for storing the fullweighted fgrid boxes */ hypre_SStructPGridCreate(hypre_SStructPVectorComm(rf), ndim, &fgrid_coarsen); pgrid= hypre_SStructPVectorPGrid(rf); for (vars= 0; vars< nvars; vars++) { boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars)); hypre_ForBoxI(fi, boxarray) { hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box); hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors, hypre_BoxIMax(&box)); hypre_SStructPGridSetExtents(fgrid_coarsen, hypre_BoxIMin(&box), hypre_BoxIMax(&box)); } } hypre_SStructPGridSetVariables( fgrid_coarsen, nvars, hypre_SStructPGridVarTypes(pgrid) ); hypre_SStructPGridAssemble(fgrid_coarsen); hypre_SStructPVectorCreate(hypre_SStructPGridComm(fgrid_coarsen), fgrid_coarsen, &fgrid_cvectors); hypre_SStructPVectorInitialize(fgrid_cvectors); hypre_SStructPVectorAssemble(fgrid_cvectors); /* pgrid fgrid_coarsen no longer needed */ hypre_SStructPGridDestroy(fgrid_coarsen); fac_restrict_data -> fgrid_cvectors= fgrid_cvectors; /*-------------------------------------------------------------------------- * boxes that are not underlying a fine box: * * algorithm: subtract all coarsened fine grid boxes that intersect with * this processor's coarse boxes. Note that we cannot loop over all the * coarsened fine boxes and subtract them from the coarse grid since we do * not know if some of the overlying fine boxes belong on another * processor. For each cbox, we get a boxarray of boxes that are not * underlying-> size(identity_arrayboxes[vars])= #cboxes. * * Note that no contraction is needed for the intersect boxes since they * will be subtracted from the cbox. 
Contraction can erroneously lead * to bigger identity boxes. *--------------------------------------------------------------------------*/ identity_arrayboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars); pgrid= hypre_SStructPVectorPGrid(rc); hypre_ClearIndex(index); for (i= 0; i< ndim; i++) { index[i]= rfactors[i]-1; } tmp_boxarray = hypre_BoxArrayCreate(0, ndim); for (vars= 0; vars< nvars; vars++) { boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r), part_fine, vars); boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars)); identity_arrayboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray), ndim); hypre_ForBoxI(ci, boxarray) { hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box); hypre_AppendBox(&box, hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci)); hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&scaled_box)); hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index, rfactors, hypre_BoxIMax(&scaled_box)); hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box), hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries); /* all send and coarsened fboxes on this processor are collected */ intersect_boxes= hypre_BoxArrayCreate(0, ndim); for (i= 0; i< nboxman_entries; i++) { hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper); hypre_BoxSetExtents(&box, ilower, iupper); hypre_IntersectBoxes(&box, &scaled_box, &box); hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors, hypre_BoxIMax(&box)); hypre_AppendBox(&box, intersect_boxes); } hypre_SubtractBoxArrays(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci), intersect_boxes, tmp_boxarray); hypre_MinUnionBoxes(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci)); hypre_TFree(boxman_entries); hypre_BoxArrayDestroy(intersect_boxes); } } hypre_BoxArrayDestroy(tmp_boxarray); fac_restrict_data -> identity_arrayboxes= identity_arrayboxes; /*-------------------------------------------------------------------------- * fboxes that are coarsened. Some will be sent. We create the communication * pattern. For each fbox, we need a boxarray of sendboxes or ownboxes. * * Algorithm: Coarsen each fbox and see which cboxes it intersects using * BoxManIntersect. Cboxes that do not belong on the processor will have * a chunk sent to it. * * Note that no contraction is needed. Contraction can lead to erroneous * send_boxes. 
*--------------------------------------------------------------------------*/ interlevel_comm= hypre_CTAlloc(hypre_CommPkg *, nvars); fullwgt_sendboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars); fullwgt_ownboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars); own_cboxnums= hypre_CTAlloc(HYPRE_Int **, nvars); send_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars); send_processes= hypre_CTAlloc(HYPRE_Int **, nvars); send_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars); pgrid= hypre_SStructPVectorPGrid(rf); for (vars= 0; vars< nvars; vars++) { boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r), part_crse, vars); boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars)); fullwgt_sendboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray), ndim); fullwgt_ownboxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray), ndim); own_cboxnums[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray)); send_boxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray), ndim); send_processes[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray)); send_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray)); hypre_ForBoxI(fi, boxarray) { hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box); hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&scaled_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors, hypre_BoxIMax(&scaled_box)); hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box), hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries); cnt1= 0; cnt2= 0; for (i= 0; i< nboxman_entries; i++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc); if (proc != myproc) { cnt1++; } else { cnt2++; } } send_processes[vars][fi] = hypre_CTAlloc(HYPRE_Int, cnt1); send_remote_boxnums[vars][fi]= hypre_CTAlloc(HYPRE_Int, cnt1); own_cboxnums[vars][fi] = hypre_CTAlloc(HYPRE_Int, cnt2); cnt1= 0; cnt2= 0; for (i= 0; i< nboxman_entries; i++) { hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper); hypre_BoxSetExtents(&box, ilower, iupper); hypre_IntersectBoxes(&box, &scaled_box, &box); hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc); if (proc != myproc) { hypre_AppendBox(&box, hypre_BoxArrayArrayBoxArray(fullwgt_sendboxes[vars], fi)); hypre_AppendBox(&box, hypre_BoxArrayArrayBoxArray(send_boxes[vars], fi)); send_processes[vars][fi][cnt1]= proc; hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i], &send_remote_boxnums[vars][fi][cnt1]); cnt1++; } else { hypre_AppendBox(&box, hypre_BoxArrayArrayBoxArray(fullwgt_ownboxes[vars], fi)); hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i], &own_cboxnums[vars][fi][cnt2]); cnt2++; } } hypre_TFree(boxman_entries); } /* hypre_ForBoxI(fi, boxarray) */ } /* for (vars= 0; vars< nvars; vars++) */ (fac_restrict_data -> fullwgt_sendboxes)= fullwgt_sendboxes; (fac_restrict_data -> fullwgt_ownboxes)= fullwgt_ownboxes; (fac_restrict_data -> own_cboxnums)= own_cboxnums; /*-------------------------------------------------------------------------- * coarsened fboxes this processor will receive. * * Algorithm: For each cbox on this processor, refine it and find which * processors the refinement belongs in. The processors owning a chunk * are the recv_processors. 
*--------------------------------------------------------------------------*/ recv_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars); recv_processes= hypre_CTAlloc(HYPRE_Int **, nvars); /* dummy pointer for CommInfoCreate */ recv_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars); pgrid= hypre_SStructPVectorPGrid(rc); for (vars= 0; vars< nvars; vars++) { boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r), part_fine, vars); boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars)); recv_boxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray), ndim); recv_processes[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray)); recv_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray)); hypre_ForBoxI(ci, boxarray) { hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box); hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&scaled_box)); hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index, rfactors, hypre_BoxIMax(&scaled_box)); hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box), hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries); cnt1= 0; for (i= 0; i< nboxman_entries; i++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc); if (proc != myproc) { cnt1++; } } recv_processes[vars][ci]= hypre_CTAlloc(HYPRE_Int, cnt1); recv_remote_boxnums[vars][ci]= hypre_CTAlloc(HYPRE_Int , cnt1); cnt1= 0; for (i= 0; i< nboxman_entries; i++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc); if (proc != myproc) { hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper); hypre_BoxSetExtents(&box, ilower, iupper); hypre_IntersectBoxes(&box, &scaled_box, &box); /* no contracting needed */ hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors, hypre_BoxIMin(&box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors, hypre_BoxIMax(&box)); hypre_AppendBox(&box, hypre_BoxArrayArrayBoxArray(recv_boxes[vars], ci)); recv_processes[vars][ci][cnt1]= proc; cnt1++; } /* if (proc != myproc) */ } /* for (i= 0; i< nboxman_entries; i++) */ hypre_TFree(boxman_entries); } /* hypre_ForBoxI(ci, boxarray) */ } /* for (vars= 0; vars< nvars; vars++) */ num_values= 1; for (vars= 0; vars< nvars; vars++) { s_rc = hypre_SStructPVectorSVector(rc, vars); s_cvector= hypre_SStructPVectorSVector(fgrid_cvectors, vars); send_rboxes= hypre_BoxArrayArrayDuplicate(send_boxes[vars]); recv_rboxes= hypre_BoxArrayArrayDuplicate(recv_boxes[vars]); hypre_CommInfoCreate(send_boxes[vars], recv_boxes[vars], send_processes[vars], recv_processes[vars], send_remote_boxnums[vars], recv_remote_boxnums[vars], send_rboxes, recv_rboxes, 1, &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructVectorDataSpace(s_cvector), hypre_StructVectorDataSpace(s_rc), num_values, NULL, 0, hypre_StructVectorComm(s_rc), &interlevel_comm[vars]); hypre_CommInfoDestroy(comm_info); } hypre_TFree(send_boxes); hypre_TFree(recv_boxes); hypre_TFree(send_processes); hypre_TFree(recv_processes); hypre_TFree(send_remote_boxnums); hypre_TFree(recv_remote_boxnums); (fac_restrict_data -> interlevel_comm)= interlevel_comm; return ierr; } HYPRE_Int hypre_FACRestrict2( void * fac_restrict_vdata, hypre_SStructVector * xf, hypre_SStructPVector * xc) { HYPRE_Int ierr = 0; hypre_FacSemiRestrictData2 *restrict_data = (hypre_FacSemiRestrictData2 *)fac_restrict_vdata; hypre_SStructPVector *fgrid_cvectors = restrict_data->fgrid_cvectors; hypre_BoxArrayArray **identity_arrayboxes= restrict_data->identity_arrayboxes;
hypre_BoxArrayArray **fullwgt_ownboxes = restrict_data->fullwgt_ownboxes; HYPRE_Int ***own_cboxnums = restrict_data->own_cboxnums; hypre_CommPkg **interlevel_comm= restrict_data-> interlevel_comm; hypre_CommHandle *comm_handle; HYPRE_Int ndim = hypre_SStructVectorNDim(xf); hypre_BoxArrayArray *arrayarray_ownboxes; hypre_IndexRef stride; /* refinement factors */ hypre_StructGrid *fgrid; hypre_BoxArray *fgrid_boxes; hypre_Box *fgrid_box; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_BoxArray *own_boxes; hypre_Box *own_box; HYPRE_Int *boxnums; hypre_Box *xc_temp_dbox; hypre_Box *xf_dbox; hypre_StructVector *xc_temp; hypre_StructVector *xc_var; hypre_StructVector *xf_var; HYPRE_Int xci; HYPRE_Int xfi; HYPRE_Real ***xfp; HYPRE_Real ***xcp; HYPRE_Real ***xcp_temp; hypre_Index loop_size, lindex; hypre_Index start, fbox_size, node_offset; hypre_Index startc; hypre_Index stridec; hypre_Index rfactors; hypre_Index temp_index1, temp_index2; HYPRE_Int fi, ci; HYPRE_Int nvars, var; HYPRE_Int volume_crse_cell; HYPRE_Int i, j, k; HYPRE_Int imax, jmax, kmax; HYPRE_Int icell, jcell, kcell, ijkcell; HYPRE_Real *sum; HYPRE_Real scaling; HYPRE_Int part_crse= 0; HYPRE_Int part_fine= 1; HYPRE_Int num_coarse_cells; /*----------------------------------------------------------------------- * Initialize some things *-----------------------------------------------------------------------*/ stride= (restrict_data -> stride); hypre_ClearIndex(stridec); for (i= 0; i< ndim; i++) { stridec[i]= 1; } hypre_CopyIndex(stride, rfactors); for (i= ndim; i< 3; i++) { rfactors[i]= 1; } volume_crse_cell= 1; for (i= 0; i< ndim; i++) { volume_crse_cell*= rfactors[i]; } /*----------------------------------------------------------------------- * We are assuming the refinement and coarsening have same variable * types. *-----------------------------------------------------------------------*/ nvars= hypre_SStructPVectorNVars(xc); /*----------------------------------------------------------------------- * For each coordinate direction, a fine node can contribute only to the * left or right cell=> only 2 coarse cells per direction. *-----------------------------------------------------------------------*/ num_coarse_cells= 1; for (i= 0; i< ndim; i++) { num_coarse_cells*= 2; } sum= hypre_CTAlloc(HYPRE_Real, num_coarse_cells); /*-------------------------------------------------------------------------- * Scaling for averaging restriction. *--------------------------------------------------------------------------*/ scaling= 1.0; for (i= 0; i< ndim-2; i++) { scaling*= rfactors[0]; } /*----------------------------------------------------------------------- * Initialize the coarse vector to zero. *-----------------------------------------------------------------------*/ hypre_SStructPVectorSetConstantValues(xc, 0.0); /*----------------------------------------------------------------------- * Copy the coarse data: xf[part_crse] -> xc *-----------------------------------------------------------------------*/ hypre_SStructPartialPCopy(hypre_SStructVectorPVector(xf, part_crse), xc, identity_arrayboxes); /*----------------------------------------------------------------------- * Piecewise constant restriction over the refinement patch. * * Initialize the work vector by setting to zero. *-----------------------------------------------------------------------*/ hypre_SStructPVectorSetConstantValues(fgrid_cvectors, 0.0); /*----------------------------------------------------------------------- * Allocate memory for the data pointers. 
Assuming constant restriction. * We stride through the refinement patch by the refinement factors, and * so we must have pointers to the intermediate fine nodes=> xfp will * be size rfactors[2]*rfactors[1]. Because the fbox may not have the * ideal refinement form, we need to contribute to 2^ndim cells. *-----------------------------------------------------------------------*/ if (ndim > 1) { xcp_temp= hypre_TAlloc(HYPRE_Real **, (ndim-1)); xcp = hypre_TAlloc(HYPRE_Real **, (ndim-1)); for (k= 0; k< (ndim-1); k++) { xcp_temp[k]= hypre_TAlloc(HYPRE_Real *, 2); xcp[k] = hypre_TAlloc(HYPRE_Real *, 2); } } else /* 1d does not really require these HYPRE_Real ptrs */ { xcp_temp = hypre_TAlloc(HYPRE_Real **, 1); xcp = hypre_TAlloc(HYPRE_Real **, 1); xcp_temp[0]= hypre_TAlloc(HYPRE_Real *, 1); xcp[0] = hypre_TAlloc(HYPRE_Real *, 1); } /* memory allocation of xfp is okay for all dimensions */ xfp= hypre_TAlloc(HYPRE_Real **, rfactors[2]); for (k= 0; k< rfactors[2]; k++) { xfp[k]= hypre_TAlloc(HYPRE_Real *, rfactors[1]); } for (var= 0; var< nvars; var++) { xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var); xf_var= hypre_SStructPVectorSVector(hypre_SStructVectorPVector(xf,part_fine), var); fgrid = hypre_StructVectorGrid(xf_var); fgrid_boxes = hypre_StructGridBoxes(fgrid); cgrid = hypre_StructVectorGrid(xc_temp); cgrid_boxes = hypre_StructGridBoxes(cgrid); hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); /*-------------------------------------------------------------------- * Get the ptrs for the fine struct_vectors. *--------------------------------------------------------------------*/ xf_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(xf_var), fi); for (k= 0; k< rfactors[2]; k++) { for (j=0; j< rfactors[1]; j++) { hypre_SetIndex3(temp_index1, 0, j, k); xfp[k][j]= hypre_StructVectorBoxData(xf_var, fi) + hypre_BoxOffsetDistance(xf_dbox, temp_index1); } } /*-------------------------------------------------------------------- * Get the ptrs for the coarse struct_vectors. Note that the coarse * work vector is indexed with respect to the local fine box no.'s. * Work vectors were created this way. * Dimensionally dependent. *--------------------------------------------------------------------*/ xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), fi); if (ndim > 1) { for (k= 0; k< (ndim-1); k++) { for (j=0; j< 2; j++) { hypre_SetIndex3(temp_index1, 0, j, k); xcp_temp[k][j]= hypre_StructVectorBoxData(xc_temp, fi) + hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1); } } } else /* 1d case */ { hypre_ClearIndex(temp_index1); xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, fi) + hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1); } hypre_CopyIndex(hypre_BoxIMin(fgrid_box), start); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fbox_size); /*-------------------------------------------------------------------- * Adjust "fbox_size" so that this hypre_Index is appropriate for * ndim < 3. * fbox_size= hypre_BoxIMax(fgrid_box)-hypre_BoxIMin(fgrid_box)+1. *--------------------------------------------------------------------*/ for (i= 0; i< 3; i++) { fbox_size[i]-= (start[i]-1); } /*-------------------------------------------------------------------- * The fine intersection box may not be divisible by the refinement * factor. We need to know the remainder to determine which * coarse node gets the restricted values. 
*--------------------------------------------------------------------*/ hypre_ClearIndex(node_offset); for (i= 0; i< ndim; i++) { node_offset[i]= rfactors[i]-(start[i]%rfactors[i])-1; } hypre_SetIndex3(temp_index2, 0, 0, 0); hypre_StructMapFineToCoarse(start, temp_index2, rfactors, startc); hypre_BoxGetSize(fgrid_box, temp_index1); hypre_StructMapFineToCoarse(temp_index1, temp_index2, rfactors, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, xf_dbox, start, stride, xfi, xc_temp_dbox, startc, stridec, xci); #if 0 /* Are private static arrays a problem? */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci,imax,jmax,kmax,k,kcell,j,jcell,i,icell,ijkcell,temp_index2) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop2For(xfi, xci) { /*----------------------------------------------------------------- * Arithmetic average the refinement patch values to get * restricted coarse grid values in an agglomerate; i.e., * piecewise constant restriction. *-----------------------------------------------------------------*/ hypre_BoxLoopGetIndex(lindex); imax= hypre_min( (fbox_size[0]-lindex[0]*stride[0]), rfactors[0] ); jmax= hypre_min( (fbox_size[1]-lindex[1]*stride[1]), rfactors[1] ); kmax= hypre_min( (fbox_size[2]-lindex[2]*stride[2]), rfactors[2] ); for (i= 0; i< num_coarse_cells; i++) { sum[i]= 0.0; } for (k= 0; k< kmax; k++) { kcell= 1; if (k <= node_offset[2]) { kcell= 0; } for (j= 0; j< jmax; j++) { jcell= 1; if (j <= node_offset[1]) { jcell= 0; } for (i= 0; i< imax; i++) { icell= 1; if (i <= node_offset[0]) { icell= 0; } MapCellRank(icell, jcell , kcell, ijkcell); sum[ijkcell]+= xfp[k][j][xfi+i]; } } } /*----------------------------------------------------------------- * Add the computed averages to the correct coarse cell. *-----------------------------------------------------------------*/ for (ijkcell= 0; ijkcell< num_coarse_cells; ijkcell++) { if (sum[ijkcell] != 0.0) { sum[ijkcell]/= scaling; InverseMapCellRank(ijkcell, temp_index2); i= temp_index2[0]; j= temp_index2[1]; k= temp_index2[2]; xcp_temp[k][j][xci+i]+= sum[ijkcell]; } } } hypre_BoxLoop2End(xfi, xci); } /* hypre_ForBoxI(fi, fgrid_boxes) */ } /* for (var= 0; var< nvars; var++)*/ /*------------------------------------------------------------------ * Communicate the calculated restricted function over the coarsened * patch. Only actually communicated values will be put in the * coarse vector. *------------------------------------------------------------------*/ for (var= 0; var< nvars; var++) { xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var); xc_var= hypre_SStructPVectorSVector(xc, var); hypre_InitializeCommunication(interlevel_comm[var], hypre_StructVectorData(xc_temp), hypre_StructVectorData(xc_var), 0, 0, &comm_handle); hypre_FinalizeCommunication(comm_handle); } /*------------------------------------------------------------------ * Need to add the coarsened patches that belong on this processor * to the coarse vector.
*------------------------------------------------------------------*/ for (var= 0; var< nvars; var++) { xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var); xc_var= hypre_SStructPVectorSVector(xc, var); cgrid = hypre_StructVectorGrid(xc_temp); cgrid_boxes = hypre_StructGridBoxes(cgrid); arrayarray_ownboxes= fullwgt_ownboxes[var]; hypre_ForBoxI(ci, cgrid_boxes) { xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), ci); xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, ci); /*-------------------------------------------------------------- * Each ci box of cgrid_box has a boxarray of subboxes. Copy * each of these subboxes to the coarse vector. *--------------------------------------------------------------*/ own_boxes= hypre_BoxArrayArrayBoxArray(arrayarray_ownboxes, ci); boxnums = own_cboxnums[var][ci]; hypre_ForBoxI(i, own_boxes) { own_box= hypre_BoxArrayBox(own_boxes, i); xf_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_var), boxnums[i]); xcp[0][0]= hypre_StructVectorBoxData(xc_var, boxnums[i]); hypre_BoxGetSize(own_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, xc_temp_dbox, hypre_BoxIMin(own_box), stridec, xfi, xf_dbox, hypre_BoxIMin(own_box), stridec, xci); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xfi, xci) { xcp[0][0][xci]+= xcp_temp[0][0][xfi]; } hypre_BoxLoop2End(xfi, xci); } /* hypre_ForBoxI(i, own_boxes) */ } /* hypre_ForBoxI(ci, cgrid_boxes) */ } /* for (var= 0; var< nvars; var++) */ hypre_TFree(sum); for (k= 0; k< rfactors[2]; k++) { hypre_TFree(xfp[k]); } hypre_TFree(xfp); if (ndim > 1) { for (k= 0; k< (ndim-1); k++) { hypre_TFree(xcp_temp[k]); hypre_TFree(xcp[k]); } } else { hypre_TFree(xcp_temp[0]); hypre_TFree(xcp[0]); } hypre_TFree(xcp_temp); hypre_TFree(xcp); return ierr; } /*-------------------------------------------------------------------------- * hypre_FacSemiRestrictDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacSemiRestrictDestroy2( void *fac_restrict_vdata ) { HYPRE_Int ierr = 0; hypre_FacSemiRestrictData2 *fac_restrict_data = (hypre_FacSemiRestrictData2 *)fac_restrict_vdata; HYPRE_Int nvars; HYPRE_Int i, j; if (fac_restrict_data) { nvars= (fac_restrict_data-> nvars); hypre_SStructPVectorDestroy(fac_restrict_data-> fgrid_cvectors); for (i= 0; i< nvars; i++) { hypre_BoxArrayArrayDestroy((fac_restrict_data -> identity_arrayboxes)[i]); hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_sendboxes)[i]); for (j= 0; j< hypre_BoxArrayArraySize(fac_restrict_data->fullwgt_ownboxes[i]); j++) { hypre_TFree((fac_restrict_data -> own_cboxnums)[i][j]); } hypre_TFree((fac_restrict_data -> own_cboxnums)[i]); hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_ownboxes)[i]); hypre_CommPkgDestroy((fac_restrict_data -> interlevel_comm)[i]); } hypre_TFree(fac_restrict_data -> identity_arrayboxes); hypre_TFree(fac_restrict_data -> fullwgt_sendboxes); hypre_TFree(fac_restrict_data -> own_cboxnums); hypre_TFree(fac_restrict_data -> fullwgt_ownboxes); hypre_TFree(fac_restrict_data -> interlevel_comm); hypre_TFree(fac_restrict_data); } return ierr; }
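/*--------------------------------------------------------------------------
 * A minimal standalone sketch (not hypre code) of the arithmetic behind the
 * piecewise-constant restriction above: hypre_StructMapFineToCoarse maps a
 * fine index fi to the coarse index fi/rfactor per dimension, and each
 * coarse cell receives the average of the rfactor fine values that map to
 * it.  The names fine_to_coarse and restrict_1d are hypothetical and for
 * illustration only.
 *--------------------------------------------------------------------------*/
#include <stdio.h>

static int fine_to_coarse(int fi, int rfactor)
{
   return fi / rfactor;
}

static void restrict_1d(const double *xf, int nf, int rfactor, double *xc)
{
   int fi;
   for (fi = 0; fi < nf / rfactor; fi++)
   {
      xc[fi] = 0.0;
   }
   for (fi = 0; fi < nf; fi++)
   {
      /* each fine value contributes to exactly one coarse cell */
      xc[fine_to_coarse(fi, rfactor)] += xf[fi] / rfactor;
   }
}

int main(void)
{
   double xf[6] = {1.0, 1.0, 4.0, 4.0, 9.0, 9.0};
   double xc[3];
   restrict_1d(xf, 6, 2, xc);
   printf("%g %g %g\n", xc[0], xc[1], xc[2]); /* prints: 1 4 9 */
   return 0;
}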
GB_binop__ge_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_int8 // A.*B function (eWiseMult): GB_AemultB__ge_int8 // A*D function (colscale): GB_AxD__ge_int8 // D*A function (rowscale): GB_DxB__ge_int8 // C+=B function (dense accum): GB_Cdense_accumB__ge_int8 // C+=b function (dense accum): GB_Cdense_accumb__ge_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_int8 // C=scalar+B GB_bind1st__ge_int8 // C=scalar+B' GB_bind1st_tran__ge_int8 // C=A+scalar GB_bind2nd__ge_int8 // C=A'+scalar GB_bind2nd_tran__ge_int8 // C type: bool // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT8 || GxB_NO_GE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ge_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, 
const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
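//------------------------------------------------------------------------------
// A minimal sketch (not part of SuiteSparse:GraphBLAS) of what the generated
// bind2nd kernel above computes: Cx [p] = (Ax [p] >= y) over a dense array,
// ignoring the Ab bitmap and the OpenMP scheduling.  ge_int8_bind2nd is a
// hypothetical name used for illustration only.
//------------------------------------------------------------------------------
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static void ge_int8_bind2nd (bool *Cx, const int8_t *Ax, int8_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;    // GB_BINOP: z = (x >= y)
    }
}

int main (void)
{
    int8_t Ax [4] = { -2, 0, 3, 7 } ;
    bool Cx [4] ;
    ge_int8_bind2nd (Cx, Ax, 1, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", (int) Cx [p]) ;  // 0 0 1 1
    printf ("\n") ;
    return (0) ;
}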
sequences.c
#include "sequences.h" /* preprocess_db function preprocess the database sequences named input_filename. The preprocessed database filenames start with out_filename. */ void preprocess_db (char * input_filename, char * out_filename, int n_procs) { unsigned long int sequences_count=0, D=0, disp, accum, chunk_size, i, j, k; unsigned short int *sequences_lengths=NULL, * title_lengths=NULL, length=0, tmp_length, ok; char ** sequences=NULL, **titles=NULL, buffer[BUFFER_SIZE], filename[BUFFER_SIZE], * bin_filename, * res, *tmp_seq, *b=NULL, diff, new_line='\n'; FILE * sequences_file, *titles_file, *info_file, * bin_file; int max_title_length; double tick= dwalltime(); // open dabatase sequence filename sequences_file = fopen(input_filename,"r"); if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening input sequence file.\n"); exit(2); } // Allocate memory for sequences_lengths array sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); // Calculate number of sequences in database and its lengths sequences_count=0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { length = 0; // read title while (strrchr(buffer,new_line) == NULL) { length += strlen(buffer); res = fgets(buffer,BUFFER_SIZE,sequences_file); } title_lengths[sequences_count] = length + strlen(buffer) + 1; // read sequence length = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } sequences_lengths[sequences_count] = length; (sequences_count)++; if ((sequences_count) % ALLOCATION_CHUNK == 0) { sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); } } // Allocate memory for sequences array sequences = (char **) malloc(sequences_count*sizeof(char *)); if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char)); if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } } // Rewind sequences database file rewind(sequences_file); // Read sequences from the database file and load them in sequences array i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // read title while (strrchr(buffer,new_line) == NULL) res = fgets(buffer,BUFFER_SIZE,sequences_file); // read sequence length = 1; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { //printf("%s %d\n",buffer,strlen(buffer)); strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1); length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } i++; } // Rewind sequences database file rewind(sequences_file); // Allocate memory for titles array titles = (char **) malloc(sequences_count*sizeof(char *)); if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { titles[i] = (char *) malloc(title_lengths[i]*sizeof(char)); if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence 
titles.\n"); exit(1); } } // calculate max title length max_title_length = 0; for (i=0; i<sequences_count ; i++) max_title_length = (max_title_length > title_lengths[i] ? max_title_length : title_lengths[i]); // free memory free(title_lengths); // read sequence headers i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // discard sequences while ((res != NULL) && (buffer[0] != '>')) res = fgets(buffer,BUFFER_SIZE,sequences_file); if (res != NULL){ // read header length = 1; do{ strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1); length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } while (strrchr(buffer,new_line) == NULL); titles[i][length] = '\0'; i++; } } // Close sequences database file fclose(sequences_file); // Sort sequence array by length sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs); // Create titles file: this text file contains the sequences description sprintf(filename,"%s.desc",out_filename); titles_file = fopen(filename,"w"); if (titles_file == NULL) { printf("SWIMM: An error occurred while opening sequence header file.\n"); exit(2); } // write titles for (i=0; i<sequences_count ; i++) fprintf(titles_file,"%s\n",titles[i]); // close titles file fclose(titles_file); // calculate total number of residues #pragma omp parallel for reduction(+:D) num_threads(n_procs) for (i=0; i< sequences_count; i++ ) D = D + sequences_lengths[i]; // transform bidimensional sequence array to a unidimensional one b = (char *) malloc(D*sizeof(char)); if (b == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); } disp = 0; for (i=0; i< sequences_count; i++ ) { memcpy(b+disp,sequences[i],sequences_lengths[i]); disp += sequences_lengths[i]; } // Free memory for (i=0; i< sequences_count; i++ ) free(sequences[i]); free(sequences); // preprocess vect sequences DB // original alphabet: 'A'..'Z' => preprocessed alphabet: 0..24 (J, O and U are replaced with dummy symbol) #pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic) for (i=0; i< D; i++) { b[i] = ((b[i] == 'J') ? DUMMY_ELEMENT : b[i]); b[i] = ((b[i] == 'O') ? DUMMY_ELEMENT : b[i]); b[i] = ((b[i] == 'U') ? DUMMY_ELEMENT : b[i]); diff = 'A'; diff = (b[i] > 'J' ? diff+1 : diff); diff = (b[i] > 'O' ? diff+1 : diff); diff = (b[i] > 'U' ? 
diff+1 : diff); b[i] -= diff; } // Create info file: this file contains sequences count, number of residues and the maximum title length sprintf(filename,"%s.info",out_filename); info_file = fopen(filename,"w"); if (info_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } // Write info fprintf(info_file,"%ld %ld %d",sequences_count,D,max_title_length); // close info file fclose(info_file); // Create sequences binary file: this file contains first the sequences lengths and then the preprocessed sequences residues sprintf(filename,"%s.seq",out_filename); bin_file = fopen(filename,"wb"); if (bin_file == NULL) { printf("SWIMM: An error occurred while opening sequence file.\n"); exit(2); } // Write vectorized sequences lengths fwrite(sequences_lengths,sizeof(unsigned short int),sequences_count,bin_file); //Write sequences fwrite(b,sizeof(char),D,bin_file); // Close bin file fclose(bin_file); // free memory free(sequences_lengths); free(b); printf("\nSWIMM v%s\n\n",VERSION); printf("Database file:\t\t\t %s\n",input_filename); printf("Database size:\t\t\t%ld sequences (%ld residues) \n",sequences_count,D); printf("Preprocessed database name:\t%s\n",out_filename); printf("Preprocessing time:\t\t%lf seconds\n\n",dwalltime()-tick); } // Load query sequences from file void load_query_sequences(char * queries_filename, char ** ptr_query_sequences, char *** ptr_query_headers, unsigned short int **ptr_query_sequences_lengths, unsigned short int **ptr_m, unsigned long int * query_sequences_count, unsigned long int * ptr_Q, unsigned int ** ptr_query_sequences_disp, int n_procs) { long int i, j, k; unsigned long int sequences_count=0, Q=0, disp, accum, chunk_size; unsigned int * sequences_disp; unsigned short int *sequences_lengths, *m, * title_lengths, *tmp, length=0, tmp_length, ok; char ** sequences=NULL, **titles, buffer[BUFFER_SIZE], filename[BUFFER_SIZE], * bin_filename, * res, *tmp_seq, *a, diff, new_line='\n'; FILE * sequences_file; // open query sequences file sequences_file = fopen(queries_filename,"r"); if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening input sequence file.\n"); exit(2); } // Allocate memory for sequences_lengths array sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); // Calculate the number of sequences in the query file and their lengths sequences_count=0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { length = 0; // read title while (strrchr(buffer,new_line) == NULL) { length += strlen(buffer); res = fgets(buffer,BUFFER_SIZE,sequences_file); } title_lengths[sequences_count] = length + strlen(buffer) + 1; // read sequence length = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } sequences_lengths[sequences_count] = length; (sequences_count)++; if ((sequences_count) % ALLOCATION_CHUNK == 0) { sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); } } // copy lengths to aligned buffer tmp = sequences_lengths; m = (unsigned short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), MEMALIGN); sequences_lengths = (unsigned
short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), MEMALIGN); memcpy(m,tmp,sequences_count*sizeof(unsigned short int)); memcpy(sequences_lengths,tmp,sequences_count*sizeof(unsigned short int)); free(tmp); // Allocate memory for sequences array sequences = (char **) malloc(sequences_count*sizeof(char *)); if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for query sequences.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char)); if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } } // Rewind sequences database file rewind(sequences_file); // Read sequences from the database file and load them in sequences array i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // read title while (strrchr(buffer,new_line) == NULL) res = fgets(buffer,BUFFER_SIZE,sequences_file); // read sequence length = 1; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { //printf("%s %d\n",buffer,strlen(buffer)); strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1); length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } i++; } // Rewind sequences database file rewind(sequences_file); // Allocate memory for titles array titles = (char **) malloc(sequences_count*sizeof(char *)); if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { titles[i] = (char *) malloc(title_lengths[i]*sizeof(char)); if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); } } i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // discard sequences while ((res != NULL) && (buffer[0] != '>')) res = fgets(buffer,BUFFER_SIZE,sequences_file); if (res != NULL){ // read header length = 1; do{ strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1); length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } while (strrchr(buffer,new_line) == NULL); titles[i][length] = '\0'; i++; } } // Close sequences database file fclose(sequences_file); // Sort sequence array by length sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs); // calculate total number of residues #pragma omp parallel for reduction(+:Q) num_threads(n_procs) for (i=0; i< sequences_count; i++ ) Q = Q + (ceil( (double) sequences_lengths[i] / (double) QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT); *ptr_Q = Q; a = (char *) _mm_malloc(Q*sizeof(char), MEMALIGN); if (a == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); } disp = 0; for (i=0; i< sequences_count; i++ ) { // copy query sequence memcpy(a+disp,sequences[i],sequences_lengths[i]); // if length is not multiple of QUERY_SEQ_LEN_MULT, then make it multiple and copy dummy element at last position tmp_length = ceil( (double) sequences_lengths[i] / (double) QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT; for (j=sequences_lengths[i]; j<tmp_length; j++) a[disp+j]=DUMMY_ELEMENT; // update query length m[i] = tmp_length; // update disp disp += m[i]; } // process vect sequences DB #pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic) for (i=0; i< Q; i++) { a[i] = ((a[i] == 'J') ? DUMMY_ELEMENT : a[i]); a[i] = ((a[i] == 'O') ? DUMMY_ELEMENT : a[i]); a[i] = ((a[i] == 'U') ? 
DUMMY_ELEMENT : a[i]); diff = 'A'; diff = (a[i] > 'J' ? diff+1 : diff); diff = (a[i] > 'O' ? diff+1 : diff); diff = (a[i] > 'U' ? diff+1 : diff); a[i] -= diff; } // Calculate displacement for current sequences db sequences_disp = (unsigned int *) _mm_malloc((sequences_count+1)*sizeof(unsigned int), MEMALIGN); sequences_disp[0] = 0; for (i=1; i < sequences_count+1; i++) sequences_disp[i] = sequences_disp[i-1] + m[i-1]; *ptr_query_sequences = a; *ptr_query_sequences_lengths = sequences_lengths; *ptr_m = m; *ptr_query_sequences_disp = sequences_disp; *ptr_query_headers = titles; *query_sequences_count = sequences_count; // Free memory for (i=0; i< sequences_count; i++ ) free(sequences[i]); free(sequences); free(title_lengths); } void assemble_single_chunk_db (char * sequences_filename, int vector_length, unsigned long int * sequences_count, unsigned long int * D, unsigned short int * sequences_db_max_length, int * max_title_length, unsigned long int * vect_sequences_db_count, unsigned long int * vD, char **ptr_vect_sequences_db, unsigned short int ** ptr_vect_sequences_db_lengths, unsigned short int ** ptr_vect_sequences_db_blocks, unsigned long int ** ptr_vect_sequences_db_disp, int n_procs, int block_width) { char ** sequences, *s, filename[200], ** sequences_db_headers, *header, *b; unsigned short int * vect_sequences_lengths, * sequences_lengths, * vect_sequences_blocks; unsigned long int i, j, k, accum, aux_vD=0, *vect_sequences_disp; FILE * sequences_file, * info_file; // Open info file sprintf(filename,"%s.info",sequences_filename); info_file = fopen(filename,"r"); if (info_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } fscanf(info_file,"%ld %ld %d",sequences_count,D,max_title_length); fclose(info_file); // Open sequences file sprintf(filename,"%s.seq",sequences_filename); sequences_file = fopen(filename,"rb"); if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } // Read sequences lengths sequences_lengths = (unsigned short int *) malloc((*sequences_count)*sizeof(unsigned short int)); fread(sequences_lengths,sizeof(unsigned short int),*sequences_count,sequences_file); // Read sequences s = (char *) malloc((*D)*sizeof(char)); fread(s,sizeof(char),*D,sequences_file); fclose(sequences_file); sequences = (char **) malloc((*sequences_count)*sizeof(char *)); sequences[0] = s; for (i=1; i<*sequences_count ; i++) sequences[i] = sequences[i-1] + sequences_lengths[i-1]; // calculate vect_sequences_count *vect_sequences_db_count = ceil( (double) (*sequences_count) / (double) vector_length); // Allocate memory for vect_sequences_lengths vect_sequences_lengths = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),MEMALIGN); if (vect_sequences_lengths == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } vect_sequences_disp = (unsigned long int *) _mm_malloc((*vect_sequences_db_count+1)*sizeof(unsigned long int),MEMALIGN); if (vect_sequences_disp == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } vect_sequences_blocks = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),MEMALIGN); if (vect_sequences_blocks == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } // calculate values for vect_sequences_lengths array for (i=0; i< *vect_sequences_db_count - 1; i++ ) vect_sequences_lengths[i] = sequences_lengths[(i+1)*vector_length-1]; 
vect_sequences_lengths[*vect_sequences_db_count-1] = sequences_lengths[*sequences_count-1]; // make length multiple of 4 to allow 32/64 bytes aligned data for (i=0; i< *vect_sequences_db_count; i++ ) vect_sequences_lengths[i] = ceil( (double) vect_sequences_lengths[i] / (double) DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT; for (i=0; i< *vect_sequences_db_count; i++ ) vect_sequences_blocks[i] = ceil( (double) vect_sequences_lengths[i] / (double) block_width); #pragma omp parallel for reduction(+:aux_vD) num_threads(n_procs) for (i=0; i< *vect_sequences_db_count; i++ ) aux_vD = aux_vD + vect_sequences_lengths[i]*vector_length; *vD = aux_vD; b = (char *) _mm_malloc((*vD)*sizeof(char),MEMALIGN); // Calculate displacement for current sequences db vect_sequences_disp[0] = 0; for (k=1; k < *vect_sequences_db_count+1; k++) vect_sequences_disp[k] = vect_sequences_disp[k-1] + (vect_sequences_lengths[k-1]*vector_length); // Copy sequences db to host buffers reordering elements to get better locality when computing alignments for (i=0; i < *vect_sequences_db_count-1; i++) { for (j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } } //rest = sequences_count % vector_length; for (i=*vect_sequences_db_count-1, j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (i*vector_length+k < *sequences_count){ if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } *ptr_vect_sequences_db = b; *ptr_vect_sequences_db_lengths = vect_sequences_lengths; *ptr_vect_sequences_db_blocks = vect_sequences_blocks; *ptr_vect_sequences_db_disp = vect_sequences_disp; *sequences_db_max_length = sequences_lengths[*sequences_count-1]; free(s); free(sequences); free(sequences_lengths); } void load_database_headers (char * sequences_filename, unsigned long int sequences_count, int max_title_length, char *** ptr_sequences_db_headers) { char ** sequences_db_headers, filename[200], * header; FILE * header_file; unsigned long int i; // Load sequence headers // Open header file sprintf(filename,"%s.desc",sequences_filename); header_file = fopen(filename,"r"); if (header_file == NULL) { printf("SWIMM: An error occurred while opening sequence description file.\n"); exit(3); } // Read sequences lengths sequences_db_headers = (char **) malloc(sequences_count*sizeof(char *)); header = (char *) malloc((max_title_length+1)*sizeof(char)); for (i=0; i<sequences_count; i++){ fgets(header,max_title_length,header_file); sequences_db_headers[i] = (char *) malloc((strlen(header)+1)*sizeof(char)); strcpy(sequences_db_headers[i],header); } fclose(header_file); free(header); *ptr_sequences_db_headers = sequences_db_headers; } void merge_sequences(char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size) { unsigned long int i1 = 0; unsigned long int i2 = size / 2; unsigned long int it = 0; // allocate memory for temporary buffers char ** tmp1 = (char **) malloc(size*sizeof(char *)); char ** tmp2 = (char **) malloc(size*sizeof(char *)); unsigned short int * tmp3 = (unsigned short int *) malloc (size*sizeof(unsigned short int)); 
while(i1 < size/2 && i2 < size) { if (sequences_lengths[i1] <= sequences_lengths[i2]) { tmp1[it] = sequences[i1]; tmp2[it] = titles[i1]; tmp3[it] = sequences_lengths[i1]; i1++; } else { tmp1[it] = sequences[i2]; tmp2[it] = titles[i2]; tmp3[it] = sequences_lengths[i2]; i2 ++; } it ++; } while (i1 < size/2) { tmp1[it] = sequences[i1]; tmp2[it] = titles[i1]; tmp3[it] = sequences_lengths[i1]; i1++; it++; } while (i2 < size) { tmp1[it] = sequences[i2]; tmp2[it] = titles[i2]; tmp3[it] = sequences_lengths[i2]; i2++; it++; } memcpy(sequences, tmp1, size*sizeof(char *)); memcpy(titles, tmp2, size*sizeof(char *)); memcpy(sequences_lengths, tmp3, size*sizeof(unsigned short int)); free(tmp1); free(tmp2); free(tmp3); } void mergesort_sequences_serial (char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size) { char * tmp_seq; unsigned short int tmp_seq_len; if (size == 2) { if (sequences_lengths[0] > sequences_lengths[1]) { // swap sequences tmp_seq = sequences[0]; sequences[0] = sequences[1]; sequences[1] = tmp_seq; // swap titles tmp_seq = titles[0]; titles[0] = titles[1]; titles[1] = tmp_seq; // swap sequences lengths tmp_seq_len = sequences_lengths[0]; sequences_lengths[0] = sequences_lengths[1]; sequences_lengths[1] = tmp_seq_len; return; } } else { if (size > 2){ mergesort_sequences_serial(sequences, titles, sequences_lengths, size/2); mergesort_sequences_serial(sequences + size/2, titles + size/2, sequences_lengths + size/2, size - size/2); merge_sequences(sequences, titles, sequences_lengths, size); } } } void sort_sequences (char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size, int threads) { if ( threads == 1) { mergesort_sequences_serial(sequences, titles, sequences_lengths, size); } else if (threads > 1) { #pragma omp parallel sections { #pragma omp section sort_sequences(sequences, titles, sequences_lengths, size/2, threads/2); #pragma omp section sort_sequences(sequences + size/2, titles + size/2, sequences_lengths + size/2, size-size/2, threads-threads/2); } merge_sequences(sequences, titles, sequences_lengths, size); } // threads > 1 } void load_tuning_query_sequence (char ** ptr_tun_query_sequences, unsigned short int ** ptr_tun_query_sequence_lengths, unsigned int ** ptr_tun_query_sequence_disps) { unsigned short int * tun_query_sequence_lengths, lengths[TUNING_QUERY_COUNT] = {189, 375, 567, 729, 1000, 2005, 3005, 4061, 4743, 5478}; // unsigned short int * tun_query_sequence_lengths, lengths[TUNING_QUERY_COUNT] = {189, 567, 1000, 3005, 4743, 5478}; unsigned int * tun_query_sequence_disps, i, tunQ=0; char * tun_query_sequences; tun_query_sequence_lengths = _mm_malloc(TUNING_QUERY_COUNT*sizeof(unsigned short int), MEMALIGN); tun_query_sequence_disps = _mm_malloc((TUNING_QUERY_COUNT+1)*sizeof(unsigned int), MEMALIGN); // adapt query sequence length for (i=0; i < TUNING_QUERY_COUNT ; i++) { tun_query_sequence_lengths[i] = (lengths[i] / QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT; tunQ += tun_query_sequence_lengths[i]; } tun_query_sequences = _mm_malloc(tunQ*sizeof(char), MEMALIGN); // generate synthetic sequences for (i=0; i < tunQ ; i++) tun_query_sequences[i] = rand() % SUBMAT_ROWS; // complete disps tun_query_sequence_disps[0] = 0; for (i=1; i <= TUNING_QUERY_COUNT ; i++) tun_query_sequence_disps[i] = tun_query_sequence_disps[i-1] + tun_query_sequence_lengths[i-1]; *ptr_tun_query_sequences = tun_query_sequences; *ptr_tun_query_sequence_lengths = tun_query_sequence_lengths; *ptr_tun_query_sequence_disps = 
tun_query_sequence_disps; } void assemble_tuning_chunk_db (char ** ptr_tun_vect_db_sequences, unsigned short int ** ptr_tun_vect_db_sequences_lengths, unsigned short int ** ptr_tun_vect_db_sequences_blocks, unsigned long int ** ptr_tun_vect_db_sequences_disp, unsigned long int * ptr_tun_vect_db_sequences_count) { char *tun_vect_db_sequences; unsigned short int * tun_vect_db_sequences_lengths, * tun_vect_db_sequences_blocks, tun_db_seq_length; unsigned long int i, j, k, tun_vect_db_sequences_count, tun_db_seq_size, tun_db_seq_count, *tun_vect_db_sequences_disp; // adapt length of synthetic sequences tun_db_seq_length = (TUNING_DB_SEQ_LENGTH / DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT; // calculate synthetic db seq size and count tun_db_seq_size = (TUNING_DB_SEQ_SIZE / tun_db_seq_length) * tun_db_seq_length; tun_db_seq_count = (tun_db_seq_size / tun_db_seq_length); tun_db_seq_count = (tun_db_seq_count / VECTOR_LENGTH) * VECTOR_LENGTH; // calculate synthetic vect db sequences count tun_vect_db_sequences_count = tun_db_seq_count / VECTOR_LENGTH; /* printf("\n db seq len %u",tun_db_seq_length); printf("\n db seq size %lu",tun_db_seq_size); printf("\n db seq count %lu",tun_db_seq_count); printf("\n vect db seq count %lu",tun_vect_db_sequences_count);*/ // allocate memory for buffers tun_vect_db_sequences = _mm_malloc(tun_db_seq_size*sizeof(char), MEMALIGN); tun_vect_db_sequences_lengths = _mm_malloc(tun_vect_db_sequences_count*sizeof(unsigned short int), MEMALIGN); tun_vect_db_sequences_blocks = _mm_malloc(tun_vect_db_sequences_count*sizeof(unsigned short int), MEMALIGN); tun_vect_db_sequences_disp = _mm_malloc((tun_vect_db_sequences_count+1)*sizeof(unsigned long int), MEMALIGN); // generate synthetic sequences for (i=0; i < tun_db_seq_size ; i++) tun_vect_db_sequences[i] = rand() % SUBMAT_ROWS; // complete lengths for (i=0; i < tun_vect_db_sequences_count ; i++) tun_vect_db_sequences_lengths[i] = tun_db_seq_length; // complete disps tun_vect_db_sequences_disp[0] = 0; for (i=1; i <= tun_vect_db_sequences_count ; i++) tun_vect_db_sequences_disp[i] = tun_vect_db_sequences_disp[i-1] + tun_vect_db_sequences_lengths[i-1]*VECTOR_LENGTH; *ptr_tun_vect_db_sequences = tun_vect_db_sequences; *ptr_tun_vect_db_sequences_lengths = tun_vect_db_sequences_lengths; *ptr_tun_vect_db_sequences_blocks = tun_vect_db_sequences_blocks; *ptr_tun_vect_db_sequences_disp = tun_vect_db_sequences_disp; *ptr_tun_vect_db_sequences_count = tun_vect_db_sequences_count; }
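/*
 * A minimal standalone sketch (not part of SWIMM) of the alphabet compaction
 * used in preprocess_db and load_query_sequences above: J, O and U are dropped
 * from 'A'..'Z' and the remaining 23 letters are renumbered contiguously from
 * 0, so substitution-matrix rows stay dense.  compact_residue is a
 * hypothetical helper; the real code additionally maps J/O/U to a dummy
 * symbol before subtracting the offset.
 */
#include <stdio.h>

static char compact_residue(char c)
{
	char diff = 'A';
	diff = (c > 'J' ? diff+1 : diff); // skip the hole left by 'J'
	diff = (c > 'O' ? diff+1 : diff); // skip the hole left by 'O'
	diff = (c > 'U' ? diff+1 : diff); // skip the hole left by 'U'
	return c - diff;
}

int main() {
	printf("A->%d I->%d K->%d Z->%d\n",
		compact_residue('A'), compact_residue('I'),
		compact_residue('K'), compact_residue('Z')); // prints: A->0 I->8 K->9 Z->22
	return 0;
}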
ch_common.c
#define MAIN #include "ch_common.h" #include "cholesky.h" #include "../timing.h" #if (defined(DEBUG) || defined(USE_TIMING)) _Atomic int cnt_pdotrf = 0; _Atomic int cnt_trsm = 0; _Atomic int cnt_gemm = 0; _Atomic int cnt_syrk = 0; #endif #if defined(USE_TIMING) void helper_start_timing(int tt) { if (tt == TIME_POTRF) { cnt_pdotrf++; } else if (tt == TIME_TRSM) { cnt_trsm++; } else if (tt == TIME_GEMM) { cnt_gemm++; } else if (tt == TIME_SYRK) { cnt_syrk++; } } void helper_end_timing(int tt, double elapsed) { __timing[THREAD_NUM].ts[tt] += elapsed; } #endif static void get_block_rank(int *block_rank, int nt); #ifdef CHAMELEON_TARGET #pragma omp declare target #endif void omp_potrf(double * SPEC_RESTRICT const A, int ts, int ld) { #ifdef TRACE static int event_potrf = -1; if(event_potrf == -1) { char* event_name = "potrf"; int ierr; ierr = VT_funcdef(event_name, VT_NOCLASS, &event_potrf); } VT_begin(event_potrf); #endif #if (defined(DEBUG) || defined(USE_TIMING)) START_TIMING(TIME_POTRF); #endif static int INFO; static const char L = 'L'; dpotrf_(&L, &ts, A, &ld, &INFO); #if (defined(DEBUG) || defined(USE_TIMING)) END_TIMING(TIME_POTRF); #endif #ifdef TRACE VT_end(event_potrf); #endif } #ifdef CHAMELEON_TARGET #pragma omp end declare target #pragma omp declare target #endif void omp_trsm(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, int ts, int ld) { #ifdef TRACE static int event_trsm = -1; if(event_trsm == -1) { char* event_name = "trsm"; int ierr; ierr = VT_funcdef(event_name, VT_NOCLASS, &event_trsm); } VT_begin(event_trsm); #endif #if (defined(DEBUG) || defined(USE_TIMING)) START_TIMING(TIME_TRSM); #endif static char LO = 'L', TR = 'T', NU = 'N', RI = 'R'; static double DONE = 1.0; dtrsm_(&RI, &LO, &TR, &NU, &ts, &ts, &DONE, A, &ld, B, &ld ); #if (defined(DEBUG) || defined(USE_TIMING)) END_TIMING(TIME_TRSM); #endif #ifdef TRACE VT_end(event_trsm); #endif } #ifdef CHAMELEON_TARGET #pragma omp end declare target #pragma omp declare target #endif void omp_gemm(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, double * SPEC_RESTRICT C, int ts, int ld) { #ifdef TRACE static int event_gemm = -1; if(event_gemm == -1) { char* event_name = "gemm"; int ierr; ierr = VT_funcdef(event_name, VT_NOCLASS, &event_gemm); } VT_begin(event_gemm); #endif #if (defined(DEBUG) || defined(USE_TIMING)) START_TIMING(TIME_GEMM); #endif static const char TR = 'T', NT = 'N'; static double DONE = 1.0, DMONE = -1.0; dgemm_(&NT, &TR, &ts, &ts, &ts, &DMONE, A, &ld, B, &ld, &DONE, C, &ld); #if (defined(DEBUG) || defined(USE_TIMING)) END_TIMING(TIME_GEMM); #endif #ifdef TRACE VT_end(event_gemm); #endif } #ifdef CHAMELEON_TARGET #pragma omp end declare target #pragma omp declare target #endif void omp_syrk(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, int ts, int ld) { #ifdef TRACE static int event_syrk = -1; if(event_syrk == -1) { char* event_name = "syrk"; int ierr; ierr = VT_funcdef(event_name, VT_NOCLASS, &event_syrk); } VT_begin(event_syrk); #endif #if (defined(DEBUG) || defined(USE_TIMING)) START_TIMING(TIME_SYRK); #endif static char LO = 'L', NT = 'N'; static double DONE = 1.0, DMONE = -1.0; dsyrk_(&LO, &NT, &ts, &ts, &DMONE, A, &ld, &DONE, B, &ld ); #if (defined(DEBUG) || defined(USE_TIMING)) END_TIMING(TIME_SYRK); #endif #ifdef TRACE VT_end(event_syrk); #endif } #ifdef CHAMELEON_TARGET #pragma omp end declare target #endif void cholesky_single(const int ts, const int nt, double* A[nt][nt]) { // speed up "serial" verification with only a single rank #if !defined(OMPSS_VER) #pragma omp parallel { 
#pragma omp single { #endif for (int k = 0; k < nt; k++) { #pragma omp task depend(out: A[k][k]) { omp_potrf(A[k][k], ts, ts); #ifdef DEBUG if (mype == 0) printf("potrf:out:A[%d][%d]\n", k, k); #endif } for (int i = k + 1; i < nt; i++) { #pragma omp task depend(in: A[k][k]) depend(out: A[k][i]) { omp_trsm(A[k][k], A[k][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("trsm :in:A[%d][%d]:out:A[%d][%d]\n", k, k, k, i); #endif } } for (int i = k + 1; i < nt; i++) { for (int j = k + 1; j < i; j++) { #pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i]) { omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("gemm :in:A[%d][%d]:A[%d][%d]:out:A[%d][%d]\n", k, i, k, j, j, i); #endif } } #pragma omp task depend(in: A[k][i]) depend(out: A[i][i]) { omp_syrk(A[k][i], A[i][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("syrk :in:A[%d][%d]:out:A[%d][%d]\n", k, i, i, i); #endif } } } #pragma omp taskwait #if !defined(OMPSS_VER) } } #endif } inline void wait(MPI_Request *comm_req) { #ifdef TRACE static int event_wait = -1; if(event_wait == -1) { char* event_name = "wait"; int ierr; ierr = VT_funcdef(event_name, VT_NOCLASS, &event_wait); } VT_begin(event_wait); #endif int comm_comp = 0; #ifdef DISABLE_TASKYIELD MPI_Wait(comm_req, MPI_STATUS_IGNORE); #else MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE); while (!comm_comp) { #if defined(CHAMELEON) || defined(CHAMELEON_TARGET) chameleon_taskyield(); #else #pragma omp taskyield #endif MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE); } #endif #ifdef TRACE VT_end(event_wait); #endif } inline void reset_send_flags(char *send_flags) { for (int i = 0; i < np; i++) send_flags[i] = 0; } inline int get_send_flags(char *send_flags, int *block_rank, int itr1_str, int itr1_end, int itr2_str, int itr2_end, int n) { int send_cnt = 0; for (int i = itr1_str; i <= itr1_end; i++) { for (int j = itr2_str; j <= itr2_end; j++) { if (!send_flags[block_rank[i*n+j]]) { send_flags[block_rank[i*n+j]] = 1; send_cnt++; } } } return send_cnt; } inline void get_recv_flag(char *recv_flag, int *block_rank, int itr1_str, int itr1_end, int itr2_str, int itr2_end, int n) { if (*recv_flag == 1) return; for (int i = itr1_str; i <= itr1_end; i++) { for (int j = itr2_str; j <= itr2_end; j++) { if (block_rank[i*n+j] == mype) { *recv_flag = 1; return; } } } } int main(int argc, char *argv[]) { /* MPI Initialize */ int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); if (provided != MPI_THREAD_MULTIPLE) { fprintf(stderr, "The MPI implementation does not support MPI_THREAD_MULTIPLE\n"); exit(0); } MPI_Comm_rank(MPI_COMM_WORLD, &mype); MPI_Comm_size(MPI_COMM_WORLD, &np); /* cholesky init */ const char *result[3] = {"n/a","successful","UNSUCCESSFUL"}; const double eps = BLAS_dfpinfo(blas_eps); if (argc < 4) { printf("cholesky matrix_size block_size check\n"); exit(-1); } const int n = atoi(argv[1]); // matrix size const int ts = atoi(argv[2]); // tile size int check = atoi(argv[3]); // check result?
const int nt = n / ts; if (mype == 0) printf("R#%d, n = %d, nt = %d, ts = %d\n", mype, n, nt, ts); /* Set block rank */ int *block_rank = malloc(nt * nt * sizeof(int)); get_block_rank(block_rank, nt); #if (defined(DEBUG) || defined(USE_TIMING)) if (mype == 0) { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { printf("%d ", block_rank[i * nt + j]); } printf("\n"); } } MPI_Barrier(MPI_COMM_WORLD); // calculate how many tiles are assigned to the specific rank and how many lie on the diagonal int nr_tiles = 0; int nr_tiles_diag = 0; for (int i = 0; i < nt; i++) { for (int j = i; j < nt; j++) { if(block_rank[i * nt + j] == mype) { nr_tiles++; if(i == j) nr_tiles_diag++; } } } printf("[%d] has %d tiles in total and %d tiles on the diagonal\n", mype, nr_tiles, nr_tiles_diag); #endif double * SPEC_RESTRICT A[nt][nt], * SPEC_RESTRICT B, * SPEC_RESTRICT C[nt], * SPEC_RESTRICT Ans[nt][nt]; #if !defined(OMPSS_VER) #pragma omp parallel { #pragma omp single #endif { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { #pragma omp task depend(out: A[i][j]) shared(Ans, A) { if (check) { MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &Ans[i][j]); initialize_tile(ts, Ans[i][j]); } if (block_rank[i*nt+j] == mype) { MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &A[i][j]); if (!check) { initialize_tile(ts, A[i][j]); } else { for (int k = 0; k < ts * ts; k++) { A[i][j][k] = Ans[i][j][k]; } } } } } #pragma omp task depend(inout: A[i][i]) shared(Ans, A) { // add to diagonal if (check) { Ans[i][i][ts/2*ts+ts/2] = (double)nt; } if (block_rank[i*nt+i] == mype) { A[i][i][ts/2*ts+ts/2] = (double)nt; } } } } // omp single #if !defined(OMPSS_VER) } // omp parallel #endif B = (double*) malloc(ts * ts * sizeof(double)); for (int i = 0; i < nt; i++) { C[i] = (double*) malloc(ts * ts * sizeof(double)); } #if !defined(OMPSS_VER) #pragma omp parallel #pragma omp single #endif num_threads = omp_get_num_threads(); INIT_TIMING(num_threads); RESET_TIMINGS(num_threads); const float t3 = get_time(); if (check) cholesky_single(ts, nt, (double* (*)[nt]) Ans); const float t4 = get_time() - t3; MPI_Barrier(MPI_COMM_WORLD); RESET_TIMINGS(num_threads); if (mype == 0) printf("Starting parallel computation\n"); const float t1 = get_time(); cholesky_mpi(ts, nt, (double* SPEC_RESTRICT (*)[nt])A, B, C, block_rank); const float t2 = get_time() - t1; if (mype == 0) printf("Finished parallel computation\n"); MPI_Barrier(MPI_COMM_WORLD); /* Verification */ if (check) { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { if (block_rank[i * nt + j] == mype) { for (int k = 0; k < ts*ts; k++) { // if (Ans[i][j][k] != A[i][j][k]) check = 2; if (Ans[i][j][k] != A[i][j][k]) { check = 2; printf("Rank R#%2d: A[%4d][%4d][%4d] --> Expected: %11.5f Value: %11.5f Diff: %11.5f\n", mype, i,j,k, Ans[i][j][k], A[i][j][k], fabs(Ans[i][j][k]-A[i][j][k])); break; } } } if(check == 2) break; } if(check == 2) break; } } float time_mpi = t2; float gflops_mpi = (((1.0 / 3.0) * n * n * n) / ((time_mpi) * 1.0e+9)); float time_ser = t4; float gflops_ser = (((1.0 / 3.0) * n * n * n) / ((time_ser) * 1.0e+9)); if(mype == 0 || check == 2) printf("test:%s-%d-%d-%d:mype:%2d:np:%2d:threads:%2d:result:%s:gflops:%f:time:%f:gflops_ser:%f:time_ser:%f\n", argv[0], n, ts, num_threads, mype, np, num_threads, result[check], gflops_mpi, t2, gflops_ser, t4); #if (defined(DEBUG) || defined(USE_TIMING)) printf("[%d] count#pdotrf:%d:count#trsm:%d:count#gemm:%d:count#syrk:%d\n", mype, cnt_pdotrf, cnt_trsm, cnt_gemm, cnt_syrk); #endif for (int i = 0; i <
nt; i++) { for (int j = 0; j < nt; j++) { if (block_rank[i*nt+j] == mype) { free(A[i][j]); } if (check) free(Ans[i][j]); } free(C[i]); } free(B); free(block_rank); MPI_Finalize(); return 0; } static void get_block_rank(int *block_rank, int nt) { int row, col; row = col = np; if (np != 1) { while (1) { row = row / 2; if (row * col == np) break; col = col / 2; if (row * col == np) break; } } if (mype == 0) printf("row = %d, col = %d\n", row, col); int i, j, tmp_rank = 0, offset = 0; for (i = 0; i < nt; i++) { for (j = 0; j < nt; j++) { block_rank[i*nt + j] = tmp_rank + offset; tmp_rank++; if (tmp_rank >= col) tmp_rank = 0; } tmp_rank = 0; offset = (offset + col >= np) ? 0 : offset + col; } }
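/*
 * Illustrative sketch (not part of the benchmark above, hypothetical name):
 * wait() overlaps communication with computation by polling MPI_Test and
 * yielding to the OpenMP task scheduler instead of blocking in MPI_Wait.
 * Stripped of tracing and Chameleon variants, the bare pattern is:
 */
#include <mpi.h>

static void poll_request(MPI_Request *req)
{
    int done = 0;
    MPI_Test(req, &done, MPI_STATUS_IGNORE);
    while (!done) {
        /* let the runtime schedule other ready tasks while the transfer is in flight */
        #pragma omp taskyield
        MPI_Test(req, &done, MPI_STATUS_IGNORE);
    }
}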
GB_unop__conj_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__conj_fc32_fc32) // op(A') function: GB (_unop_tran__conj_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = conjf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = conjf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = conjf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CONJ || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__conj_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = conjf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = conjf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__conj_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
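/*
 * Illustrative sketch (not part of the generated kernel above, hypothetical
 * names): the apply kernel has exactly two shapes, a dense loop when A has
 * no bitmap and a guarded loop that skips entries whose bitmap byte is 0.
 * Reduced to plain C99 complex arrays:
 */
#include <complex.h>
#include <stdint.h>

static void apply_conj(float complex *Cx, const float complex *Ax,
                       const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p;
    if (Ab == NULL) {
        /* dense: every entry is present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++) Cx[p] = conjf(Ax[p]);
    } else {
        /* bitmap: only entries with Ab[p] != 0 exist */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++) {
            if (!Ab[p]) continue;
            Cx[p] = conjf(Ax[p]);
        }
    }
}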
GB_unop__identity_bool_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_bool_int64) // op(A') function: GB (_unop_tran__identity_bool_int64) // C type: bool // A type: int64_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = (bool) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = (bool) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_bool_int64) ( bool *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_bool_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
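/*
 * Illustrative sketch (hypothetical name): for identity_bool_int64 the
 * "operator" is pure typecasting; the generated GB_CAST macro is just the C
 * conversion int64_t -> bool, where any nonzero value becomes true.
 */
#include <stdbool.h>
#include <stdint.h>

static void cast_int64_to_bool(bool *Cx, const int64_t *Ax, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        Cx[p] = (bool) Ax[p];   /* 0 -> false, anything else -> true */
    }
}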
oskar_auto_correlate_omp.c
/* * Copyright (c) 2015-2018, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "correlate/private_correlate_functions_inline.h" #include "correlate/oskar_auto_correlate_omp.h" #include "math/oskar_add_inline.h" #include "math/oskar_kahan_sum.h" #ifdef __cplusplus extern "C" { #endif /* Single precision. */ void oskar_auto_correlate_omp_f(const int num_sources, const int num_stations, const float4c* jones, const float* source_I, const float* source_Q, const float* source_U, const float* source_V, float4c* vis) { int s; #pragma omp parallel for private(s) for (s = 0; s < num_stations; ++s) { int i; float4c m1, m2, sum, guard; const float4c *const jones_station = &jones[s * num_sources]; OSKAR_CLEAR_COMPLEX_MATRIX(float, sum) OSKAR_CLEAR_COMPLEX_MATRIX(float, guard) for (i = 0; i < num_sources; ++i) { /* Construct source brightness matrix. */ OSKAR_CONSTRUCT_B(float, m2, source_I[i], source_Q[i], source_U[i], source_V[i]) /* Multiply first Jones matrix with source brightness matrix. */ OSKAR_LOAD_MATRIX(m1, jones_station[i]) OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(float2, m1, m2); /* Multiply result with second (Hermitian transposed) Jones matrix. */ OSKAR_LOAD_MATRIX(m2, jones_station[i]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(float2, m1, m2); /* Accumulate. */ OSKAR_KAHAN_SUM_COMPLEX_MATRIX(float, sum, m1, guard) } /* Blank non-Hermitian values. */ sum.a.y = 0.0f; sum.d.y = 0.0f; OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[s], sum); } } /* Double precision. */ void oskar_auto_correlate_omp_d(const int num_sources, const int num_stations, const double4c* jones, const double* source_I, const double* source_Q, const double* source_U, const double* source_V, double4c* vis) { int s; #pragma omp parallel for private(s) for (s = 0; s < num_stations; ++s) { int i; double4c m1, m2, sum; const double4c *const jones_station = &jones[s * num_sources]; OSKAR_CLEAR_COMPLEX_MATRIX(double, sum) for (i = 0; i < num_sources; ++i) { /* Construct source brightness matrix. 
*/ OSKAR_CONSTRUCT_B(double, m2, source_I[i], source_Q[i], source_U[i], source_V[i]) /* Multiply first Jones matrix with source brightness matrix. */ OSKAR_LOAD_MATRIX(m1, jones_station[i]) OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(double2, m1, m2); /* Multiply result with second (Hermitian transposed) Jones matrix. */ OSKAR_LOAD_MATRIX(m2, jones_station[i]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(double2, m1, m2); /* Accumulate. */ OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(sum, m1) } /* Blank non-Hermitian values. */ sum.a.y = 0.0; sum.d.y = 0.0; OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[s], sum); } } #ifdef __cplusplus } #endif
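/*
 * Illustrative sketch (not part of the file above): the single-precision
 * path accumulates with a Kahan compensated sum; the "guard" matrix holds
 * the rounding error lost by each addition. The scalar form of the
 * technique behind OSKAR_KAHAN_SUM_COMPLEX_MATRIX:
 */
static float kahan_sum(const float *x, int n)
{
    float sum = 0.0f, guard = 0.0f;
    for (int i = 0; i < n; i++) {
        float y = x[i] - guard;   /* re-inject the previously lost low-order bits */
        float t = sum + y;        /* big + small: low-order bits of y are lost here */
        guard = (t - sum) - y;    /* algebraically zero; numerically the lost part */
        sum = t;
    }
    return sum;
}
/* The double-precision function above skips the guard: with twice the
 * mantissa width, plain accumulation is accurate enough for this use. */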
SoftmaxLoss.h
#ifndef SOFTMAXLOSS_H
#define SOFTMAXLOSS_H

#include <cmath>
#include <string>

#include <Eigen/Dense>
#include "multinomial.h"
#include "util.h"

namespace nplm
{

// is this cheating?
using Eigen::Matrix;
using Eigen::MatrixBase;
using Eigen::Dynamic;

///// Softmax layer plus log-loss function.

enum loss_function_type { LogLoss, NCELoss, InvalidLoss };

inline loss_function_type string_to_loss_function(const std::string &s)
{
    if (s == "log")
        return LogLoss;
    else if (s == "nce")
        return NCELoss;
    else
        return InvalidLoss;
}

inline std::string loss_function_to_string(loss_function_type f)
{
    if (f == LogLoss)
        return "log";
    else if (f == NCELoss)
        return "nce";
    else
        return "invalid"; // was missing: InvalidLoss fell off the end of a non-void function
}

/// Note: Outputs log-probabilities.

struct SoftmaxLogLoss
{
    template <typename DerivedI, typename DerivedW, typename DerivedO>
    void fProp(const MatrixBase<DerivedI> &input,
               const MatrixBase<DerivedW> &output_words,
               const MatrixBase<DerivedO> &output_const,
               double &loss)
    {
        UNCONST(DerivedO, output_const, output);

        double log_likelihood = 0.0;

        #pragma omp parallel for reduction(+:log_likelihood)
        for (int train_id = 0; train_id < input.cols(); train_id++)
        {
            double normalization = logsum(input.col(train_id));
            output.col(train_id).array() = input.col(train_id).array() - normalization;
            log_likelihood += output(output_words(train_id), train_id);
        }
        loss = log_likelihood;
    }

    template <typename DerivedW, typename DerivedO, typename DerivedI>
    void bProp(const MatrixBase<DerivedW> &output_words,
               const MatrixBase<DerivedO> &output,
               const MatrixBase<DerivedI> &grad_input_const)
    {
        UNCONST(DerivedI, grad_input_const, grad_input);
        grad_input.setZero();
        #pragma omp parallel for
        for (int train_id = 0; train_id < output.cols(); train_id++)
        {
            grad_input(output_words(train_id), train_id) += 1.;
            grad_input.col(train_id) -= output.col(train_id).array().exp().matrix();
        }
    }
};

///// Softmax layer plus NCE loss function.
///// Note: Outputs probabilities.
///// Note: Unlike SoftmaxLogLoss, does not compute *or* apply precomputed
///// normalizations. Currently the caller is expected to do normalization.

template <typename Multinomial>
class SoftmaxNCELoss
{
    const Multinomial &unigram;

public:
    SoftmaxNCELoss(const Multinomial &unigram)
      : unigram(unigram)
    {
    }

    template <typename DerivedI, typename DerivedW, typename DerivedO>
    void fProp(const MatrixBase<DerivedI> &scores,
               const MatrixBase<DerivedW> &minibatch_samples,
               const MatrixBase<DerivedO> &output_const,
               double &loss)
    {
        UNCONST(DerivedO, output_const, output);
        double log_likelihood = 0.0;
        int num_noise_samples = minibatch_samples.rows() - 1;
        double log_num_noise_samples = std::log(num_noise_samples);
        #pragma omp parallel for reduction(+:log_likelihood) schedule(static)
        for (int train_id = 0; train_id < scores.cols(); train_id++)
        {
            for (int sample_id = 0; sample_id < minibatch_samples.rows(); sample_id++)
            {
                int sample = minibatch_samples(sample_id, train_id);
                // To avoid zero or infinite probabilities,
                // never take exp of score without normalizing first,
                // even if it's a little slower...
                double score = scores(sample_id, train_id);
                double score_noise = log_num_noise_samples + unigram.logprob(sample);
                double z = logadd(score, score_noise);
                double logprob = score - z;
                double logprob_noise = score_noise - z;
                output(sample_id, train_id) = std::exp(logprob);
                log_likelihood += sample_id == 0 ? logprob : logprob_noise;
            }
        }
        loss = log_likelihood;
    }

    template <typename DerivedO, typename DerivedI>
    void bProp(const MatrixBase<DerivedO> &probs,
               const MatrixBase<DerivedI> &output_const)
    {
        UNCONST(DerivedI, output_const, output);
        #pragma omp parallel for schedule(static)
        for (int train_id = 0; train_id < probs.cols(); train_id++)
        {
            output.col(train_id) = -probs.col(train_id);
            output(0, train_id) += 1.0;
        }
    }
};

} // namespace nplm

#endif
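// Illustrative sketch (not part of the header above, hypothetical name):
// fProp relies on a numerically stable logadd(a, b) = log(exp(a) + exp(b)),
// presumably supplied by util.h. The standard max-shifted form keeps the
// exp() argument non-positive so it can never overflow:
#include <algorithm>
#include <cmath>

inline double logadd_sketch(double a, double b)
{
    double hi = std::max(a, b), lo = std::min(a, b);
    if (std::isinf(hi)) return hi;               // covers -inf + -inf and +inf
    return hi + std::log1p(std::exp(lo - hi));   // exp argument is always <= 0
}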
coordinate_common.h
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #pragma once #include <algorithm> #include <string> #include <utility> #include <vector> #include <limits> #include "xgboost/data.h" #include "xgboost/parameter.h" #include "./param.h" #include "../gbm/gblinear_model.h" #include "../common/random.h" namespace xgboost { namespace linear { struct CoordinateParam : public XGBoostParameter<CoordinateParam> { int top_k; DMLC_DECLARE_PARAMETER(CoordinateParam) { DMLC_DECLARE_FIELD(top_k) .set_lower_bound(0) .set_default(0) .describe("The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); } }; /** * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the * number of training instances. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * \param w The weight. * \param reg_alpha Unnormalised L1 penalty. * \param reg_lambda Unnormalised L2 penalty. * * \return The weight update. */ inline double CoordinateDelta(double sum_grad, double sum_hess, double w, double reg_alpha, double reg_lambda) { if (sum_hess < 1e-5f) return 0.0f; const double sum_grad_l2 = sum_grad + reg_lambda * w; const double sum_hess_l2 = sum_hess + reg_lambda; const double tmp = w - sum_grad_l2 / sum_hess_l2; if (tmp >= 0) { return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w); } else { return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w); } } /** * \brief Calculate update to bias. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * * \return The weight update. */ inline double CoordinateDeltaBias(double sum_grad, double sum_hess) { return -sum_grad / sum_hess; } /** * \brief Get the gradient with respect to a single feature. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetBatches<CSCPage>()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to a single feature. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. 
*/ inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetBatches<CSCPage>()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to the bias. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for the bias. */ inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint i = 0; i < ndata; ++i) { auto &p = gpair[i * num_group + group_idx]; if (p.GetHess() >= 0.0f) { sum_grad += p.GetGrad(); sum_hess += p.GetHess(); } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Updates the gradient vector with respect to a change in weight. * * \param fidx The feature index. * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dw The change in weight. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. */ inline void UpdateResidualParallel(int fidx, int group_idx, int num_group, float dw, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dw == 0.0f) return; for (const auto &batch : p_fmat->GetBatches<CSCPage>()) { auto col = batch[fidx]; // update grad value const auto num_row = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < num_row; ++j) { GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0); } } } /** * \brief Updates the gradient vector based on a change in the bias. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dbias The change in bias. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. */ inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dbias == 0.0f) return; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { GradientPair &g = (*in_gpair)[i * num_group + group_idx]; if (g.GetHess() < 0.0f) continue; g += GradientPair(g.GetHess() * dbias, 0); } } /** * \brief Abstract class for stateful feature selection or ordering * in coordinate descent algorithms. */ class FeatureSelector { public: /*! \brief factory method */ static FeatureSelector *Create(int choice); /*! 
\brief virtual destructor */ virtual ~FeatureSelector() = default; /** * \brief Setting up the selector state prior to looping through features. * * \param model The model. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * \param param A parameter with algorithm-dependent use. */ virtual void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) {} /** * \brief Select next coordinate to update. * * \param iteration The iteration in a loop through features * \param model The model. * \param group_idx Zero-based index of the group. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * * \return The index of the selected feature. -1 indicates none selected. */ virtual int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) = 0; }; /** * \brief Deterministic selection by cycling through features one at a time. */ class CyclicFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return iteration % model.learner_model_param_->num_feature; } }; /** * \brief Similar to Cyclic but with random feature shuffling prior to each update. * \note Its randomness is controllable by setting a random seed. */ class ShuffleFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { if (feat_index_.size() == 0) { feat_index_.resize(model.learner_model_param_->num_feature); std::iota(feat_index_.begin(), feat_index_.end(), 0); } std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom()); } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return feat_index_[iteration % model.learner_model_param_->num_feature]; } protected: std::vector<bst_uint> feat_index_; }; /** * \brief A random (with replacement) coordinate selector. * \note Its randomness is controllable by setting a random seed. */ class RandomFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return common::GlobalRandom()() % model.learner_model_param_->num_feature; } }; /** * \brief Select coordinate with the greatest gradient magnitude. * \note It has O(num_feature^2) complexity. It is fully deterministic. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). That would reduce the complexity to * O(num_feature*top_k). 
*/ class GreedyFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { top_k_ = static_cast<bst_uint>(param); const bst_uint ngroup = model.learner_model_param_->num_output_group; if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max(); if (counter_.size() == 0) { counter_.resize(ngroup); gpair_sums_.resize(model.learner_model_param_->num_feature * ngroup); } for (bst_uint gid = 0u; gid < ngroup; ++gid) { counter_[gid] = 0u; } } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { // k-th selected feature for a group auto k = counter_[group_idx]++; // stop after either reaching top-K or going through all the features in a group if (k >= top_k_ || counter_[group_idx] == model.learner_model_param_->num_feature) return -1; const int ngroup = model.learner_model_param_->num_output_group; const bst_omp_uint nfeat = model.learner_model_param_->num_feature; // Calculate univariate gradient sums std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.)); for (const auto &batch : p_fmat->GetBatches<CSCPage>()) { #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < nfeat; ++i) { const auto col = batch[i]; const bst_uint ndata = col.size(); auto &sums = gpair_sums_[group_idx * nfeat + i]; for (bst_uint j = 0u; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * ngroup + group_idx]; if (p.GetHess() < 0.f) continue; sums.first += p.GetGrad() * v; sums.second += p.GetHess() * v * v; } } } // Find a feature with the largest magnitude of weight change int best_fidx = 0; double best_weight_update = 0.0f; for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) { auto &s = gpair_sums_[group_idx * nfeat + fidx]; float dw = std::abs(static_cast<bst_float>( CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda))); if (dw > best_weight_update) { best_weight_update = dw; best_fidx = fidx; } } return best_fidx; } protected: bst_uint top_k_; std::vector<bst_uint> counter_; std::vector<std::pair<double, double>> gpair_sums_; }; /** * \brief Thrifty, approximately-greedy feature selector. * * \note Prior to cyclic updates, reorders features in descending magnitude of * their univariate weight changes. This operation is multithreaded and is a * linear complexity approximation of the quadratic greedy selection. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). 
*/ class ThriftyFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { top_k_ = static_cast<bst_uint>(param); if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max(); const bst_uint ngroup = model.learner_model_param_->num_output_group; const bst_omp_uint nfeat = model.learner_model_param_->num_feature; if (deltaw_.size() == 0) { deltaw_.resize(nfeat * ngroup); sorted_idx_.resize(nfeat * ngroup); counter_.resize(ngroup); gpair_sums_.resize(nfeat * ngroup); } // Calculate univariate gradient sums std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.)); for (const auto &batch : p_fmat->GetBatches<CSCPage>()) { // column-parallel is usually faster than row-parallel #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < nfeat; ++i) { const auto col = batch[i]; const bst_uint ndata = col.size(); for (bst_uint gid = 0u; gid < ngroup; ++gid) { auto &sums = gpair_sums_[gid * nfeat + i]; for (bst_uint j = 0u; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * ngroup + gid]; if (p.GetHess() < 0.f) continue; sums.first += p.GetGrad() * v; sums.second += p.GetHess() * v * v; } } } } // rank by descending weight magnitude within the groups std::fill(deltaw_.begin(), deltaw_.end(), 0.f); std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0); bst_float *pdeltaw = &deltaw_[0]; for (bst_uint gid = 0u; gid < ngroup; ++gid) { // Calculate univariate weight changes for (bst_omp_uint i = 0; i < nfeat; ++i) { auto ii = gid * nfeat + i; auto &s = gpair_sums_[ii]; deltaw_[ii] = static_cast<bst_float>(CoordinateDelta( s.first, s.second, model[i][gid], alpha, lambda)); } // sort in descending order of deltaw abs values auto start = sorted_idx_.begin() + gid * nfeat; std::sort(start, start + nfeat, [pdeltaw](size_t i, size_t j) { return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j)); }); counter_[gid] = 0u; } } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { // k-th selected feature for a group auto k = counter_[group_idx]++; // stop after either reaching top-N or going through all the features in a group if (k >= top_k_ || counter_[group_idx] == model.learner_model_param_->num_feature) return -1; // note that sorted_idx stores the "long" indices const size_t grp_offset = group_idx * model.learner_model_param_->num_feature; return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset); } protected: bst_uint top_k_; std::vector<bst_float> deltaw_; std::vector<size_t> sorted_idx_; std::vector<bst_uint> counter_; std::vector<std::pair<double, double>> gpair_sums_; }; inline FeatureSelector *FeatureSelector::Create(int choice) { switch (choice) { case kCyclic: return new CyclicFeatureSelector(); case kShuffle: return new ShuffleFeatureSelector(); case kThrifty: return new ThriftyFeatureSelector(); case kGreedy: return new GreedyFeatureSelector(); case kRandom: return new RandomFeatureSelector(); default: LOG(FATAL) << "unknown coordinate selector: " << choice; } return nullptr; } } // namespace linear } // namespace xgboost
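/*
 * Illustrative sketch (not part of the header above, hypothetical names):
 * how CoordinateDelta drives one cyclic coordinate-descent sweep over a
 * dense row-major feature matrix X for a single output group. After each
 * weight step dw, the per-row gradients are updated the same way
 * UpdateResidualParallel does it: g_i += h_i * x_ij * dw.
 */
#include <vector>

inline void cd_sweep(const std::vector<double> &X, int num_row, int num_col,
                     std::vector<double> &g, const std::vector<double> &h,
                     std::vector<double> &w, double alpha, double lambda) {
  for (int j = 0; j < num_col; ++j) {
    double sum_grad = 0.0, sum_hess = 0.0;
    for (int i = 0; i < num_row; ++i) {
      const double v = X[i * num_col + j];
      sum_grad += g[i] * v;
      sum_hess += h[i] * v * v;
    }
    // elastic-net-regularised Newton step for this coordinate (from above)
    const double dw = CoordinateDelta(sum_grad, sum_hess, w[j], alpha, lambda);
    if (dw == 0.0) continue;
    w[j] += dw;
    for (int i = 0; i < num_row; ++i) {  // keep gradients consistent with new w
      g[i] += h[i] * X[i * num_col + j] * dw;
    }
  }
}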
GB_unop__minv_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_fc64_fc64 // op(A') function: GB_unop_tran__minv_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_FC64_minv (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_FC64_minv (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_FC64_minv (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_FC64_minv (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
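/*
 * Illustrative sketch (hypothetical name): GB_FC64_minv computes the complex
 * multiplicative inverse 1/z, mathematically conj(z) / |z|^2. With C99
 * complex arithmetic it reduces to a single division:
 */
#include <complex.h>

static double complex minv_fc64(double complex z)
{
    return 1.0 / z;
}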
binTree.h
#ifndef _BINARYTREE_H_ #define _BINARYTREE_H_ #include <mpi.h> #include <cassert> #include <vector> #include <algorithm> #include <fstream> #include <iostream> #include <map> #include <utility> #include <cstring> #include <omp.h> #include "util.h" using namespace std; /* ************************************************** */ class binNode; class binData; typedef binNode* pbinNode; typedef binData* pbinData; /* ************************************************** */ struct treeParams{ int splitter; int hypertree; int debug_verbose; int timing_verbose; int pruning_verbose; int max_points_per_node; int max_tree_level; int min_comm_size_per_node; int flops_verbose; int eval_verbose; int traverse_type; treeParams():splitter(0),hypertree(1),debug_verbose(0),timing_verbose(0),pruning_verbose(0),max_points_per_node(1000),max_tree_level(20),min_comm_size_per_node(1),flops_verbose(0),eval_verbose(0),traverse_type(0){} }; /** Auxiliary data structure to hold point coords (X), their dimension (dim) and global ids. */ class binData { public: vector<double> X; ///< Data point coordinates. int dim; ///< Dimensionality of points. int numof_points; vector<long> gids; ///< global ids of points. vector<long> lids; /// < local ids of points, used by shared memory tree vector<double> radii; ///< Search radii of points (only meaningful if this binData object is a query point set). binData() : dim(0),numof_points(0) {;} //------------- Methods virtual void Copy(pbinData data){ X.resize( data->numof_points * data->dim ); gids.resize( data->numof_points ); dim = data->dim; numof_points = data->numof_points; int npoints = numof_points; #pragma omp parallel if(npoints > 2000) { int omp_num_points, last_omp_num_points; int t = omp_get_thread_num(); int numt = omp_get_num_threads(); omp_num_points = npoints / numt; last_omp_num_points = npoints - (omp_num_points * (numt-1)); //This thread's number of points int threadpoints = (t == numt-1) ? last_omp_num_points : omp_num_points; memcpy( (void*)&(X[t*omp_num_points*dim]), (void*)&(data->X[t*omp_num_points*dim]), threadpoints*dim*sizeof(double) ); memcpy( (void*)&(gids[t*omp_num_points]), (void*)&(data->gids[t*omp_num_points]), threadpoints*sizeof(long) ); } if(data->radii.size()>0){ radii.resize(data->numof_points); #pragma omp parallel if(npoints > 2000) { #pragma omp for //schedule(dynamic,256) for(int i=0; i<npoints; i++) radii[i] = data->radii[i]; } } if(data->lids.size()>0){ lids.resize(data->numof_points); #pragma omp parallel if(npoints > 2000) { #pragma omp for //schedule(dynamic,256) for(int i=0; i<npoints; i++) lids[i] = data->lids[i]; } } } }; /* ************************************************** */ /** * This is the main data structure for the PCL-tree. Each MPI process stores a doubly-linked * list of binNode objects, representing a path from the root to that process's leaf node. */ class binNode { public: pbinData data; int level; MPI_Comm comm; pbinNode parent; pbinNode kid; int Nglobal; // The total number of points stored in or beneath this tree node, across all member processes. //vector<double> matR; // rotation matrix on this level vector<double> rw; // workspace for fast rotation vector<double> proj; int coord_mv; // which coord to use double median; // median of coord_mv //vector<int> cluster_to_kid_membership; vector<int> rank_colors; // Length is equal to size of comm. Stores the child that each MPI rank belongs to. int chid; // This node's child id. 
struct Options{ string splitter; // splitter type: 0 mtree 1 maxVar int flag_r; // do not rotate (0), rotate on root level (1) or rotate on every level (2) int flag_c; // choose coord randomly (0), or with max variance (1) int hypertree; // repartition points using hypertree(1) or oldtree (0) int debug_verbose; // print extensive information and/or compare results against direct search. int timing_verbose; // print timing. int pruning_verbose; // Print pruning statistics in tree query operations. int flops_verbose; // now it's useful, to be deleted later static const int max_max_treelevel=50; // Maximum allowable value for maxlev. Options() : splitter("rkdt"),hypertree(1),flag_r(1),flag_c(0),flops_verbose(false),debug_verbose(false),timing_verbose(false),pruning_verbose(false) {;} }; Options options; ///< This nodes Options object. //------------- Methods binNode() : data(NULL),level(0),chid(0),comm(MPI_COMM_WORLD),parent(NULL),kid(NULL) {;} binNode(int ci) : data(NULL),level(0),chid(ci),comm(MPI_COMM_WORLD),parent(NULL),kid(NULL) {;} virtual ~binNode(); //void destroy_node(pbinNode inNode); void Insert(pbinNode inParent, int maxp, int maxlev, int minCommSize, MPI_Comm comm, pbinData inData); //void Insert_hypertree(pbinNode inParent, int maxp, int maxlev, int minCommSize, MPI_Comm comm, pbinData inData); //void Insert_oldtree(pbinNode inParent, int maxp, int maxlev, int minCommSize, MPI_Comm comm, pbinData inData); void InsertInMemory(pbinNode in_parent, int maxp, int maxLevel, int minCommSize, MPI_Comm inComm, pbinData inData, binData *datapool, vector<int> &gid2lid); void parvar(double *points, int numof_points, int dim, double *mean, double *var); void maxVarSplitter( double *points, int numof_points, int dim, int flag_c, // output int &coord_mv, double &medV, int* point_to_hyperplane_membership, int *local_numof_points_per_hyperplane, int *global_numof_points_per_hyperplane, MPI_Comm comm); void medianSplitter(// input const vector<double> &px, // output double &medV, int* point_to_hyperplane_membership, int* local_numof_points_per_hyperplane, int* global_numof_points_per_hyperplane, MPI_Comm comm ); void getProjection(double * points, int numof_points, int dim, double *proj, MPI_Comm comm); void furthestPoint(double *points, int numof_points, int dim, double* query, double* furP, MPI_Comm comm); void mtreeSplitter( double *points, int numof_points, int dim, // output double *proj, double &medianValue, int* point_to_hyperplane_membership, int *local_numof_points_per_hyperplane, int *global_numof_points_per_hyperplane, MPI_Comm comm); double distributeSelect(vector<double> &arr, int ks, MPI_Comm comm); }; #endif
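/*
 * Illustrative sketch (not part of the header above, hypothetical name):
 * binData::Copy splits a flat array into one contiguous chunk per OpenMP
 * thread and memcpy's the chunks concurrently, with the last thread taking
 * the remainder. Reduced to a plain double array:
 */
#include <omp.h>
#include <cstring>

inline void parallel_copy(double *dst, const double *src, int n)
{
    #pragma omp parallel if(n > 2000)
    {
        int t = omp_get_thread_num();
        int numt = omp_get_num_threads();
        int chunk = n / numt;                                  // per-thread chunk
        int count = (t == numt - 1) ? n - chunk * (numt - 1)   // last thread: remainder
                                    : chunk;
        std::memcpy(dst + t * chunk, src + t * chunk, count * sizeof(double));
    }
}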
omp-parallel-single.c
#include <omp.h>
#include <stdio.h>

#define LEN 20

int main(void)
{
    int num[LEN] = {0}, k = 0;

    /* single: exactly one thread of the parallel team executes the whole
     * loop, so every element of num receives that one thread's id */
    #pragma omp parallel
    #pragma omp single
    for (k = 0; k < LEN; k++) {
        num[k] = omp_get_thread_num();
    }
    return 0;
}
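/*
 * Illustrative sketch (not part of the file above, hypothetical name):
 * replacing the single construct with a worksharing loop distributes the
 * iterations across the team, so the recorded thread ids vary per chunk.
 */
#include <omp.h>

static void fill_thread_ids(int *num, int len)
{
    #pragma omp parallel for
    for (int k = 0; k < len; k++) {
        num[k] = omp_get_thread_num();
    }
}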
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "api/libperf.h" #include "lib/libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <sys/poll.h> #include <locale.h> #if defined (HAVE_MPI) # include <mpi.h> #elif defined (HAVE_RTE) # include<rte.h> #endif #define MAX_BATCH_FILES 32 #define MAX_CPUS 1024 #define TL_RESOURCE_NAME_NONE "<none>" #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:" #define TEST_ID_UNDEFINED -1 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; const char *overhead_lat; unsigned window_size; } test_type_t; typedef struct perftest_params { ucx_perf_params_t super; int test_id; } perftest_params_t; struct perftest_context { perftest_params_t params; const char *server_addr; int port; int mpi; unsigned num_cpus; unsigned cpus[MAX_CPUS]; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", 
UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void (*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv"); } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final, int is_server, int is_multi_thread) { static const char *fmt_csv; static const char *fmt_numeric; static const char *fmt_plain; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } #if _OPENMP if (!final) { printf("[thread %d]", omp_get_thread_num()); } else if (flags & TEST_FLAG_PRINT_RESULTS) { printf("Final: "); } #endif if (is_multi_thread && final) { fmt_csv = "%4.0f,%.3f,%.2f,%.0f\n"; fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n"; fmt_plain = "%18.0f %29.3f %22.2f %23.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? 
fmt_numeric : fmt_plain, (double)result->iters, result->latency.total_average * 1000000.0, result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.total_average); } else { fmt_csv = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; fmt_numeric = "%'18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n"; fmt_plain = "%18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); } fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *overhead_lat_str; const char *test_data_str; const char *test_api_str; test_type_t *test; unsigned i; test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? NULL : &tests[ctx->params.test_id]; if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.super.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]); printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super)); } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", ucs_basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { overhead_lat_str = (test == NULL) ? 
"overhead" : test->overhead_lat; printf("+--------------+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| Stage | # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void print_memory_type_usage(void) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if (ucx_perf_mem_type_allocators[it] != NULL) { printf(" %s - %s\n", ucs_memory_type_names[it], ucs_memory_type_descs[it]); } } } static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%"PRIu64")\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%"PRIu64")\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the rest of\n"); 
printf(" the line is command-line arguments for the test.\n"); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted output\n"); printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" iov - scatter-gather list (iovec)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu)\n", ctx->params.super.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(opt_arg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_mem_type(const char *opt_arg, ucs_memory_type_t *mem_type) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if(!strcmp(opt_arg, ucs_memory_type_names[it]) && (ucx_perf_mem_type_allocators[it] != NULL)) { *mem_type = it; return UCS_OK; } } ucs_error("Unsupported memory type: \"%s\"", opt_arg); return UCS_ERR_INVALID_PARAM; } static ucs_status_t parse_mem_type_params(const char *opt_arg, ucs_memory_type_t *send_mem_type, ucs_memory_type_t *recv_mem_type) { const char *delim = ","; char *token = strtok((char*)opt_arg, delim); if (UCS_OK != parse_mem_type(token, send_mem_type)) { return UCS_ERR_INVALID_PARAM; } token = strtok(NULL, delim); if (NULL == token) { *recv_mem_type = *send_mem_type; return 
UCS_OK; } else { return parse_mem_type(token, recv_mem_type); } } static ucs_status_t parse_message_sizes_params(const char *opt_arg, ucx_perf_params_t *params) { const char delim = ','; size_t *msg_size_list, token_num, token_it; char *optarg_ptr, *optarg_ptr2; optarg_ptr = (char *)opt_arg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; msg_size_list = realloc(params->msg_size_list, sizeof(*params->msg_size_list) * token_num); if (NULL == msg_size_list) { return UCS_ERR_NO_MEMORY; } params->msg_size_list = msg_size_list; optarg_ptr = (char *)opt_arg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.am_hdr_size = 8; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t *params, char opt, const char *opt_arg) { char *optarg2 = NULL; test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return 
UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'M': if (!strcmp(opt_arg, "single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", error_prefix); return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { params->super.max_outstanding = test->window_size; } return UCS_OK; } static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name, int *line_num, perftest_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 
2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; char error_prefix[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } ++(*line_num); argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); ucs_snprintf_safe(error_prefix, sizeof(error_prefix), "in batch file '%s' line %d: ", file_name, *line_num); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("%s-%c %s: %s", error_prefix, c, optarg, ucs_status_string(status)); return status; } } status = adjust_test_params(params, error_prefix); if (status != UCS_OK) { return status; } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx) { char *endptr, *cpu_list = opt_arg; int cpu; ctx->num_cpus = 0; cpu = strtol(cpu_list, &endptr, 10); while (((*endptr == ',') || (*endptr == '\0')) && (ctx->num_cpus < MAX_CPUS)) { if (cpu < 0) { ucs_error("invalid cpu number detected: (%d)", cpu); return UCS_ERR_INVALID_PARAM; } ctx->cpus[ctx->num_cpus++] = cpu; if (*endptr == '\0') { break; } cpu_list = endptr + 1; /* skip the comma */ cpu = strtol(cpu_list, &endptr, 10); } if (*endptr == ',') { ucs_error("number of listed cpus exceeds the maximum supported value (%d)", MAX_CPUS); return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); ucx_perf_global_init(); /* initialize memory types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if (status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 
0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { 
status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, 
size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { ucs_trace_func(""); #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; 
for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->super.api == UCX_PERF_API_UCP) { if (strcmp(parent_params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.dev_name); } if (strcmp(parent_params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? "command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). */ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. 
(provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
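// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ucx_perftest): the same strtoul-based
// pattern that parse_message_sizes_params() above applies to the
// comma-separated message-size argument. The helper name parse_size_list and
// the sample input are invented for this demo.
// ---------------------------------------------------------------------------
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <vector>

static bool parse_size_list(const char *arg, std::vector<unsigned long> &out)
{
    const char *p = arg;
    char *end;

    while (*p != '\0') {
        errno = 0;
        unsigned long v = std::strtoul(p, &end, 10);
        /* same failure conditions as above: no digits consumed, overflow,
         * or a zero produced by a conversion error */
        if ((end == p) || ((v == ULONG_MAX) && (errno == ERANGE)) ||
            ((errno != 0) && (v == 0))) {
            return false;
        }
        out.push_back(v);
        if (*end == '\0') {
            break;          /* last token consumed */
        }
        if (*end != ',') {
            return false;   /* unexpected separator */
        }
        p = end + 1;        /* skip the comma */
    }
    return !out.empty();
}

int main()
{
    std::vector<unsigned long> sizes;
    if (parse_size_list("8,64,4096", sizes)) {
        for (unsigned long s : sizes) {
            std::printf("%lu\n", s);   /* prints 8, 64, 4096 */
        }
    }
    return 0;
}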
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja ([email protected]). All rights reserved. * Copyright 2008-2009 David G. Lowe ([email protected]). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *************************************************************************/ #ifndef FLANN_NNINDEX_H #define FLANN_NNINDEX_H #include <vector> //#include <iostream> #include "flann/general.h" #include "flann/util/matrix.h" #include "flann/util/params.h" #include "flann/util/result_set.h" #include "flann/util/dynamic_bitset.h" #include "flann/util/saving.h" namespace flann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * Builds the index */ virtual void buildIndex() { 
freeIndex(); cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds the index using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param id Id of the point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. * * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.data_type = flann_datatype_value<ElementType>::value; header.index_type = getType(); header.rows = size_; header.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different from the one to be created."); } if (header.index_type != getType()) { throw FLANNException("Saved index type is different from the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, 
Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { //#pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted);//params.sorted indices_to_ids(indices[i], indices[i], n); count += n; } } } else { //#pragma omp parallel num_threads(params.cores) { //this way KNNSimpleResultSet<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, 
queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = knnSearch(queries, indices_, dists, knn, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } /** * @brief Perform radius search * @param[in] queries The query points * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; size_t num_neighbors = std::min(indices.cols, dists.cols); int max_neighbors = params.max_neighbors; if (max_neighbors<0) max_neighbors = num_neighbors; else max_neighbors = std::min(max_neighbors,(int)num_neighbors); if (max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { // explicitly indicated to use unbounded radius result set // and we know there'll be enough room for resulting indices and dists if (params.max_neighbors<0 && (num_neighbors>=size())) { #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if (n>num_neighbors) n = num_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>max_neighbors) n = max_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ 
int radiusSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = radiusSearch(queries, indices_, dists, radius, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform radius search * @param[in] queries The query points * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; // just count neighbors if (params.max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); if (params.max_neighbors<0) { // search for all neighbors #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>params.max_neighbors) n = params.max_neighbors; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = radiusSearch(queries, indices_, dists, radius, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0; protected: virtual void freeIndex() = 0; virtual void buildIndexImpl() = 0; size_t id_to_index(size_t id) { if (ids_.size()==0) { return id; 
} size_t point_index = size_t(-1); if (ids_[id]==id) { return id; } else { // binary search size_t start = 0; size_t end = ids_.size(); while (start<end) { size_t mid = (start+end)/2; if (ids_[mid]==id) { point_index = mid; break; } else if (ids_[mid]<id) { start = mid + 1; } else { end = mid; } } } return point_index; } void indices_to_ids(const size_t* in, size_t* out, size_t size) const { if (removed_) { for (size_t i=0;i<size;++i) { out[i] = ids_[in[i]]; } } } void setDataset(const Matrix<ElementType>& dataset) { size_ = dataset.rows; veclen_ = dataset.cols; last_id_ = 0; ids_.clear(); removed_points_.clear(); removed_ = false; removed_count_ = 0; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = dataset[i]; } } void extendDataset(const Matrix<ElementType>& new_points) { size_t new_size = size_ + new_points.rows; if (removed_) { removed_points_.resize(new_size); ids_.resize(new_size); } points_.resize(new_size); for (size_t i=size_;i<new_size;++i) { points_[i] = new_points[i-size_]; if (removed_) { ids_[i] = last_id_++; removed_points_.reset(i); } } size_ = new_size; } void cleanRemovedPoints() { if (!removed_) return; size_t last_idx = 0; for (size_t i=0;i<size_;++i) { if (!removed_points_.test(i)) { points_[last_idx] = points_[i]; ids_[last_idx] = ids_[i]; removed_points_.reset(last_idx); ++last_idx; } } points_.resize(last_idx); ids_.resize(last_idx); removed_points_.resize(last_idx); size_ = last_idx; removed_count_ = 0; } void swap(NNIndex& other) { std::swap(distance_, other.distance_); std::swap(last_id_, other.last_id_); std::swap(size_, other.size_); std::swap(size_at_build_, other.size_at_build_); std::swap(veclen_, other.veclen_); std::swap(index_params_, other.index_params_); std::swap(removed_, other.removed_); std::swap(removed_points_, other.removed_points_); std::swap(removed_count_, other.removed_count_); std::swap(ids_, other.ids_); std::swap(points_, other.points_); std::swap(data_ptr_, other.data_ptr_); } protected: /** * The distance functor */ Distance distance_; /** * Each index point has an associated ID. IDs are assigned sequentially in * increasing order. This indicates the ID assigned to the last point added to the * index. */ size_t last_id_; /** * Number of points in the index (and database) */ size_t size_; /** * Number of features in the dataset when the index was last built. */ size_t size_at_build_; /** * Size of one point in the index (and database) */ size_t veclen_; /** * Parameters of the index. 
*/ IndexParams index_params_; /** * Flag indicating if at least one point was removed from the index */ bool removed_; /** * Array used to mark points removed from the index */ DynamicBitset removed_points_; /** * Number of points removed from the index */ size_t removed_count_; /** * Array of point IDs, returned by nearest-neighbour operations */ std::vector<size_t> ids_; /** * Point data */ std::vector<ElementType*> points_; /** * Pointer to dataset memory if allocated by this index, otherwise NULL */ ElementType* data_ptr_; }; #define USING_BASECLASS_SYMBOLS \ using NNIndex<Distance>::distance_;\ using NNIndex<Distance>::size_;\ using NNIndex<Distance>::size_at_build_;\ using NNIndex<Distance>::veclen_;\ using NNIndex<Distance>::index_params_;\ using NNIndex<Distance>::removed_points_;\ using NNIndex<Distance>::ids_;\ using NNIndex<Distance>::removed_;\ using NNIndex<Distance>::points_;\ using NNIndex<Distance>::extendDataset;\ using NNIndex<Distance>::setDataset;\ using NNIndex<Distance>::cleanRemovedPoints;\ using NNIndex<Distance>::indices_to_ids; } #endif //FLANN_NNINDEX_H
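// ---------------------------------------------------------------------------
// Hedged usage sketch for the knnSearch() interface declared above, through
// the concrete flann::Index wrapper. Assumes the FLANN headers are installed;
// the dataset values, the single kd-tree, and the 32-check search budget are
// arbitrary choices for this demo.
// ---------------------------------------------------------------------------
#include <flann/flann.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Four 2-D points, row-major, wrapped by flann::Matrix (no copy is made).
    std::vector<float> data = {0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 5.f, 5.f};
    flann::Matrix<float> dataset(data.data(), 4, 2);

    flann::Index<flann::L2<float> > index(dataset, flann::KDTreeIndexParams(1));
    index.buildIndex();

    std::vector<float> q = {0.1f, 0.1f};
    flann::Matrix<float> query(q.data(), 1, 2);

    // One query row, two neighbours; knnSearch fills one row of ids/dists.
    std::vector<size_t> id_buf(2);
    std::vector<float> dist_buf(2);
    flann::Matrix<size_t> indices(id_buf.data(), 1, 2);
    flann::Matrix<float> dists(dist_buf.data(), 1, 2);

    index.knnSearch(query, indices, dists, 2, flann::SearchParams(32));
    std::printf("nearest id=%zu dist=%f\n", indices[0][0], dists[0][0]);
    return 0;
}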
GB_binop__pair_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_fp32 // A.*B function (eWiseMult): GB_AemultB__pair_fp32 // A*D function (colscale): GB_AxD__pair_fp32 // D*A function (rowscale): GB_DxB__pair_fp32 // C+=B function (dense accum): GB_Cdense_accumB__pair_fp32 // C+=b function (dense accum): GB_Cdense_accumb__pair_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_fp32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: float // A type: float // B,b type: float // BinaryOp: cij = 1 #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP32 || GxB_NO_PAIR_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pair_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
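// ---------------------------------------------------------------------------
// Hedged demo of what this file's PAIR operator computes: GB_BINOP sets
// z = 1 regardless of x and y, so eWiseMult with GxB_PAIR_FP32 yields the
// intersection pattern of A and B with every entry equal to 1. Assumes a
// SuiteSparse:GraphBLAS installation; matrix sizes and values are made up.
// ---------------------------------------------------------------------------
#include <GraphBLAS.h>
#include <stdio.h>

int main(void)
{
    GrB_Matrix A, B, C;
    float x = 0.0f;

    GrB_init(GrB_NONBLOCKING);
    GrB_Matrix_new(&A, GrB_FP32, 2, 2);
    GrB_Matrix_new(&B, GrB_FP32, 2, 2);
    GrB_Matrix_new(&C, GrB_FP32, 2, 2);
    GrB_Matrix_setElement_FP32(A, 3.5f, 0, 0);      /* A(0,0) = 3.5  */
    GrB_Matrix_setElement_FP32(B, -2.0f, 0, 0);     /* B(0,0) = -2.0 */
    GrB_Matrix_setElement_FP32(B, 7.0f, 1, 1);      /* only in B: dropped */

    /* C = A .* B with the PAIR operator */
    GrB_Matrix_eWiseMult_BinaryOp(C, NULL, NULL, GxB_PAIR_FP32, A, B, NULL);

    GrB_Matrix_extractElement_FP32(&x, C, 0, 0);
    printf("C(0,0) = %g\n", x);                     /* prints 1, not -7 */

    GrB_Matrix_free(&A);
    GrB_Matrix_free(&B);
    GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}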
ast-dump-openmp-begin-declare-variant_12.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX // expected-no-diagnostics #ifdef __cplusplus #define OVERLOADABLE #else #define OVERLOADABLE __attribute__((overloadable)) #endif OVERLOADABLE int also_before(void) { return 1; } OVERLOADABLE int also_before(int i) { return 2; } OVERLOADABLE int also_before(float f) { return 0; } OVERLOADABLE int also_before(double d) { return 3; } OVERLOADABLE int also_before(long l) { return 4; } #pragma omp begin declare variant match(implementation = {vendor(llvm)}) OVERLOADABLE int also_before(void) { return 0; } OVERLOADABLE int also_before(int i) { return 0; } // No float! OVERLOADABLE int also_before(double d) { return 0; } OVERLOADABLE int also_before(long l) { return 0; } #pragma omp end declare variant int main(void) { // Should return 0. return also_before() + also_before(1) + also_before(2.0f) + also_before(3.0) + also_before(4L); } // Make sure: // - we see the specialization in the AST // - we pick the right callees // C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:14:1> line:12:5 used also_before 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:14:1> // C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:13:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // C-NEXT: | |-OverloadableAttr [[ADDR_4:0x[a-z0-9]*]] <line:8:37> // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_5:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_6:0x[a-z0-9]*]] <col:22> 'int ({{.*}})' Function [[ADDR_7:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <col:22, line:18:1> line:16:5 used also_before 'int (int)' // C-NEXT: | |-ParmVarDecl [[ADDR_9:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int' // C-NEXT: | |-CompoundStmt [[ADDR_10:0x[a-z0-9]*]] <col:24, line:18:1> // C-NEXT: | | `-ReturnStmt [[ADDR_11:0x[a-z0-9]*]] <line:17:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_12:0x[a-z0-9]*]] <col:10> 'int' 2 // C-NEXT: | |-OverloadableAttr [[ADDR_13:0x[a-z0-9]*]] <line:8:37> // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_14:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_15:0x[a-z0-9]*]] <col:22> 'int (int)' Function [[ADDR_16:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (int)' // C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] <col:22, line:22:1> line:20:5 used also_before 'int (float)' // C-NEXT: | |-ParmVarDecl [[ADDR_18:0x[a-z0-9]*]] <col:17, col:23> col:23 f 'float' // C-NEXT: | |-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:26, line:22:1> // C-NEXT: | | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <line:21:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-OverloadableAttr [[ADDR_22:0x[a-z0-9]*]] <line:8:37> // C-NEXT: |-FunctionDecl [[ADDR_23:0x[a-z0-9]*]] <col:22, line:26:1> line:24:5 used also_before 'int (double)' // C-NEXT: | |-ParmVarDecl [[ADDR_24:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double' // C-NEXT: | |-CompoundStmt [[ADDR_25:0x[a-z0-9]*]] <col:27, line:26:1> // C-NEXT: | | `-ReturnStmt [[ADDR_26:0x[a-z0-9]*]] <line:25:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int' 3 // C-NEXT: | |-OverloadableAttr [[ADDR_28:0x[a-z0-9]*]] <line:8:37> // 
C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_30:0x[a-z0-9]*]] <col:22> 'int (double)' Function [[ADDR_31:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (double)' // C-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <col:22, line:30:1> line:28:5 used also_before 'int (long)' // C-NEXT: | |-ParmVarDecl [[ADDR_33:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long' // C-NEXT: | |-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:25, line:30:1> // C-NEXT: | | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:29:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int' 4 // C-NEXT: | |-OverloadableAttr [[ADDR_37:0x[a-z0-9]*]] <line:8:37> // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_38:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:22> 'int (long)' Function [[ADDR_40:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (long)' // C-NEXT: |-FunctionDecl [[ADDR_7]] <col:22, line:36:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <line:34:23, line:36:1> // C-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <line:35:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-OverloadableAttr [[ADDR_44:0x[a-z0-9]*]] <line:8:37> // C-NEXT: |-FunctionDecl [[ADDR_16]] <col:22, line:40:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (int)' // C-NEXT: | |-ParmVarDecl [[ADDR_45:0x[a-z0-9]*]] <line:38:17, col:21> col:21 i 'int' // C-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:24, line:40:1> // C-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:39:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-OverloadableAttr [[ADDR_49:0x[a-z0-9]*]] <line:8:37> // C-NEXT: |-FunctionDecl [[ADDR_31]] <col:22, line:45:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (double)' // C-NEXT: | |-ParmVarDecl [[ADDR_50:0x[a-z0-9]*]] <line:43:17, col:24> col:24 d 'double' // C-NEXT: | |-CompoundStmt [[ADDR_51:0x[a-z0-9]*]] <col:27, line:45:1> // C-NEXT: | | `-ReturnStmt [[ADDR_52:0x[a-z0-9]*]] <line:44:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_53:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-OverloadableAttr [[ADDR_54:0x[a-z0-9]*]] <line:8:37> // C-NEXT: |-FunctionDecl [[ADDR_40]] <col:22, line:49:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (long)' // C-NEXT: | |-ParmVarDecl [[ADDR_55:0x[a-z0-9]*]] <line:47:17, col:22> col:22 l 'long' // C-NEXT: | |-CompoundStmt [[ADDR_56:0x[a-z0-9]*]] <col:25, line:49:1> // C-NEXT: | | `-ReturnStmt [[ADDR_57:0x[a-z0-9]*]] <line:48:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_58:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-OverloadableAttr [[ADDR_59:0x[a-z0-9]*]] <line:8:37> // C-NEXT: `-FunctionDecl [[ADDR_60:0x[a-z0-9]*]] <line:53:1, line:56:1> line:53:5 main 'int ({{.*}})' // C-NEXT: `-CompoundStmt [[ADDR_61:0x[a-z0-9]*]] <col:16, line:56:1> // C-NEXT: `-ReturnStmt [[ADDR_62:0x[a-z0-9]*]] <line:55:3, col:96> // C-NEXT: `-BinaryOperator [[ADDR_63:0x[a-z0-9]*]] <col:10, col:96> 'int' '+' // C-NEXT: |-BinaryOperator [[ADDR_64:0x[a-z0-9]*]] <col:10, col:78> 'int' '+' // C-NEXT: | |-BinaryOperator [[ADDR_65:0x[a-z0-9]*]] <col:10, col:59> 'int' '+' // C-NEXT: | | |-BinaryOperator [[ADDR_66:0x[a-z0-9]*]] <col:10, col:39> 'int' '+' // C-NEXT: | | | |-PseudoObjectExpr 
[[ADDR_67:0x[a-z0-9]*]] <col:10, col:22> 'int' // C-NEXT: | | | | |-CallExpr [[ADDR_68:0x[a-z0-9]*]] <col:10, col:22> 'int' // C-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | | | `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // C-NEXT: | | | | `-CallExpr [[ADDR_71:0x[a-z0-9]*]] <line:8:22, line:55:22> 'int' // C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <line:8:22> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | | `-DeclRefExpr [[ADDR_6]] <col:22> 'int ({{.*}})' Function [[ADDR_7]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | | | `-PseudoObjectExpr [[ADDR_73:0x[a-z0-9]*]] <line:55:26, col:39> 'int' // C-NEXT: | | | |-CallExpr [[ADDR_74:0x[a-z0-9]*]] <col:26, col:39> 'int' // C-NEXT: | | | | |-ImplicitCastExpr [[ADDR_75:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay> // C-NEXT: | | | | | `-DeclRefExpr [[ADDR_76:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_8]] 'also_before' 'int (int)' // C-NEXT: | | | | `-IntegerLiteral [[ADDR_77:0x[a-z0-9]*]] <col:38> 'int' 1 // C-NEXT: | | | `-CallExpr [[ADDR_78:0x[a-z0-9]*]] <line:8:22, line:55:39> 'int' // C-NEXT: | | | |-ImplicitCastExpr [[ADDR_79:0x[a-z0-9]*]] <line:8:22> 'int (*)(int)' <FunctionToPointerDecay> // C-NEXT: | | | | `-DeclRefExpr [[ADDR_15]] <col:22> 'int (int)' Function [[ADDR_16]] 'also_before[implementation={vendor(llvm)}]' 'int (int)' // C-NEXT: | | | `-IntegerLiteral [[ADDR_77]] <line:55:38> 'int' 1 // C-NEXT: | | `-CallExpr [[ADDR_80:0x[a-z0-9]*]] <col:43, col:59> 'int' // C-NEXT: | | |-ImplicitCastExpr [[ADDR_81:0x[a-z0-9]*]] <col:43> 'int (*)(float)' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_82:0x[a-z0-9]*]] <col:43> 'int (float)' {{.*}}Function [[ADDR_17]] 'also_before' 'int (float)' // C-NEXT: | | `-FloatingLiteral [[ADDR_83:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00 // C-NEXT: | `-PseudoObjectExpr [[ADDR_84:0x[a-z0-9]*]] <col:63, col:78> 'int' // C-NEXT: | |-CallExpr [[ADDR_85:0x[a-z0-9]*]] <col:63, col:78> 'int' // C-NEXT: | | |-ImplicitCastExpr [[ADDR_86:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_87:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_23]] 'also_before' 'int (double)' // C-NEXT: | | `-FloatingLiteral [[ADDR_88:0x[a-z0-9]*]] <col:75> 'double' 3.000000e+00 // C-NEXT: | `-CallExpr [[ADDR_89:0x[a-z0-9]*]] <line:8:22, line:55:78> 'int' // C-NEXT: | |-ImplicitCastExpr [[ADDR_90:0x[a-z0-9]*]] <line:8:22> 'int (*)(double)' <FunctionToPointerDecay> // C-NEXT: | | `-DeclRefExpr [[ADDR_30]] <col:22> 'int (double)' Function [[ADDR_31]] 'also_before[implementation={vendor(llvm)}]' 'int (double)' // C-NEXT: | `-FloatingLiteral [[ADDR_88]] <line:55:75> 'double' 3.000000e+00 // C-NEXT: `-PseudoObjectExpr [[ADDR_91:0x[a-z0-9]*]] <col:82, col:96> 'int' // C-NEXT: |-CallExpr [[ADDR_92:0x[a-z0-9]*]] <col:82, col:96> 'int' // C-NEXT: | |-ImplicitCastExpr [[ADDR_93:0x[a-z0-9]*]] <col:82> 'int (*)(long)' <FunctionToPointerDecay> // C-NEXT: | | `-DeclRefExpr [[ADDR_94:0x[a-z0-9]*]] <col:82> 'int (long)' {{.*}}Function [[ADDR_32]] 'also_before' 'int (long)' // C-NEXT: | `-IntegerLiteral [[ADDR_95:0x[a-z0-9]*]] <col:94> 'long' 4 // C-NEXT: `-CallExpr [[ADDR_96:0x[a-z0-9]*]] <line:8:22, line:55:96> 'int' // C-NEXT: |-ImplicitCastExpr [[ADDR_97:0x[a-z0-9]*]] <line:8:22> 'int (*)(long)' <FunctionToPointerDecay> // 
C-NEXT: | `-DeclRefExpr [[ADDR_39]] <col:22> 'int (long)' Function [[ADDR_40]] 'also_before[implementation={vendor(llvm)}]' 'int (long)' // C-NEXT: `-IntegerLiteral [[ADDR_95]] <line:55:94> 'long' 4 // CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:14:1> line:12:5 used also_before 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:14:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:13:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:16:1, line:18:1> line:16:5 used also_before 'int (int)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_8:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int' // CXX-NEXT: | |-CompoundStmt [[ADDR_9:0x[a-z0-9]*]] <col:24, line:18:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_10:0x[a-z0-9]*]] <line:17:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_11:0x[a-z0-9]*]] <col:10> 'int' 2 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_12:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_13:0x[a-z0-9]*]] <line:38:1> 'int (int)' Function [[ADDR_14:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (int)' // CXX-NEXT: |-FunctionDecl [[ADDR_15:0x[a-z0-9]*]] <line:20:1, line:22:1> line:20:5 used also_before 'int (float)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_16:0x[a-z0-9]*]] <col:17, col:23> col:23 f 'float' // CXX-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:26, line:22:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:21:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] <line:24:1, line:26:1> line:24:5 used also_before 'int (double)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_21:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double' // CXX-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:27, line:26:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:25:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 3 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:43:1> 'int (double)' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (double)' // CXX-NEXT: |-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:28:1, line:30:1> line:28:5 used also_before 'int (long)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_29:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long' // CXX-NEXT: | |-CompoundStmt [[ADDR_30:0x[a-z0-9]*]] <col:25, line:30:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_31:0x[a-z0-9]*]] <line:29:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int' 4 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_33:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_34:0x[a-z0-9]*]] <line:47:1> 'int (long)' Function [[ADDR_35:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (long)' // CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:34:1, line:36:1> line:34:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt 
[[ADDR_36:0x[a-z0-9]*]] <col:23, line:36:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_37:0x[a-z0-9]*]] <line:35:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_38:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_14]] <line:38:1, line:40:1> line:38:1 also_before[implementation={vendor(llvm)}] 'int (int)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_39:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int' // CXX-NEXT: | `-CompoundStmt [[ADDR_40:0x[a-z0-9]*]] <col:24, line:40:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_41:0x[a-z0-9]*]] <line:39:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_42:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_27]] <line:43:1, line:45:1> line:43:1 also_before[implementation={vendor(llvm)}] 'int (double)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_43:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double' // CXX-NEXT: | `-CompoundStmt [[ADDR_44:0x[a-z0-9]*]] <col:27, line:45:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_45:0x[a-z0-9]*]] <line:44:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_46:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_35]] <line:47:1, line:49:1> line:47:1 also_before[implementation={vendor(llvm)}] 'int (long)' // CXX-NEXT: | |-ParmVarDecl [[ADDR_47:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long' // CXX-NEXT: | `-CompoundStmt [[ADDR_48:0x[a-z0-9]*]] <col:25, line:49:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_49:0x[a-z0-9]*]] <line:48:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_50:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: `-FunctionDecl [[ADDR_51:0x[a-z0-9]*]] <line:53:1, line:56:1> line:53:5 main 'int ({{.*}})' // CXX-NEXT: `-CompoundStmt [[ADDR_52:0x[a-z0-9]*]] <col:16, line:56:1> // CXX-NEXT: `-ReturnStmt [[ADDR_53:0x[a-z0-9]*]] <line:55:3, col:96> // CXX-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:96> 'int' '+' // CXX-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:78> 'int' '+' // CXX-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:59> 'int' '+' // CXX-NEXT: | | |-BinaryOperator [[ADDR_57:0x[a-z0-9]*]] <col:10, col:39> 'int' '+' // CXX-NEXT: | | | |-PseudoObjectExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int' // CXX-NEXT: | | | | |-CallExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:22> 'int' // CXX-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | | | `-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CXX-NEXT: | | | | `-CallExpr [[ADDR_62:0x[a-z0-9]*]] <line:34:1, line:55:22> 'int' // CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_63:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | | | `-PseudoObjectExpr [[ADDR_64:0x[a-z0-9]*]] <line:55:26, col:39> 'int' // CXX-NEXT: | | | |-CallExpr [[ADDR_65:0x[a-z0-9]*]] <col:26, col:39> 'int' // CXX-NEXT: | | | | |-ImplicitCastExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay> // CXX-NEXT: | | | | | `-DeclRefExpr [[ADDR_67:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_7]] 'also_before' 'int (int)' // CXX-NEXT: | | | | `-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:38> 'int' 1 // CXX-NEXT: | | | `-CallExpr [[ADDR_69:0x[a-z0-9]*]] <line:38:1, line:55:39> 'int' // CXX-NEXT: | | | |-ImplicitCastExpr [[ADDR_70:0x[a-z0-9]*]] <line:38:1> 'int (*)(int)' <FunctionToPointerDecay> // CXX-NEXT: | | | 
| `-DeclRefExpr [[ADDR_13]] <col:1> 'int (int)' Function [[ADDR_14]] 'also_before[implementation={vendor(llvm)}]' 'int (int)' // CXX-NEXT: | | | `-IntegerLiteral [[ADDR_68]] <line:55:38> 'int' 1 // CXX-NEXT: | | `-CallExpr [[ADDR_71:0x[a-z0-9]*]] <col:43, col:59> 'int' // CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <col:43> 'int (*)(float)' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_73:0x[a-z0-9]*]] <col:43> 'int (float)' {{.*}}Function [[ADDR_15]] 'also_before' 'int (float)' // CXX-NEXT: | | `-FloatingLiteral [[ADDR_74:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00 // CXX-NEXT: | `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <col:63, col:78> 'int' // CXX-NEXT: | |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:63, col:78> 'int' // CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_20]] 'also_before' 'int (double)' // CXX-NEXT: | | `-FloatingLiteral [[ADDR_79:0x[a-z0-9]*]] <col:75> 'double' 3.000000e+00 // CXX-NEXT: | `-CallExpr [[ADDR_80:0x[a-z0-9]*]] <line:43:1, line:55:78> 'int' // CXX-NEXT: | |-ImplicitCastExpr [[ADDR_81:0x[a-z0-9]*]] <line:43:1> 'int (*)(double)' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_26]] <col:1> 'int (double)' Function [[ADDR_27]] 'also_before[implementation={vendor(llvm)}]' 'int (double)' // CXX-NEXT: | `-FloatingLiteral [[ADDR_79]] <line:55:75> 'double' 3.000000e+00 // CXX-NEXT: `-PseudoObjectExpr [[ADDR_82:0x[a-z0-9]*]] <col:82, col:96> 'int' // CXX-NEXT: |-CallExpr [[ADDR_83:0x[a-z0-9]*]] <col:82, col:96> 'int' // CXX-NEXT: | |-ImplicitCastExpr [[ADDR_84:0x[a-z0-9]*]] <col:82> 'int (*)(long)' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_85:0x[a-z0-9]*]] <col:82> 'int (long)' {{.*}}Function [[ADDR_28]] 'also_before' 'int (long)' // CXX-NEXT: | `-IntegerLiteral [[ADDR_86:0x[a-z0-9]*]] <col:94> 'long' 4 // CXX-NEXT: `-CallExpr [[ADDR_87:0x[a-z0-9]*]] <line:47:1, line:55:96> 'int' // CXX-NEXT: |-ImplicitCastExpr [[ADDR_88:0x[a-z0-9]*]] <line:47:1> 'int (*)(long)' <FunctionToPointerDecay> // CXX-NEXT: | `-DeclRefExpr [[ADDR_34]] <col:1> 'int (long)' Function [[ADDR_35]] 'also_before[implementation={vendor(llvm)}]' 'int (long)' // CXX-NEXT: `-IntegerLiteral [[ADDR_86]] <line:55:94> 'long' 4
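// A minimal sketch (not part of the LIT test above) of the same
// `begin declare variant` mechanism without the FileCheck plumbing.
// Compiled with `clang -fopenmp`, the call resolves to the vendor(llvm)
// variant and prints 0; a compiler that does not match
// implementation={vendor(llvm)} prints 1.
#include <stdio.h>

int base(void) { return 1; }

#pragma omp begin declare variant match(implementation = {vendor(llvm)})
int base(void) { return 0; }
#pragma omp end declare variant

int main(void) {
  printf("%d\n", base()); /* 0 under clang -fopenmp, 1 otherwise */
  return 0;
}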
GB_unop__identity_fc32_int16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_int16)
// op(A') function:  GB (_unop_tran__identity_fc32_int16)

// C type:   GxB_FC32_t
// A type:   int16_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    /* aij = Ax [pA] */                                 \
    int16_t aij = Ax [pA] ;                             \
    /* Cx [pC] = op (cast (aij)) */                     \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;      \
    Cx [pC] = z ;                                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_int16)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
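// A minimal standalone sketch (not part of the generated file above) of the
// int16 -> GxB_FC32_t cast this kernel hard-codes.  It assumes a C99 complex
// environment; FC32 and CMPLXF_ below are illustrative stand-ins for
// GxB_FC32_t and GxB_CMPLXF, which live in the GraphBLAS headers.
#include <stdio.h>
#include <stdint.h>
#include <complex.h>

typedef float complex FC32 ;                        // stand-in for GxB_FC32_t
#define CMPLXF_(re,im) ((FC32) ((re) + (im)*I))     // stand-in for GxB_CMPLXF

int main (void)
{
    int16_t Ax [4] = { -3, 0, 7, 42 } ;
    FC32 Cx [4] ;
    // identity unary op with typecast: cij = (float complex) aij
    for (int p = 0 ; p < 4 ; p++)
    {
        Cx [p] = CMPLXF_ ((float) Ax [p], 0) ;
    }
    for (int p = 0 ; p < 4 ; p++)
    {
        printf ("%g + %gi\n", crealf (Cx [p]), cimagf (Cx [p])) ;
    }
    return (0) ;
}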
resize.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                 RRRR   EEEEE  SSSSS  IIIII  ZZZZZ  EEEEE                    %
%                 R   R  E      SS       I       ZZ  E                        %
%                 RRRR   EEE     SSS     I     ZZZ   EEE                      %
%                 R R    E         SS    I    ZZ     E                        %
%                 R  R   EEEEE  SSSSS  IIIII  ZZZZZ  EEEEE                    %
%                                                                             %
%                                                                             %
%                      MagickCore Image Resize Methods                        %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/string_.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif

/*
  Typedef declarations.
*/
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support,         /* filter region of support - the filter support limit */
    window_support,  /* window support, usually equal to support (expert only) */
    scale,           /* dimension to scale to fit window support (usually 1.0) */
    blur,            /* x-scale (blur-sharpen) */
    cubic[8];        /* cubic coefficients for smooth cubic filters */

  unsigned long
    signature;
};

/*
  Forward declarations.
*/
static MagickRealType
  I0(MagickRealType x),
  BesselOrderOne(MagickRealType);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%  They are all internal to this module only.  See AcquireResizeFilterInfo()
%  for details of the access to these functions, via the
%  GetResizeFilterSupport() and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealType FilterName(const MagickRealType x,
%       const ResizeFilter *resize_filter)
%
%    o x: the distance from the sampling point, generally in the range of
%      0 to support; GetResizeFilterWeight() ensures this is a positive
%      value.
%
%    o resize_filter: the current filter information.  This allows the
%      function to access the support, and possibly other pre-calculated
%      information defining the function.
%
*/

static MagickRealType Bessel(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    See Pratt "Digital Image Processing" p.97 for Bessel functions.

    This function is actually an X-scaled Jinc(x) function.
      http://mathworld.wolfram.com/JincFunction.html
    And on page 11 of...
      http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
  */
  if (x == 0.0)
    return((MagickRealType) (MagickPI/4.0));
  return(BesselOrderOne(MagickPI*x)/(2.0*x));
}

static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd-order cosine windowing function.
  */
  return(0.42+0.5*cos(MagickPI*(double) x)+0.08*cos(2.0*MagickPI*(double) x));
}

static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd-order cosine windowing function.
  */
  return((1-x)*cos(MagickPI*(double) x)+sin(MagickPI*(double) x)/MagickPI);
}

static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Just return 1.0; the filter will still be clipped by its support window.
  */
  return(1.0);
}

static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:

       Mitchell-Netravali  B=1/3 C=1/3  Qualitatively ideal cubic filter
       Catmull-Rom         B= 0  C=1/2  Cubic interpolation function
       Cubic B-Spline      B= 1  C= 0   Spline approximation of Gaussian
       Hermite             B= 0  C= 0   Quadratic spline (support = 1)

    See paper by Mitchell and Netravali,
      Reconstruction Filters in Computer Graphics
      Computer Graphics, Volume 22, Number 4, August 1988
      http://www.cs.utexas.edu/users/fussell/courses/cs384g/
        lectures/mitchell/Mitchell.pdf

    Coefficients are determined from B,C values
       P0 = (  6 - 2*B       )/6
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6
       P3 = ( 12 - 9*B - 6*C )/6
       Q0 = (      8*B +24*C )/6
       Q1 = (    -12*B -48*C )/6
       Q2 = (      6*B +30*C )/6
       Q3 = (    - 1*B - 6*C )/6

    which are used to define the filter...
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x <= 2

    which ensures the function is continuous in value and derivative (slope).
  */
  if (x < 1.0)
    return(resize_filter->cubic[0]+x*(resize_filter->cubic[1]+x*
      (resize_filter->cubic[2]+x*resize_filter->cubic[3])));
  if (x < 2.0)
    return(resize_filter->cubic[4]+x*(resize_filter->cubic[5]+x*
      (resize_filter->cubic[6]+x*resize_filter->cubic[7])));
  return(0.0);
}

static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  return(exp((double) (-2.0*x*x))*sqrt(2.0/MagickPI));
}

static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A cosine windowing function.
  */
  return(0.5+0.5*cos(MagickPI*(double) x));
}

static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    An offset cosine windowing function.
  */
  return(0.54+0.46*cos(MagickPI*(double) x));
}

static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
#define Alpha  6.5
#define I0A  (1.0/I0(Alpha))

  /*
    Kaiser Windowing Function (bessel windowing): Alpha is a free value
    from 5 to 8 (currently hardcoded to 6.5).

    Future: make Alpha, and the I0A pre-calculation, an 'expert' setting.
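
    The window evaluates to I0(Alpha*sqrt(1-x*x))/I0(Alpha): exactly 1.0
    at x=0, decaying smoothly toward the support edge.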
  */
  return(I0A*I0(Alpha*sqrt((double) (1.0-x*x))));
}

static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  long
    n,
    order;

  MagickRealType
    value;

  register long
    i;

  /*
    Lagrange piece-wise polynomial fit of Sinc: N is the 'order' of the
    lagrange function and depends on the overall support window size of the
    filter.  That is, a support of 2 gives a lagrange-4, or piece-wise
    cubic, function.

    Note that n is the specific piece of the piece-wise function to
    calculate.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(long) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(long) ((1.0*order)/2.0+x);  /* which piece does x belong to */
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd-order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    This function is actually an X-scaled Sinc(x) function.
  */
  if (x == 0.0)
    return(1.0);
  return(sin(MagickPI*(double) x)/(MagickPI*(double) x));
}

static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st-order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.  Choose from
%  these filters:
%
%  FIR (Finite Impulse Response) Filters
%      Box       Triangle   Quadratic
%      Cubic     Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite Impulse Response) Filters
%      Gaussian  Sinc       Bessel
%
%  Windowed Sinc/Bessel Method
%      Blackman  Hanning    Hamming
%      Kaiser    Lanczos (Sinc)
%
%  FIR filters are used as is, and are limited by that filter's support
%  window (unless over-ridden).  'Gaussian', while classed as an IIR filter,
%  is also simply clipped by its support size (1.5).
%
%  Requesting a windowed filter will return either a windowed Sinc, for a
%  one-dimensional orthogonal filtering method, such as ResizeImage(), or a
%  windowed Bessel for image operations requiring a two-dimensional
%  cylindrical filtering method, such as DistortImage().  Which function is
%  used is set by the "cylindrical" boolean argument.
%
%  Directly requesting 'Sinc' or 'Bessel' will force the use of that filter
%  function, with a default 'Blackman' windowing method.  This is not,
%  however, recommended as it removes the correct filter selection for
%  different filtering image operations.  Selecting a window filtering
%  method is better.
%
%  Lanczos is purely a special case of a Sinc-windowed Sinc, but defaulting
%  to a 3-lobe support rather than the default 4-lobe support.
%
%  Special options can be used to override specific, or all, of the filter
%  settings.
%  However, doing so is not advisable unless you have expert knowledge of
%  the use of resampling filter techniques.  Extreme caution is advised.
%
%    "filter:filter"    Select this function as the filter.
%        If a "filter:window" operation is not provided, then no windowing
%        will be performed on the selected filter (support clipped).
%
%        This can be used to force the use of a windowing method as filter,
%        request a 'Sinc' filter in a radially filtered operation, or the
%        'Bessel' filter for an orthogonally filtered operation.
%
%    "filter:window"    Select this windowing function for the filter.
%        While any filter could be used as a windowing function (using that
%        filter's first lobe over the whole support window), using a
%        non-windowing method is not advisable.
%
%    "filter:lobes"     Number of lobes to use for the Sinc/Bessel filter.
%        This is a simpler method of setting the filter support size that
%        will correctly handle the Sinc/Bessel switch for an operator's
%        filtering requirements.
%
%    "filter:support"   Set the support size for filtering to the size
%        given.  This is not recommended for Sinc/Bessel windowed filters,
%        but is used for simple filters like FIR filters, and the Gaussian
%        filter.  This will override any 'filter:lobes' option.
%
%    "filter:blur"      Scale the filter and support window by this amount.
%        A value >1 will generally result in a more blurred image with more
%        ringing effects, while a value <1 will sharpen the resulting image
%        with more aliasing and Moiré effects.
%
%    "filter:win-support"  Scale the windowing function to this size
%        instead.  This causes the windowing (or self-windowing Lagrange
%        filter) to act as if the support window is much larger than what
%        is actually supplied to the calling operator.  The filter,
%        however, is still clipped to the real support size given.  If
%        unset this will equal the normal filter support size.
%
%    "filter:b"
%    "filter:c"         Override the preset B,C values for a Cubic type of
%        filter.  If only one of these is given, it is assumed to be a
%        'Keys' type of filter, such that B+2C=1, where the Keys 'alpha'
%        value = C.
%
%    "filter:verbose"   Output verbose plotting data for graphing the
%        resulting filter over the whole support range (with blur effect).
%
%  Set a true un-windowed Sinc filter with 8 lobes (very slow):
%     -set option:filter:filter  Sinc
%     -set option:filter:lobes   8
%
%  For example, force an 8-lobe Lanczos (Sinc or Bessel) filter:
%     -filter Lanczos
%     -set option:filter:lobes   8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterTypes filter,const MagickRealType blur,
%        const MagickBooleanType cylindrical,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and
%      support.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  The image
%      artifact "filter:blur" will override this value.
%
%    o cylindrical: use a 1D orthogonal filter (Sinc) or a 2D cylindrical
%      (radial) filter (Bessel).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterTypes filter,const MagickRealType blur,
  const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterTypes
    filter_type,
    window_type;

  long
    filter_artifact;

  MagickRealType
    B,
    C;

  register ResizeFilter
    *resize_filter;

  /*
    Table mapping the given filter onto weighting and windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    A 'Sinc' filter function (must be windowed) could be upgraded to a
    'Bessel' filter if a "cylindrical" filter is requested, unless a "Sinc"
    filter is specifically requested.
  */
  static struct
  {
    FilterTypes
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter, BoxFilter },       /* undefined */
    { PointFilter,     BoxFilter },       /* special, nearest-neighbour filter */
    { BoxFilter,       BoxFilter },       /* Box averaging filter */
    { TriangleFilter,  BoxFilter },       /* Linear interpolation filter */
    { HermiteFilter,   BoxFilter },       /* Hermite interpolation filter */
    { SincFilter,      HanningFilter },   /* Hanning -- Cosine-Sinc */
    { SincFilter,      HammingFilter },   /* Hamming -- '' variation */
    { SincFilter,      BlackmanFilter },  /* Blackman -- 2*Cosine-Sinc */
    { GaussianFilter,  BoxFilter },       /* Gaussian blurring filter */
    { QuadraticFilter, BoxFilter },       /* Quadratic Gaussian approximation */
    { CubicFilter,     BoxFilter },       /* Cubic Gaussian approximation */
    { CatromFilter,    BoxFilter },       /* Cubic interpolator */
    { MitchellFilter,  BoxFilter },       /* 'ideal' cubic filter */
    { LanczosFilter,   SincFilter },      /* Special, 3-lobed Sinc-Sinc */
    { BesselFilter,    BlackmanFilter },  /* 3-lobed bessel - specific request */
    { SincFilter,      BlackmanFilter },  /* 4-lobed sinc - specific request */
    { SincFilter,      KaiserFilter },    /* Kaiser -- SqRoot-Sinc */
    { SincFilter,      WelshFilter },     /* Welsh -- Parabolic-Sinc */
    { SincFilter,      CubicFilter },     /* Parzen -- Cubic-Sinc */
    { LagrangeFilter,  BoxFilter },       /* Lagrange self-windowing filter */
    { SincFilter,      BohmanFilter },    /* Bohman -- 2*Cosine-Sinc */
    { SincFilter,      TriangleFilter }   /* Bartlett -- Triangle-Sinc */
  };

  /*
    Table mapping the filter/window function from the above table to the
    actual filter/window function call to use: the default support size for
    that filter as a weighting function, and the point to scale when that
    function is used as a windowing function (typically 1.0).
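
    As applied by GetResizeFilterWeight(), the final weight at a
    (blur-scaled) distance x is filter(x) * window(x*scale/window_support).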
  */
  static struct
  {
    MagickRealType
      (*function)(const MagickRealType, const ResizeFilter*),
      support,  /* default support size for the function as a filter */
      scale,    /* size of windowing function, for scaling the window */
      B,
      C;        /* Cubic filter factors for a CubicBC function, else ignored */
  } const filters[SentinelFilter] =
  {
    { Box,       0.0f,    0.5f,    0.0f, 0.0f },  /* Undefined */
    { Box,       0.0f,    0.5f,    0.0f, 0.0f },  /* Point */
    { Box,       0.5f,    0.5f,    0.0f, 0.0f },  /* Box */
    { Triangle,  1.0f,    1.0f,    0.0f, 0.0f },  /* Triangle */
    { CubicBC,   1.0f,    1.0f,    0.0f, 0.0f },  /* Hermite, Cubic B=C=0 */
    { Hanning,   1.0f,    1.0f,    0.0f, 0.0f },  /* Hanning, cosine window */
    { Hamming,   1.0f,    1.0f,    0.0f, 0.0f },  /* Hamming, '' variation */
    { Blackman,  1.0f,    1.0f,    0.0f, 0.0f },  /* Blackman, 2*cos window */
    { Gaussian,  1.5f,    1.5f,    0.0f, 0.0f },  /* Gaussian */
    { Quadratic, 1.5f,    1.5f,    0.0f, 0.0f },  /* Quadratic Gaussian */
    { CubicBC,   2.0f,    2.0f,    1.0f, 0.0f },  /* B-Spline of Gaussian B=1 C=0 */
    { CubicBC,   2.0f,    1.0f,    0.0f, 0.5f },  /* Catmull-Rom B=0 C=1/2 */
    { CubicBC,   2.0f,    1.0f,    1.0f/3.0f, 1.0f/3.0f },  /* Mitchell B=C=1/3 */
    { Sinc,      3.0f,    1.0f,    0.0f, 0.0f },  /* Lanczos, 3-lobed Sinc-Sinc */
    { Bessel,    3.2383f, 1.2197f, .0f,  .0f  },  /* 3-lobed Blackman-Bessel */
    { Sinc,      4.0f,    1.0f,    0.0f, 0.0f },  /* 4-lobed Blackman-Sinc */
    { Kaiser,    1.0f,    1.0f,    0.0f, 0.0f },  /* Kaiser, sq-root windowing */
    { Welsh,     1.0f,    1.0f,    0.0f, 0.0f },  /* Welsh, parabolic windowing */
    { CubicBC,   2.0f,    2.0f,    1.0f, 0.0f },  /* Parzen, B-Spline windowing */
    { Lagrange,  2.0f,    1.0f,    0.0f, 0.0f },  /* Lagrangian filter */
    { Bohman,    1.0f,    1.0f,    0.0f, 0.0f },  /* Bohman, 2*cosine windowing */
    { Triangle,  1.0f,    1.0f,    0.0f, 0.0f }   /* Bartlett, triangle windowing */
  };

  /*
    The known zero crossings of the Bessel() or the Jinc(x*PI) function.
    Found by using
      http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    for the Jv-function with v=1, then dividing the X-roots by PI (tabled
    below).
  */
  static MagickRealType
    bessel_zeros[16] =
    {
      1.21966989126651f,
      2.23313059438153f,
      3.23831548416624f,
      4.24106286379607f,
      5.24276437687019f,
      6.24392168986449f,
      7.24475986871996f,
      8.24539491395205f,
      9.24589268494948f,
      10.2462933487549f,
      11.2466227948779f,
      12.2468984611381f,
      13.2471325221811f,
      14.2473337358069f,
      15.2475085630373f,
      16.247661874701f
    };

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
  if (resize_filter == (ResizeFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  /*
    Filter blur -- scaling both filter and support window.
  */
  resize_filter->blur=blur;
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur=atof(artifact);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(MagickRealType) MagickEpsilon;
  /*
    Modifications for cylindrical filter use.
  */
  if ((cylindrical != MagickFalse) && (filter != SincFilter))
    {
      /*
        Promote a 1D Sinc filter to a 2D Bessel filter.
      */
      if (filter_type == SincFilter)
        filter_type=BesselFilter;
      /*
        Promote Lanczos (Sinc-Sinc) to Lanczos (Bessel-Bessel).
      */
      else if (filter_type == LanczosFilter)
        {
          filter_type=BesselFilter;
          window_type=BesselFilter;
        }
      /*
        Blur other filters appropriately to correct for cylindrical usage.
      */
      else if (filter_type == GaussianFilter)
        /*
          Gaussian is scaled by 4*ln(2) and not 4*sqrt(2/MagickPI) -
          according to Paul Heckbert's paper on EWA resampling.
        */
        resize_filter->blur*=2.0*log(2.0)/sqrt(2.0/MagickPI);
      else if (filter_type != BesselFilter)
        /*
          Filters with a 1.0 zero crossing are scaled by the first
          bessel_zero.
        */
        resize_filter->blur*=bessel_zeros[0];
    }
  /*
    Override filter selection.
  */
  artifact=GetImageArtifact(image,"filter:filter");
  if (artifact != (const char *) NULL)
    {
      /*
        Raw filter request - no window function.
      */
      filter_artifact=ParseMagickOption(MagickFilterOptions,
        MagickFalse,artifact);
      if ((UndefinedFilter < filter_artifact) &&
          (filter_artifact < SentinelFilter))
        {
          filter_type=(FilterTypes) filter_artifact;
          window_type=BoxFilter;
        }
      /*
        Lanczos is not a real filter but a self-windowing Sinc/Bessel.
      */
      if (filter_artifact == LanczosFilter)
        {
          filter_type=(cylindrical != MagickFalse) ? BesselFilter :
            LanczosFilter;
          window_type=(cylindrical != MagickFalse) ? BesselFilter :
            SincFilter;
        }
      /*
        Filter overridden with a specific window function?
      */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          filter_artifact=ParseMagickOption(MagickFilterOptions,
            MagickFalse,artifact);
          if ((UndefinedFilter < filter_artifact) &&
              (filter_artifact < SentinelFilter))
            {
              if (filter_artifact != LanczosFilter)
                window_type=(FilterTypes) filter_artifact;
              else
                window_type=(cylindrical != MagickFalse) ? BesselFilter :
                  SincFilter;
            }
        }
    }
  else
    {
      /*
        Window specified, but no filter function?  Assume Sinc/Bessel.
      */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          filter_artifact=ParseMagickOption(MagickFilterOptions,MagickFalse,
            artifact);
          if ((UndefinedFilter < filter_artifact) &&
              (filter_artifact < SentinelFilter))
            {
              filter_type=(cylindrical != MagickFalse) ? BesselFilter :
                SincFilter;
              if (filter_artifact != LanczosFilter)
                window_type=(FilterTypes) filter_artifact;
              else
                window_type=filter_type;
            }
        }
    }
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->window=filters[window_type].function;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickSignature;
  /*
    Filter support overrides.
  */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      long
        lobes = atol(artifact);

      if (lobes < 1)
        lobes=1;
      resize_filter->support=(MagickRealType) lobes;
      if (filter_type == BesselFilter)
        {
          if (lobes > 16)
            lobes=16;
          resize_filter->support=bessel_zeros[lobes-1];
        }
    }
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(atof(artifact));
  /*
    Scale the windowing function separately to the support 'clipping'
    window that the calling operator is planning to actually use.
    (Expert use only.)
  */
  resize_filter->window_support=resize_filter->support;
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(atof(artifact));
  /*
    Set Cubic Spline B,C values and calculate the cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((filters[filter_type].function == CubicBC) ||
      (filters[window_type].function == CubicBC))
    {
      if (filters[filter_type].function == CubicBC)
        {
          B=filters[filter_type].B;
          C=filters[filter_type].C;
        }
      else
        if (filters[window_type].function == CubicBC)
          {
            B=filters[window_type].B;
            C=filters[window_type].C;
          }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=atof(artifact);
          C=1.0-2.0*B;  /* calculate C as if it is a Keys cubic filter */
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            C=atof(artifact);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=atof(artifact);
              B=(1.0-C)/2.0;  /* calculate B as if it is a Keys cubic filter */
            }
        }
      /*
        Convert B,C values into cubic coefficients - see CubicBC().
      */
      resize_filter->cubic[0]=(  6.0 -2.0*B       )/6.0;
      resize_filter->cubic[1]=0.0;
      resize_filter->cubic[2]=(-18.0+12.0*B+ 6.0*C)/6.0;
      resize_filter->cubic[3]=( 12.0- 9.0*B- 6.0*C)/6.0;
      resize_filter->cubic[4]=(       8.0*B+24.0*C)/6.0;
      resize_filter->cubic[5]=(     -12.0*B-48.0*C)/6.0;
      resize_filter->cubic[6]=(       6.0*B+30.0*C)/6.0;
      resize_filter->cubic[7]=(     - 1.0*B- 6.0*C)/6.0;
    }
  artifact=GetImageArtifact(image,"filter:verbose");
  if (artifact != (const char *) NULL)
    {
      double
        support,
        x;

      /*
        Output the filter graph -- for graphing the filter result.
      */
      support=GetResizeFilterSupport(resize_filter);
      (void) printf("# support = %lg\n",support);
      for (x=0.0; x <= support; x+=0.01f)
        (void) printf("%5.2lf\t%lf\n",x,GetResizeFilterWeight(resize_filter,x));
      (void) printf("%5.2lf\t%lf\n",support,0.0);
    }
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e   R e s i z e   I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resizes an image with pixel resampling.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,
%        const unsigned long columns,const unsigned long rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
#define AdaptiveResizeImageTag  "Resize/Image"

  Image
    *resize_image;

  long
    y;

  MagickBooleanType
    proceed;

  MagickPixelPacket
    pixel;

  PointInfo
    offset;

  register IndexPacket
    *resize_indexes;

  register long
    x;

  register PixelPacket
    *q;

  ResampleFilter
    *resample_filter;

  ViewInfo
    *resize_view;

  /*
    Adaptively resize image.
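
    Each destination pixel is resampled from the proportionally mapped
    source position through a resample filter, using mesh interpolation
    unless the image specifies another interpolation method.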
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(image,&pixel);
  resample_filter=AcquireResampleFilter(image,exception);
  if (image->interpolate == UndefinedInterpolatePixel)
    (void) SetResampleFilterInterpolateMethod(resample_filter,
      MeshInterpolatePixel);
  resize_view=AcquireCacheView(resize_image);
  for (y=0; y < (long) resize_image->rows; y++)
  {
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    offset.y=((MagickRealType) y*image->rows/resize_image->rows);
    for (x=0; x < (long) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) x*image->columns/resize_image->columns);
      (void) ResamplePixelColor(resample_filter,offset.x-0.5,offset.y-0.5,
        &pixel);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,AdaptiveResizeImageTag,y,image->rows);
    if (proceed == MagickFalse)
      break;
  }
  resample_filter=DestroyResampleFilter(resample_filter);
  resize_view=DestroyCacheView(resize_view);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l   O r d e r   O n e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1:
%
%    Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%       j1(x) = x*J1(x), where J1 is a rational polynomial approximation;
%
%    For x in (8,inf)
%
%       j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%    where x1 = x-3*pi/4.  Compute sin(x1) and cos(x1) as follows:
%
%       cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%               =  1/sqrt(2) * (sin(x) - cos(x))
%       sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%               = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      MagickRealType BesselOrderOne(MagickRealType x)
%
%  A description of each parameter follows:
%
%    o x: MagickRealType value.
%
*/

#undef I0
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    sum,
    t,
    y;

  register long
    i;

  /*
    Zeroth-order Bessel function of the first kind.
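
    Computed from the power series I0(x) = sum_{k>=0} ((x^2/4)^k)/(k!)^2,
    summed until the next term falls below MagickEpsilon.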
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((MagickRealType) i*i);
  }
  return(sum);
}

#undef J1
static MagickRealType J1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static MagickRealType P1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroys the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->signature=(~MagickSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   S u p p o r t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() returns the current support window size for this
%  filter.  Note that this may have been enlarged by the filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   W e i g h t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight() evaluates the specified resize filter at the point
%  x, which usually lies between zero and the filter's current 'support', and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const MagickRealType x)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
%    o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    blur,
    scale;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  blur=fabs(x)/resize_filter->blur;  /* X position with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point/Box filter -- avoid division by zero */
  else
    {
      scale=resize_filter->scale/resize_filter->window_support;
      scale=resize_filter->window(blur*scale,resize_filter);
    }
  /*
    Weighting for the filter at this position.
  */
  return(scale*resize_filter->filter(blur,resize_filter));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() is a convenience method that scales an image proportionally
%  to twice its size.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
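%
%  A minimal usage sketch (error handling elided):
%
%      Image
%        *magnified;
%
%      magnified=MagnifyImage(image,exception);
%      if (magnified == (Image *) NULL)
%        { /* inspect exception */ }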
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *magnify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  magnify_image=ResizeImage(image,2*image->columns,2*image->rows,CubicFilter,
    1.0,exception);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally
%  to half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,CubicFilter,
    1.0,exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resizes an image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  unsigned long
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=(unsigned long) (x_resolution*image->columns/
    (image->x_resolution == 0.0 ? 72.0 : image->x_resolution)+0.5);
72.0 : image->y_resolution)+0.5); resample_image=ResizeImage(image,width,height,filter,blur,exception); if (resample_image != (Image *) NULL) { resample_image->x_resolution=x_resolution; resample_image->y_resolution=y_resolution; } return(resample_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image, % const unsigned long columns,const unsigned long rows, % const double delta_x,const double rigidity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LiquidRescaleImage(const Image *image, const unsigned long columns,const unsigned long rows, const double delta_x,const double rigidity,ExceptionInfo *exception) { #define LiquidRescaleImageTag "Rescale/Image" const char *map; guchar *packet; Image *rescale_image; int x, y; LqrCarver *carver; LqrRetVal lqr_status; MagickBooleanType status; MagickPixelPacket pixel; register IndexPacket *rescale_indexes; register PixelPacket *q; unsigned char *pixels; /* Liquid rescale image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); if ((columns <= 2) || (rows <= 2)) return(ZoomImage(image,columns,rows,exception)); if ((columns >= (2*image->columns)) || (rows >= (2*image->rows))) { Image *resize_image; unsigned long height, width; /* Honor liquid resize size limitations. 
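    LiquidRescaleImage() only handles targets smaller than twice the current
    dimensions, so for larger targets the image is first repeatedly doubled
    with a conventional ResizeImage() call and the routine then recurses to
    seam-carve the remainder.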
  */
      for (width=image->columns; columns >= (2*width-1); width*=2);
      for (height=image->rows; rows >= (2*height-1); height*=2);
      resize_image=ResizeImage(image,width,height,image->filter,image->blur,
        exception);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
        rigidity,exception);
      resize_image=DestroyImage(resize_image);
      return(rescale_image);
    }
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows*
    strlen(map)*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    return((Image *) NULL);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,columns,rows);
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    q=QueueAuthenticPixels(rescale_image,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetAuthenticIndexQueue(rescale_image);
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncAuthenticPixels(rescale_image,exception) == MagickFalse)
      break;
  }
  /*
    Relinquish resources.
  */
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const unsigned long magick_unused(columns),
  const unsigned long magick_unused(rows),const double magick_unused(delta_x),
  const double magick_unused(rigidity),ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireResizeFilter()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, an image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.  Typically
%      set this to 1.0.
%
%    o exception: return any errors or warnings in this structure.
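%
%  For example (an illustrative sketch, not from the original source):
%
%      resize_image=ResizeImage(image,640,480,LanczosFilter,1.0,exception);
%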
% */ typedef struct _ContributionInfo { MagickRealType weight; long pixel; } ContributionInfo; static ContributionInfo **DestroyContributionThreadSet( ContributionInfo **contribution) { register long i; assert(contribution != (ContributionInfo **) NULL); for (i=0; i < (long) GetPixelCacheMaximumThreads(); i++) if (contribution[i] != (ContributionInfo *) NULL) contribution[i]=(ContributionInfo *) RelinquishMagickMemory( contribution[i]); return((ContributionInfo **) RelinquishMagickMemory(contribution)); } static ContributionInfo **AcquireContributionThreadSet(const size_t count) { register long i; ContributionInfo **contribution; unsigned long number_threads; number_threads=GetPixelCacheMaximumThreads(); contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads, sizeof(*contribution)); if (contribution == (ContributionInfo **) NULL) return((ContributionInfo **) NULL); (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution)); for (i=0; i < (long) number_threads; i++) { contribution[i]=(ContributionInfo *) AcquireQuantumMemory(count, sizeof(**contribution)); if (contribution[i] == (ContributionInfo *) NULL) return(DestroyContributionThreadSet(contribution)); } return(contribution); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType x_factor, const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception) { #define ResizeImageTag "Resize/Image" ClassType storage_class; ContributionInfo **contribution; long x; MagickBooleanType status; MagickPixelPacket zero; MagickRealType scale, support; ViewInfo *image_view, *resize_view; /* Apply filter to resize horizontally from image to resize image. */ scale=MagickMax(1.0/x_factor,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: reduce to point sampling. 
*/ support=(MagickRealType) 0.5; scale=1.0; } contribution=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contribution == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); resize_view=AcquireCacheView(resize_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (x=0; x < (long) resize_image->columns; x++) { long id, n, start, stop; MagickRealType center, density; register const IndexPacket *indexes; register const PixelPacket *p; register IndexPacket *resize_indexes; register long y; register PixelPacket *q; center=(MagickRealType) (x+0.5)/x_factor; start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5); stop=(long) (MagickMin(center+support,(double) image->columns)+0.5); density=0.0; if (status == MagickFalse) continue; id=GetPixelCacheThreadId(); for (n=0; n < (stop-start); n++) { contribution[id][n].pixel=start+n; contribution[id][n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-center+0.5)); density+=contribution[id][n].weight; } if ((density != 0.0) && (density != 1.0)) { register long i; /* Normalize. */ density=1.0/density; for (i=0; i < n; i++) contribution[id][i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[id][0].pixel,0, (unsigned long) (contribution[id][n-1].pixel-contribution[id][0].pixel+1), image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (y=0; y < (long) resize_image->rows; y++) { long j; MagickPixelPacket pixel; MagickRealType alpha; register long i; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+ (contribution[id][i].pixel-contribution[id][0].pixel); alpha=contribution[id][i].weight; pixel.red+=alpha*(p+j)->red; pixel.green+=alpha*(p+j)->green; pixel.blue+=alpha*(p+j)->blue; pixel.opacity+=alpha*(p+j)->opacity; } q->red=RoundToQuantum(pixel.red); q->green=RoundToQuantum(pixel.green); q->blue=RoundToQuantum(pixel.blue); q->opacity=RoundToQuantum(pixel.opacity); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+ (contribution[id][i].pixel-contribution[id][0].pixel); alpha=contribution[id][i].weight; pixel.index+=alpha*indexes[j]; } resize_indexes[y]=(IndexPacket) RoundToQuantum(pixel.index); } } else { MagickRealType gamma; gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+ (contribution[id][i].pixel-contribution[id][0].pixel); alpha=contribution[id][i].weight*QuantumScale*((MagickRealType) QuantumRange-(p+j)->opacity); pixel.red+=alpha*(p+j)->red; pixel.green+=alpha*(p+j)->green; pixel.blue+=alpha*(p+j)->blue; pixel.opacity+=contribution[id][i].weight*(p+j)->opacity; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma);
          q->red=RoundToQuantum(gamma*pixel.red);
          q->green=RoundToQuantum(gamma*pixel.green);
          q->blue=RoundToQuantum(gamma*pixel.blue);
          q->opacity=RoundToQuantum(pixel.opacity);
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
                  (contribution[id][i].pixel-contribution[id][0].pixel);
                alpha=contribution[id][i].weight*QuantumScale*((MagickRealType)
                  QuantumRange-(p+j)->opacity);
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[y]=(IndexPacket) RoundToQuantum(gamma*pixel.index);
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
            (contribution[id][i-start].pixel-contribution[id][0].pixel);
          resize_indexes[y]=indexes[j];
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*quantum)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contribution=DestroyContributionThreadSet(contribution);
  return(status);
}

static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception)
{
  ClassType
    storage_class;

  ContributionInfo
    **contribution;

  long
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ViewInfo
    *image_view,
    *resize_view;

  /*
    Apply filter to resize vertically from image to resize_image.
  */
  scale=MagickMax(1.0/y_factor,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: reduce to point sampling.
*/ support=(MagickRealType) 0.5; scale=1.0; } contribution=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contribution == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); resize_view=AcquireCacheView(resize_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (long) resize_image->rows; y++) { long id, n, start, stop; MagickRealType center, density; register const IndexPacket *indexes; register const PixelPacket *p; register IndexPacket *resize_indexes; register long x; register PixelPacket *q; center=(MagickRealType) (y+0.5)/y_factor; start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5); stop=(long) (MagickMin(center+support,(double) image->rows)+0.5); density=0.0; if (status == MagickFalse) continue; id=GetPixelCacheThreadId(); for (n=0; n < (stop-start); n++) { contribution[id][n].pixel=start+n; contribution[id][n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-center+0.5)); density+=contribution[id][n].weight; } if ((density != 0.0) && (density != 1.0)) { register long i; /* Normalize. */ density=1.0/density; for (i=0; i < n; i++) contribution[id][i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[id][0].pixel, image->columns,(unsigned long) (contribution[id][n-1].pixel- contribution[id][0].pixel+1),exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (x=0; x < (long) resize_image->columns; x++) { long j; MagickPixelPacket pixel; MagickRealType alpha; register long i; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)* image->columns+x); alpha=contribution[id][i].weight; pixel.red+=alpha*(p+j)->red; pixel.green+=alpha*(p+j)->green; pixel.blue+=alpha*(p+j)->blue; pixel.opacity+=alpha*(p+j)->opacity; } q->red=RoundToQuantum(pixel.red); q->green=RoundToQuantum(pixel.green); q->blue=RoundToQuantum(pixel.blue); q->opacity=RoundToQuantum(pixel.opacity); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)* image->columns+x); alpha=contribution[id][i].weight; pixel.index+=alpha*indexes[j]; } resize_indexes[x]=(IndexPacket) RoundToQuantum(pixel.index); } } else { MagickRealType gamma; gamma=0.0; for (i=0; i < n; i++) { j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)* image->columns+x); alpha=contribution[id][i].weight*QuantumScale*((MagickRealType) QuantumRange-(p+j)->opacity); pixel.red+=alpha*(p+j)->red; pixel.green+=alpha*(p+j)->green; pixel.blue+=alpha*(p+j)->blue; pixel.opacity+=contribution[id][i].weight*(p+j)->opacity; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); q->red=RoundToQuantum(gamma*pixel.red); q->green=RoundToQuantum(gamma*pixel.green); q->blue=RoundToQuantum(gamma*pixel.blue); q->opacity=RoundToQuantum(pixel.opacity); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)* image->columns+x); alpha=contribution[id][i].weight*QuantumScale*((MagickRealType) QuantumRange-(p+j)->opacity); pixel.index+=alpha*indexes[j]; } resize_indexes[x]=(IndexPacket) RoundToQuantum(gamma*pixel.index); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop- 1.0)+0.5); j=(long) ((contribution[id][i-start].pixel-contribution[id][0].pixel)* image->columns+x); resize_indexes[x]=indexes[j]; } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif proceed=SetImageProgress(image,ResizeImageTag,(*quantum)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contribution=DestroyContributionThreadSet(contribution); return(status); } MagickExport Image *ResizeImage(const Image *image,const unsigned long columns, const unsigned long rows,const FilterTypes filter,const double blur, ExceptionInfo *exception) { FilterTypes filter_type; Image *filter_image, *resize_image; MagickRealType x_factor, y_factor; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; MagickOffsetType quantum; /* Acquire resize filter. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter) && (blur == 1.0)) return(CloneImage(image,0,0,MagickTrue,exception)); x_factor=(MagickRealType) columns/(MagickRealType) image->columns; y_factor=(MagickRealType) rows/(MagickRealType) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse, exception); /* Resize image. 
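    The resize is performed as two one-dimensional filter passes; the
    comparison below selects which direction to filter first, based on the
    relative cost of the two possible intermediate images.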
*/ quantum=0; if ((columns*((MagickSizeType) image->rows+rows)) > (rows*((MagickSizeType) image->columns+columns))) { filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return((Image *) NULL); } span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &quantum,exception); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image != (Image *) NULL) status|=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&quantum,exception); } else { filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return((Image *) NULL); } span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &quantum,exception); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image != (Image *) NULL) status|=HorizontalFilter(resize_filter,filter_image,resize_image, x_factor,span,&quantum,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { if (resize_image != (Image *) NULL) resize_image=DestroyImage(resize_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" Image *sample_image; long j, *x_offset, y, *y_offset; MagickBooleanType proceed; register const IndexPacket *indexes; register const PixelPacket *pixels; register IndexPacket *sample_indexes; register long x; register PixelPacket *sample_pixels; /* Initialize sampled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Allocate scan line buffer and column offset buffers. 
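    Each offset maps an output pixel center to its nearest input pixel, so
    rows and columns are replicated verbatim and no new colors are introduced.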
*/ x_offset=(long *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); y_offset=(long *) AcquireQuantumMemory((size_t) sample_image->rows, sizeof(*y_offset)); if ((x_offset == (long *) NULL) || (y_offset == (long *) NULL)) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize pixel offsets. */ for (x=0; x < (long) sample_image->columns; x++) x_offset[x]=(long) (((MagickRealType) x+0.5)*image->columns/ sample_image->columns); for (y=0; y < (long) sample_image->rows; y++) y_offset[y]=(long) (((MagickRealType) y+0.5)*image->rows/ sample_image->rows); /* Sample each row. */ j=(-1); pixels=GetVirtualPixels(image,0,0,image->columns,1,exception); indexes=GetVirtualIndexQueue(image); for (y=0; y < (long) sample_image->rows; y++) { sample_pixels=QueueAuthenticPixels(sample_image,0,y,sample_image->columns,1, exception); if (sample_pixels == (PixelPacket *) NULL) break; sample_indexes=GetAuthenticIndexQueue(sample_image); if (j != y_offset[y]) { /* Read a scan line. */ j=y_offset[y]; pixels=GetVirtualPixels(image,0,j,image->columns,1,exception); if (pixels == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); } /* Sample each column. */ for (x=0; x < (long) sample_image->columns; x++) sample_pixels[x]=pixels[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (long) sample_image->columns; x++) sample_indexes[x]=indexes[x_offset[x]]; if (SyncAuthenticPixels(sample_image,exception) == MagickFalse) break; proceed=SetImageProgress(image,SampleImageTag,y,image->rows); if (proceed == MagickFalse) break; } y_offset=(long *) RelinquishMagickMemory(y_offset); x_offset=(long *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" Image *scale_image; long number_rows, y; MagickBooleanType next_column, next_row, proceed; MagickPixelPacket pixel, *scale_scanline, *scanline, *x_vector, *y_vector, zero; MagickRealType alpha, gamma; PointInfo scale, span; register const IndexPacket *indexes; register const PixelPacket *p; register IndexPacket *scale_indexes; register long i, x; register MagickPixelPacket *s, *t; register PixelPacket *q; /* Initialize scaled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse) { InheritException(exception,&scale_image->exception); scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*scanline)); scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) scale_image->columns,sizeof(*scale_scanline)); y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*y_vector)); if ((scanline == (MagickPixelPacket *) NULL) || (scale_scanline == (MagickPixelPacket *) NULL) || (x_vector == (MagickPixelPacket *) NULL) || (y_vector == (MagickPixelPacket *) NULL)) { scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. */ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) ResetMagickMemory(y_vector,0,(size_t) image->columns* sizeof(*y_vector)); GetMagickPixelPacket(image,&pixel); (void) ResetMagickMemory(&zero,0,sizeof(zero)); i=0; for (y=0; y < (long) scale_image->rows; y++) { q=QueueAuthenticPixels(scale_image,0,y,scale_image->columns,1,exception); if (q == (PixelPacket *) NULL) break; scale_indexes=GetAuthenticIndexQueue(scale_image); if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetVirtualPixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (long) image->rows)) { /* Read a new scanline. 
*/ p=GetVirtualPixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } number_rows++; } for (x=0; x < (long) image->columns; x++) { y_vector[x].red+=scale.y*x_vector[x].red; y_vector[x].green+=scale.y*x_vector[x].green; y_vector[x].blue+=scale.y*x_vector[x].blue; if (scale_image->matte != MagickFalse) y_vector[x].opacity+=scale.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) y_vector[x].index+=scale.y*x_vector[x].index; } span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (long) image->rows)) { /* Read a new scanline. */ p=GetVirtualPixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } number_rows++; next_row=MagickFalse; } s=scanline; for (x=0; x < (long) image->columns; x++) { pixel.red=y_vector[x].red+span.y*x_vector[x].red; pixel.green=y_vector[x].green+span.y*x_vector[x].green; pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue; if (image->matte != MagickFalse) pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index=y_vector[x].index+span.y*x_vector[x].index; s->red=pixel.red; s->green=pixel.green; s->blue=pixel.blue; if (scale_image->matte != MagickFalse) s->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) s->index=pixel.index; s++; y_vector[x]=zero; } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. */ s=scanline; for (x=0; x < (long) scale_image->columns; x++) { q->red=RoundToQuantum(s->red); q->green=RoundToQuantum(s->green); q->blue=RoundToQuantum(s->blue); if (scale_image->matte != MagickFalse) q->opacity=RoundToQuantum(s->opacity); if (scale_indexes != (IndexPacket *) NULL) scale_indexes[x]=(IndexPacket) RoundToQuantum(s->index); q++; s++; } } else { /* Scale X direction. 
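    As in the Y direction above, pixels are blended by fractional coverage:
    'span.x' tracks how much of the current output pixel remains to be
    filled, while 'scale.x' tracks how much of the current input pixel is
    still available to contribute.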
  */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (long) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((long) (t-scale_scanline) < (long) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (long) scale_image->columns; x++)
        {
          alpha=1.0;
          if (image->matte != MagickFalse)
            alpha=(MagickRealType) (QuantumScale*(QuantumRange-t->opacity));
          gamma=1.0/(fabs((double) alpha) <= MagickEpsilon ? 1.0 : alpha);
          q->red=RoundToQuantum(gamma*t->red);
          q->green=RoundToQuantum(gamma*t->green);
          q->blue=RoundToQuantum(gamma*t->blue);
          if (scale_image->matte != MagickFalse)
            q->opacity=RoundToQuantum(t->opacity);
          if (scale_indexes != (IndexPacket *) NULL)
            scale_indexes[x]=(IndexPacket) RoundToQuantum(gamma*t->index);
          t++;
          q++;
        }
      }
    if (SyncAuthenticPixels(scale_image,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,ScaleImageTag,y,image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResizeFilterSupport() sets the support radius of the given resize
%  filter, i.e. the window over which the filter function is evaluated.
%
%  The format of the SetResizeFilterSupport method is:
%
%      void SetResizeFilterSupport(ResizeFilter *resize_filter,
%        const MagickRealType support)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
%    o support: the filter support radius.
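%
%  For example, an illustrative sketch (not from the original source) that
%  widens the window to a three-pixel radius:
%
%      SetResizeFilterSupport(resize_filter,3.0);
%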
%
*/
MagickExport void SetResizeFilterSupport(ResizeFilter *resize_filter,
  const MagickRealType support)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->support=support;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.  The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,
  const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
  char
    value[MaxTextExtent];

  const char
    *attribute;

  Image
    *sample_image,
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  struct stat
    attributes;

  unsigned long
    version;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    {
      thumbnail_image=ZoomImage(image,columns,rows,exception);
      if (thumbnail_image != (Image *) NULL)
        (void) StripImage(thumbnail_image);
      return(thumbnail_image);
    }
  sample_image=SampleImage(image,5*columns,5*rows,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  thumbnail_image=ZoomImage(sample_image,columns,rows,exception);
  sample_image=DestroyImage(sample_image);
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  (void) StripImage(thumbnail_image);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"///") == (char *) NULL)
    (void) FormatMagickString(value,MaxTextExtent,"file:///%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatMagickString(value,MaxTextExtent,"%ld",(long)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatMagickString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  attribute=GetImageProperty(image,"comment");
  if ((attribute != (const char *) NULL) && (*attribute != '\0'))
    (void)
SetImageProperty(thumbnail_image,"description",value); (void) SetImageProperty(thumbnail_image,"software", GetMagickVersion(&version)); (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_columns); (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value); (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_rows); (void) SetImageProperty(thumbnail_image,"Thumb::Image::height",value); (void) FormatMagickString(value,MaxTextExtent,"%lu", GetImageListLength(image)); (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value); return(thumbnail_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Z o o m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZoomImage() creates a new image that is a scaled size of an existing one. % It allocates the memory necessary for the new Image structure and returns a % pointer to the new image. The Point filter gives fast pixel replication, % Triangle is equivalent to bi-linear interpolation, and Mitchel giver slower, % very high-quality results. See Graphic Gems III for details on this % algorithm. % % The filter member of the Image structure specifies which image filter to % use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp. % % The format of the ZoomImage method is: % % Image *ZoomImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: An integer that specifies the number of columns in the zoom % image. % % o rows: An integer that specifies the number of rows in the scaled % image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ZoomImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { Image *zoom_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur, exception); return(zoom_image); }
image.h
#pragma once
#include <vector>
#include <cmath>
#include <cstdio>
#include <limits>
#include <map>
#include <string>
#include <utility>
#include <algorithm>
#include <omp.h>
#include "CImg.h"
#undef max
#undef min

typedef double Float;

enum class ColorMode {GRAY, LUMINANCE, R, G, B, AVG};

struct Image {
    // Image() {}
    Image(int w, int h, Float default_val = 0) : W(w), H(h) {
        data = new Float*[H];
        for (int i = 0; i < H; ++i) data[i] = new Float[W];
        Clear(default_val);
    }

    // Copy construction must not free 'data': the members of a freshly
    // constructed object are uninitialized, so allocate first, then copy.
    Image(const Image& img) : W(img.W), H(img.H) {
        data = new Float*[H];
        for (int i = 0; i < H; ++i) data[i] = new Float[W];
#pragma omp parallel for num_threads(8)
        for (int i = 0; i < H; ++i) {
            for (int j = 0; j < W; ++j) {
                data[i][j] = img.data[i][j];
            }
        }
    }

    ~Image() {
        for (int i = 0; i < H; ++i) delete[] data[i];
        delete[] data;
    }

    Float Get(int x, int y) const { return data[y][x]; }
    Float& Get(int x, int y) { return data[y][x]; }

    Image& operator=(const Image& img) {
        if (H != img.H || W != img.W) {
            for (int i = 0; i < H; ++i) delete[] data[i];
            delete[] data;
            H = img.H;
            W = img.W;
            data = new Float*[H];
            for (int i = 0; i < H; ++i) data[i] = new Float[W];
        }
#pragma omp parallel for num_threads(8)
        for (int i = 0; i < H; ++i) {
            for (int j = 0; j < W; ++j) {
                data[i][j] = img.data[i][j];
            }
        }
        return *this;
    }

    void Clear(Float default_val) {
#pragma omp parallel for num_threads(8)
        for (int i = 0; i < H; ++i) {
            for (int j = 0; j < W; ++j) {
                data[i][j] = default_val;
            }
        }
    }

    void UpdatePixel(int x, int y, Float length) {
#pragma omp atomic
        data[y][x] += length;
    }

    // Accumulates, for every pixel the segment crosses, the length of the
    // segment inside that pixel.
    void DrawLineLength(const std::pair<int, int>& p1, const std::pair<int, int>& p2) {
        int x1 = p1.first, y1 = p1.second;
        int x2 = p2.first, y2 = p2.second;
        if (x1 == x2) {
            int y_min = std::min(y1, y2);
            int y_max = std::max(y1, y2);
            for (int y = y_min; y <= y_max; ++y) {
                UpdatePixel(x1, y, 1);
            }
            return;
        }
        if (y1 == y2) {
            int x_min = std::min(x1, x2);
            int x_max = std::max(x1, x2);
            for (int x = x_min; x <= x_max; ++x) {
                UpdatePixel(x, y1, 1);
            }
            return;
        }
        if ((x1 <= x2 && y1 <= y2) || (x1 >= x2 && y1 >= y2)) {
            int x_min = std::min(x1, x2);
            int x_max = std::max(x1, x2);
            int y_min = std::min(y1, y2);
            int y_max = std::max(y1, y2);
            Float ky = y_max - y_min + 1;
            Float kx = x_max - x_min + 1;
            int x = x_min, y = y_min;
            Float xx = x, yy = y;
            while (x <= x_max && y <= y_max) {
                int tmp_x = x, tmp_y = y;
                Float delta_x = x + 1 - xx;
                Float delta_y = y + 1 - yy;
                Float tx = delta_x / kx;
                Float ty = delta_y / ky;
                Float len;
                if (tx - ty > 1e-5) { // UP
                    y += 1;
                    len = std::sqrt((y - yy) * (y - yy) + (ty * kx) * (ty * kx));
                    xx += ty * kx;
                    yy = y;
                } else if (ty - tx > 1e-5) { // RIGHT
                    x += 1;
                    len = std::sqrt((x - xx) * (x - xx) + (tx * ky) * (tx * ky));
                    xx = x;
                    yy += tx * ky;
                } else { // UP-RIGHT
                    x += 1;
                    y += 1;
                    len = std::sqrt((x - xx) * (x - xx) + (y - yy) * (y - yy));
                    xx = x;
                    yy = y;
                }
                UpdatePixel(tmp_x, tmp_y, len);
            }
        } else { // [xmin, ymax] => [xmax, ymin]
            int x_min = std::min(x1, x2);
            int x_max = std::max(x1, x2);
            int y_min = std::min(y1, y2);
            int y_max = std::max(y1, y2);
            int x = x_min, y = y_max;
            Float xx = x, yy = y;
            Float ky = y_max - y_min + 1;
            Float kx = x_max - x_min + 1;
            while (x <= x_max && y >= y_min) {
                int tmp_x = x, tmp_y = y;
                Float delta_x = x + 1 - xx;
                Float delta_y = yy - (y - 1);
                Float tx = delta_x / kx;
                Float ty = delta_y / ky;
                Float len;
                if (tx - ty > 1e-5) { // DOWN
                    y -= 1;
                    len = std::sqrt((y - yy) * (y - yy) + (ty * kx) * (ty * kx));
                    xx += ty * kx;
                    yy = y;
                } else if (ty - tx > 1e-5) { // RIGHT
                    x += 1;
                    len = std::sqrt((x - xx) * (x - xx) + (tx
* ky) * (tx * ky)); xx = x; yy -= tx * ky; } else { // DOWN-RIGHT x += 1; y -= 1; len = std::sqrt((x - xx) * (x - xx) + (y - yy) * (y - yy)); xx = x; yy = y; } UpdatePixel(tmp_x, tmp_y, len); } } } void DrawLineCount(const std::pair<int, int>& p1, const std::pair<int, int>& p2) { int x1 = p1.first, y1 = p1.second; int x2 = p2.first, y2 = p2.second; int dx, dy, i, e; int incx, incy, inc1, inc2; int x, y; dx = x2 - x1; dy = y2 - y1; if (dx < 0) dx = -dx; if (dy < 0) dy = -dy; incx = 1; if (x2 < x1) incx = -1; incy = 1; if (y2 < y1) incy = -1; x = x1; y = y1; if (dx > dy) { UpdatePixel(x, y, 1); e = 2 * dy - dx; inc1 = 2 * (dy - dx); inc2 = 2 * dy; for (i = 0; i < dx; i++) { if (e >= 0) { y += incy; e += inc1; } else e += inc2; x += incx; UpdatePixel(x, y, 1); } } else { UpdatePixel(x, y, 1); e = 2 * dx - dy; inc1 = 2 * (dx - dy); inc2 = 2 * dx; for (i = 0; i < dy; i++) { if (e >= 0) { x += incx; e += inc1; } else e += inc2; y += incy; UpdatePixel(x, y, 1); } } } void DrawLineBinary(const std::pair<int, int>& p1, const std::pair<int, int>& p2) { int x1 = p1.first, y1 = p1.second; int x2 = p2.first, y2 = p2.second; int dx, dy, i, e; int incx, incy, inc1, inc2; int x, y; dx = x2 - x1; dy = y2 - y1; if (dx < 0) dx = -dx; if (dy < 0) dy = -dy; incx = 1; if (x2 < x1) incx = -1; incy = 1; if (y2 < y1) incy = -1; x = x1; y = y1; if (dx > dy) { data[y][x] = 0; e = 2 * dy - dx; inc1 = 2 * (dy - dx); inc2 = 2 * dy; for (i = 0; i < dx; i++) { if (e >= 0) { y += incy; e += inc1; } else e += inc2; x += incx; data[y][x] = 0; } } else { //data[y][x] += 1.0 / 255; data[y][x] = 0; e = 2 * dx - dy; inc1 = 2 * (dx - dy); inc2 = 2 * dx; for (i = 0; i < dy; i++) { if (e >= 0) { x += incx; e += inc1; } else e += inc2; y += incy; //data[y][x] += 1.0 / 255; data[y][x] = 0; } } } Float Max() const { Float shared_max = -1; #pragma omp parallel { Float max_value = -1; #pragma omp for nowait for (int i = 0; i < H; ++i) { for (int j = 0; j < W; ++j) { max_value = std::max(max_value, data[i][j]); } } #pragma omp critical { shared_max = std::max(shared_max, max_value); } } return shared_max; } Float Min() const { Float shared_min = std::numeric_limits<Float>::max(); #pragma omp parallel { Float min_value = std::numeric_limits<Float>::max(); #pragma omp for nowait for (int i = 0; i < H; ++i) { for (int j = 0; j < W; ++j) { min_value = std::min(min_value, data[i][j]); } } #pragma omp critical { shared_min = std::min(shared_min, min_value); } } return shared_min; } Float Mean() const { return Sum() / Float(W * H); } Float Sum() const { Float sum = 0; #pragma omp parallel for reduction(+:sum) num_threads(8) for (int i = 0; i < H; ++i) { for (int j = 0; j < W; ++j) { sum += data[i][j]; } } return sum; } void Write(const std::string& name, int _W=-1, int _H=-1) const { cimg_library::CImg<Float> image(W, H, 1, 1); for (int j = 0; j < H; j++) { for (int i = 0; i < W; i++) { Float c = data[j][i]; c = c > 0 ? c : 0; c = c < 1 ? 
c : 1;
                c = c * 255;
                image(i, j, 0, 0) = c;
            }
        }
        if (_W > 0 && _H > 0) {
            image = image.resize(_W, _H, -100, -100, 6);
        }
        image.save(name.c_str());
    }

    // Loads an image from disk, resizing it to WxH if needed, and converts
    // RGB to a single channel according to 'mode'.
    void Load(const std::string& name, ColorMode mode = ColorMode::LUMINANCE) {
        cimg_library::CImg<Float> image(name.c_str());
        if (image.width() != W || image.height() != H) {
            printf("Resize input image from [%d, %d] to [%d, %d].\n",
                   image.width(), image.height(), W, H);
            image = image.resize(W, H, -100, -100, 6);
        }
        for (int j = 0; j < H; j++) {
            for (int i = 0; i < W; i++) {
                Float r = image(i, j, 0, 0) / 255.0;
                Float g = image(i, j, 0, 1) / 255.0;
                Float b = image(i, j, 0, 2) / 255.0;
                Float pixel_value;
                switch (mode) {
                case ColorMode::GRAY:
                    pixel_value = 0.2989 * r + 0.5870 * g + 0.1140 * b;
                    break;
                case ColorMode::LUMINANCE:
                    pixel_value = 0.212671 * r + 0.715160 * g + 0.072169 * b;
                    break;
                case ColorMode::R:
                    pixel_value = r;
                    break;
                case ColorMode::G:
                    pixel_value = g;
                    break;
                case ColorMode::B:
                    pixel_value = b;
                    break;
                case ColorMode::AVG:
                    pixel_value = (r + g + b) / 3.0;
                    break;
                default:
                    pixel_value = 0.212671 * r + 0.715160 * g + 0.072169 * b;
                    break;
                }
                data[j][i] = pixel_value;
            }
        }
    }

    Float** data;
    int W, H;
};
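// A minimal usage sketch for the Image helper above, shown as a comment so
// the header stays compilable (illustrative only; assumes CImg.h is on the
// include path and the compiler supports OpenMP):
//
//   #include "image.h"
//
//   int main() {
//     Image img(256, 256, 0.0);                  // 256x256 canvas of zeros
//     img.DrawLineLength({10, 10}, {200, 120});  // per-pixel traversal lengths
//     img.DrawLineCount({10, 120}, {200, 10});   // Bresenham hit counts
//     printf("sum=%f max=%f\n", img.Sum(), img.Max());
//     img.Write("line.png");                     // clamps to [0,1], scales by 255
//     return 0;
//   }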
bicg.c
/**
 * This version is stamped on May 10, 2016
 *
 * Contact:
 *   Louis-Noel Pouchet <pouchet.ohio-state.edu>
 *   Tomofumi Yuki <tomofumi.yuki.fr>
 *
 * Web address: http://polybench.sourceforge.net
 */
/* bicg.c: this file is part of PolyBench/C */

#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
#include "bicg.h"

/* Array initialization. */
static void init_array(int m, int n,
                       DATA_TYPE POLYBENCH_2D(A, N, M, n, m),
                       DATA_TYPE POLYBENCH_1D(r, N, n),
                       DATA_TYPE POLYBENCH_1D(p, M, m))
{
  int i, j;

  for (i = 0; i < m; i++)
    p[i] = (DATA_TYPE)(i % m) / m;
  for (i = 0; i < n; i++) {
    r[i] = (DATA_TYPE)(i % n) / n;
    for (j = 0; j < m; j++)
      A[i][j] = (DATA_TYPE)(i * (j + 1) % n) / n;
  }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int m, int n,
                        DATA_TYPE POLYBENCH_1D(s, M, m),
                        DATA_TYPE POLYBENCH_1D(q, N, n))
{
  int i;

  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("s");
  for (i = 0; i < m; i++) {
    if (i % 20 == 0)
      fprintf(POLYBENCH_DUMP_TARGET, "\n");
    fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, s[i]);
  }
  POLYBENCH_DUMP_END("s");
  POLYBENCH_DUMP_BEGIN("q");
  for (i = 0; i < n; i++) {
    if (i % 20 == 0)
      fprintf(POLYBENCH_DUMP_TARGET, "\n");
    fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, q[i]);
  }
  POLYBENCH_DUMP_END("q");
  POLYBENCH_DUMP_FINISH;
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_bicg(int m, int n,
                        DATA_TYPE POLYBENCH_2D(A, N, M, n, m),
                        DATA_TYPE POLYBENCH_1D(s, M, m),
                        DATA_TYPE POLYBENCH_1D(q, N, n),
                        DATA_TYPE POLYBENCH_1D(p, M, m),
                        DATA_TYPE POLYBENCH_1D(r, N, n))
{
  int i, j;

#pragma omp parallel for default(shared) private(i) firstprivate(m)
  for (i = 0; i < _PB_M; i++)
    s[i] = 0;
  /* Reduce into the whole of s (length _PB_M) rather than a hard-coded
     element count, so the kernel stays correct for every dataset size. */
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, m, r, A, p) reduction(+ : s[:_PB_M])
  for (i = 0; i < _PB_N; i++) {
    q[i] = SCALAR_VAL(0.0);
    for (j = 0; j < _PB_M; j++) {
      s[j] = s[j] + r[i] * A[i][j];
      q[i] = q[i] + A[i][j] * p[j];
    }
  }
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, M, n, m);
  POLYBENCH_1D_ARRAY_DECL(s, DATA_TYPE, M, m);
  POLYBENCH_1D_ARRAY_DECL(q, DATA_TYPE, N, n);
  POLYBENCH_1D_ARRAY_DECL(p, DATA_TYPE, M, m);
  POLYBENCH_1D_ARRAY_DECL(r, DATA_TYPE, N, n);

  /* Initialize array(s). */
  init_array(m, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(r), POLYBENCH_ARRAY(p));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_bicg(m, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(s),
              POLYBENCH_ARRAY(q), POLYBENCH_ARRAY(p), POLYBENCH_ARRAY(r));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, n, POLYBENCH_ARRAY(s),
                                    POLYBENCH_ARRAY(q)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(s);
  POLYBENCH_FREE_ARRAY(q);
  POLYBENCH_FREE_ARRAY(p);
  POLYBENCH_FREE_ARRAY(r);

  return 0;
}
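/*
 * A tiny standalone check of the bicg kernel's semantics above: s = A^T r
 * and q = A p.  This is an illustrative sketch, independent of the PolyBench
 * harness (it is not part of bicg.c and uses fixed 2x3 data).
 */
#include <stdio.h>

int main(void)
{
  double A[2][3] = {{1, 2, 3}, {4, 5, 6}};
  double r[2] = {1, 1};      /* length n = 2 */
  double p[3] = {1, 0, -1};  /* length m = 3 */
  double s[3] = {0, 0, 0};   /* s = A^T r, length m */
  double q[2];               /* q = A p,   length n */

  /* Same loop structure as kernel_bicg: both products in one sweep of A. */
  for (int i = 0; i < 2; i++) {
    q[i] = 0.0;
    for (int j = 0; j < 3; j++) {
      s[j] += r[i] * A[i][j];
      q[i] += A[i][j] * p[j];
    }
  }
  /* Expect s = {5, 7, 9} and q = {-2, -2}. */
  printf("s = {%g, %g, %g}, q = {%g, %g}\n", s[0], s[1], s[2], q[0], q[1]);
  return 0;
}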
GB_binop__bxnor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_uint32 // A.*B function (eWiseMult): GB_AemultB__bxnor_uint32 // A*D function (colscale): GB_AxD__bxnor_uint32 // D*A function (rowscale): GB_DxB__bxnor_uint32 // C+=B function (dense accum): GB_Cdense_accumB__bxnor_uint32 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_uint32 // C=scalar+B GB_bind1st__bxnor_uint32 // C=scalar+B' GB_bind1st_tran__bxnor_uint32 // C=A+scalar GB_bind2nd__bxnor_uint32 // C=A'+scalar GB_bind2nd_tran__bxnor_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_UINT32 || GxB_NO_BXNOR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__bxnor_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__bxnor_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bxnor_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bxnor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bxnor_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bxnor_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = ~((x) ^ (aij)) ;      \
}

GrB_Info GB_bind1st_tran__bxnor_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = ~((aij) ^ (y)) ;      \
}

GrB_Info GB_bind2nd_tran__bxnor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
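As a quick illustration of the operator these kernels hard-code, the following standalone sketch (not part of the generated file, and independent of GraphBLAS) applies the same bitwise-XNOR formula as GB_BINOP to a few made-up uint32 values:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    uint32_t Ax [3] = { 0x0F0F0F0Fu, 0xFFFFFFFFu, 0u } ;
    uint32_t y = 0x00FF00FFu ;
    for (int p = 0 ; p < 3 ; p++)
    {
        uint32_t cij = ~(Ax [p] ^ y) ;      // same formula as GB_BINOP above
        printf ("0x%08x bxnor 0x%08x = 0x%08x\n", Ax [p], y, cij) ;
    }
    return (0) ;
}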
clustering.c
/* * Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University * Berlin, 14195 Berlin, Germany. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define NO_IMPORT_ARRAY #include <clustering.h> #include <assert.h> float euclidean_distance(float *SKP_restrict a, float *SKP_restrict b, size_t n, float *buffer_a, float *buffer_b) { double sum; size_t i; sum = 0.0; for(i=0; i<n; ++i) { sum += (a[i]-b[i])*(a[i]-b[i]); } return sqrt(sum); } float minRMSD_distance(float *SKP_restrict a, float *SKP_restrict b, size_t n, float *SKP_restrict buffer_a, float *SKP_restrict buffer_b) { float msd; float trace_a, trace_b; memcpy(buffer_a, a, n*sizeof(float)); memcpy(buffer_b, b, n*sizeof(float)); inplace_center_and_trace_atom_major(buffer_a, &trace_a, 1, n/3); inplace_center_and_trace_atom_major(buffer_b, &trace_b, 1, n/3); msd = msd_atom_major(n/3, n/3, buffer_a, buffer_b, trace_a, trace_b, 0, NULL); return sqrt(msd); } int c_assign(float *chunk, float *centers, npy_int32 *dtraj, char* metric, Py_ssize_t N_frames, Py_ssize_t N_centers, Py_ssize_t dim) { int ret; float d, mindist; size_t argmin; float *buffer_a, *buffer_b; float (*distance)(float*, float*, size_t, float*, float*); buffer_a = NULL; buffer_b = NULL; ret = ASSIGN_SUCCESS; /* init metric */ if(strcmp(metric,"euclidean")==0) { distance = euclidean_distance; } else if(strcmp(metric,"minRMSD")==0) { distance = minRMSD_distance; buffer_a = malloc(dim*sizeof(float)); buffer_b = malloc(dim*sizeof(float)); if(!buffer_a || !buffer_b) { ret = ASSIGN_ERR_NO_MEMORY; goto error; } } else { ret = ASSIGN_ERR_INVALID_METRIC; goto error; } /* do the assignment */ { Py_ssize_t i,j; // #pragma omp for private(j, argmin, mindist) for(i = 0; i < N_frames; ++i) { mindist = FLT_MAX; argmin = -1; for(j = 0; j < N_centers; ++j) { d = distance(&chunk[i*dim], &centers[j*dim], dim, buffer_a, buffer_b); // #pragma omp critical { if(d<mindist) { mindist = d; argmin = j; } } } dtraj[i] = argmin; } } error: free(buffer_a); free(buffer_b); return ret; } PyObject *assign(PyObject *self, PyObject *args) { PyObject *py_centers, *py_res; PyArrayObject *np_chunk, *np_centers, *np_dtraj; Py_ssize_t N_centers, N_frames, dim; float *chunk; float *centers; npy_int32 *dtraj; char *metric; py_centers = NULL; py_res = NULL; np_chunk = 
NULL;
    np_dtraj = NULL;
    centers = NULL;
    metric = "";
    chunk = NULL;
    dtraj = NULL;

    if (!PyArg_ParseTuple(args, "O!OO!s", &PyArray_Type, &np_chunk, &py_centers, &PyArray_Type, &np_dtraj, &metric)) goto error; /* ref:borr. */

    /* import chunk */
    if(PyArray_TYPE(np_chunk)!=NPY_FLOAT32) { PyErr_SetString(PyExc_ValueError, "dtype of \"chunk\" isn\'t float (32)."); goto error; };
    if(!PyArray_ISCARRAY_RO(np_chunk) ) { PyErr_SetString(PyExc_ValueError, "\"chunk\" isn\'t C-style contiguous or isn\'t behaved."); goto error; };
    if(PyArray_NDIM(np_chunk)!=2) { PyErr_SetString(PyExc_ValueError, "Number of dimensions of \"chunk\" isn\'t 2."); goto error; };
    N_frames = np_chunk->dimensions[0];
    dim = np_chunk->dimensions[1];
    if(dim==0) { PyErr_SetString(PyExc_ValueError, "chunk dimension must be larger than zero."); goto error; }
    chunk = PyArray_DATA(np_chunk);

    /* import dtraj */
    if(PyArray_TYPE(np_dtraj)!=NPY_INT32) { PyErr_SetString(PyExc_ValueError, "dtype of \"dtraj\" isn\'t int (32)."); goto error; };
    if(!PyArray_ISBEHAVED_RO(np_dtraj) ) { PyErr_SetString(PyExc_ValueError, "\"dtraj\" isn\'t behaved."); goto error; };
    if(PyArray_NDIM(np_dtraj)!=1) { PyErr_SetString(PyExc_ValueError, "Number of dimensions of \"dtraj\" isn\'t 1."); goto error; };
    if(np_dtraj->dimensions[0]!=N_frames) { PyErr_SetString(PyExc_ValueError, "Size of \"dtraj\" differs from number of frames in \"chunk\"."); goto error; }
    dtraj = (npy_int32*)PyArray_DATA(np_dtraj);

    /* import list of cluster centers */
    np_centers = (PyArrayObject*)PyArray_ContiguousFromAny(py_centers, NPY_FLOAT32, 2, 2);
    if(!np_centers) { PyErr_SetString(PyExc_ValueError, "Could not convert \"centers\" to two-dimensional C-contiguous behaved ndarray of float (32)."); goto error; }
    N_centers = np_centers->dimensions[0];
    if(N_centers==0) { PyErr_SetString(PyExc_ValueError, "centers must contain at least one element."); goto error; }
    if(np_centers->dimensions[1]!=dim) { PyErr_SetString(PyExc_ValueError, "Dimension of cluster centers doesn\'t match dimension of frames."); goto error; }
    centers = (float*)PyArray_DATA(np_centers);

    /* do the assignment */
    switch(c_assign(chunk, centers, dtraj, metric, N_frames, N_centers, dim)) {
        case ASSIGN_ERR_INVALID_METRIC:
            PyErr_SetString(PyExc_ValueError, "metric must be one of \"euclidean\" or \"minRMSD\".");
            goto error;
        case ASSIGN_ERR_NO_MEMORY:
            PyErr_NoMemory();
            goto error;
    }

    py_res = Py_BuildValue(""); /* =None */
    /* fall through */
error:
    return py_res;
}
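A hedged usage sketch for c_assign: the toy 4-frame/2-center data below is invented for illustration, and the euclidean metric path needs no scratch buffers (those are only allocated for minRMSD):

static void c_assign_example(void) {
    float chunk[8]   = { 0.0f,0.0f,  0.1f,0.0f,  1.0f,1.0f,  0.9f,1.1f }; /* 4 frames, dim 2 */
    float centers[4] = { 0.0f,0.0f,  1.0f,1.0f };                         /* 2 centers, dim 2 */
    npy_int32 dtraj[4];
    if(c_assign(chunk, centers, dtraj, "euclidean", 4, 2, 2)==ASSIGN_SUCCESS) {
        /* dtraj now holds {0, 0, 1, 1}: each frame assigned to its nearest center */
    }
}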
taskgroup-1.c
/* { dg-do compile } */ void foo (int x) { bad1: #pragma omp taskgroup goto bad1; // { dg-error "invalid branch to/from OpenMP structured block" } goto bad2; // { dg-error "invalid entry to OpenMP structured block" } #pragma omp taskgroup { bad2: ; } #pragma omp taskgroup { int i; goto ok1; for (i = 0; i < 10; ++i) { ok1: break; } } switch (x) // { dg-error "invalid entry to OpenMP structured block" } { #pragma omp taskgroup // { dg-warning "statement will never be executed" } { case 0:; } } }
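For contrast with the invalid branches this test exercises, a well-formed taskgroup (a sketch, not part of the testcase) enters and leaves the structured block only through its single entry and exit:

void taskgroup_ok (void)
{
  int x = 0, y = 0;
  #pragma omp taskgroup
  {
    #pragma omp task shared(x)
    x = 1;
    #pragma omp task shared(y)
    y = 2;
  }     /* the taskgroup waits here until both child tasks have completed */
}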
GB_unaryop__identity_uint16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_int32 // op(A') function: GB_tran__identity_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
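The only real work in this identity kernel is the int32-to-uint16 typecast; the standalone sketch below (not generated code, values made up) shows its wrap-around semantics:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int32_t  Ax [3] = { 7, 65537, -1 } ;
    uint16_t Cx [3] ;
    for (int64_t p = 0 ; p < 3 ; p++)
    {
        Cx [p] = (uint16_t) Ax [p] ;    // same cast as GB_CASTING above
    }
    printf ("%u %u %u\n", Cx [0], Cx [1], Cx [2]) ;   // prints: 7 1 65535
    return (0) ;
}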
implied_volatility_newton.c
//
//  implied_volatility_newton.c
//
//
//  Created by Domenico Natella on 10/25/16.
//

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>   /* offsetof */
#include <math.h>
#include <time.h>
#include <string.h>
#include <mpi.h>
#include <omp.h>

#define SIZE 110
#define MAX_ITERATIONS 1000000

struct option{
    double V_market[SIZE][2];
    double K[SIZE];
    double implied_vol[SIZE];
    double T;
    double S;
    double r;
};

struct tm create_tm(int year, int month, int day){
    struct tm my_time = { .tm_year=year, .tm_mon=month, .tm_mday=day, .tm_hour=0, .tm_min=0, .tm_sec=0 };
    return my_time;
}

struct option load(char* filename){
    FILE* file = fopen(filename, "r");
    struct option op;
    if(!file){ perror(filename); exit(EXIT_FAILURE); }
    fscanf(file, "%lf", &op.S);

    char tmp[32], cp[2];
    fscanf(file, "%31s", tmp);
    char s[2] = "/";
    char *token;
    token = strtok(tmp, s);
    int date[3]={0,0,0};
    int i = 0;
    while( token != NULL ){
        date[i] = atoi(token);
        token = strtok(NULL, s);
        i++;
    }

    time_t now;
    time(&now);
    struct tm option_t = create_tm(date[0]-1900, date[1]-1, date[2]);
    time_t opt_t_conv = mktime(&option_t);
    double diff_t = difftime(opt_t_conv, now);
    op.T = (diff_t/86400)/365.;

    i=0;
    while(fscanf(file, "%31s", tmp)!=EOF){
        if(strcmp(tmp, "c")==0 || strcmp(tmp, "p")==0) strcpy(cp,tmp);
        else{
            op.K[i] = atof(strtok(tmp,s));
            op.V_market[i][0] = atof(strtok(NULL,s));
            if(strcmp(cp, "c")==0) op.V_market[i][1] = 0.;
            else if(strcmp(cp, "p")==0) op.V_market[i][1] = 1.;
            i++; /* only advance on strike/price records, not on the c/p marker lines */
        }
    }
    op.r = 0.03;
    return op;
}

double pdf(const double x) {
    return (1.0/(pow(2*M_PI,0.5)))*exp(-0.5*x*x);
}

double cdf(double x){
    double RT2PI = sqrt(4.0*acos(0.0));
    static const double SPLIT = 7.07106781186547;
    static const double N0 = 220.206867912376;
    static const double N1 = 221.213596169931;
    static const double N2 = 112.079291497871;
    static const double N3 = 33.912866078383;
    static const double N4 = 6.37396220353165;
    static const double N5 = 0.700383064443688;
    static const double N6 = 3.52624965998911e-02;
    static const double M0 = 440.413735824752;
    static const double M1 = 793.826512519948;
    static const double M2 = 637.333633378831;
    static const double M3 = 296.564248779674;
    static const double M4 = 86.7807322029461;
    static const double M5 = 16.064177579207;
    static const double M6 = 1.75566716318264;
    static const double M7 = 8.83883476483184e-02;

    const double z = fabs(x);
    double c = 0.0;
    if(z<=37.0){
        const double e = exp(-z*z/2.0);
        if(z<SPLIT){
            const double n = (((((N6*z + N5)*z + N4)*z + N3)*z + N2)*z + N1)*z + N0;
            const double d = ((((((M7*z + M6)*z + M5)*z + M4)*z + M3)*z + M2)*z + M1)*z + M0;
            c = e*n/d;
        }
        else{
            const double f = z + 1.0/(z + 2.0/(z + 3.0/(z + 4.0/(z + 13.0/20.0))));
            c = e/(RT2PI*f);
        }
    }
    return x<=0.0 ? c : 1-c;
}

double d_j(int j, double S, double K, double r, double sigma, double T){
    double d1 = (log(S/K) + (r + 0.5*sigma*sigma)*T)/(sigma*(pow(T,0.5)));
    if(j==1) return d1;
    else return d1-sigma*pow(T,0.5);
}

double call_price(double S, double K, double r, double sigma, double T, double type){
    if(type==0.)
        return S * cdf(d_j(1, S, K, r, sigma, T)) - K*exp(-r*T) * cdf(d_j(2, S, K, r, sigma, T));
    else
        return K*exp(-r*T) * cdf(d_j(2, S, K, r, sigma, T)) - S * cdf(d_j(1, S, K, r, sigma, T));
}

double call_vega(const double S, const double K, const double r, const double sigma, const double T) {
    return S * sqrt(T) * pdf(d_j(1, S, K, r, sigma, T));
}

double newton_raphson(double y_target, double init, double epsilon, double S, double K, double r, double T, double type){
    double x = init;
    double y = call_price(S, K, r, x, T, type);
    int i=0;
    while (fabs(y-y_target) > epsilon) {
        if(i >= MAX_ITERATIONS) break;
        double d_x = call_vega(S, K, r, x, T);
        x -= (y-y_target)/d_x;   /* Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n) */
        y = call_price(S,K,r,x,T,type);
        i++;
    }
    if(isnan(x)!=0) return 0.;
    else return fabs(x);
}

int main(int argc, char** argv){
    // First we create the parameter list
    // S: Underlying spot price
    // K: Strike price
    // r: Risk-free rate (5%)
    // T: One year until expiry
    // C_M: Option market price
    int rank,size,i,j,len=7;
    double low_vol = 0.3, epsilon = 0.001;
    struct option op[len];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size < 1) {
        fprintf(stderr, "World size must be at least 1 for %s\n", argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    int blocklen[6] = {SIZE*2,SIZE,SIZE,1,1,1};
    MPI_Datatype types[6] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
    MPI_Datatype mpi_op_type;
    MPI_Aint offsets[6];

    offsets[0] = offsetof(struct option, V_market);
    offsets[1] = offsetof(struct option, K);
    offsets[2] = offsetof(struct option, implied_vol);
    offsets[3] = offsetof(struct option, T);
    offsets[4] = offsetof(struct option, S);
    offsets[5] = offsetof(struct option, r);

    MPI_Type_create_struct(6, blocklen, offsets, types, &mpi_op_type);
    MPI_Type_commit(&mpi_op_type);

    if (rank==0) {
        op[0] = load("./OPT_AAPL/Options_20161118.txt");
        op[1] = load("./OPT_AAPL/Options_2017120.txt");
        op[2] = load("./OPT_AAPL/Options_2017317.txt");
        op[3] = load("./OPT_AAPL/Options_2017421.txt");
        op[4] = load("./OPT_AAPL/Options_2017616.txt");
        op[5] = load("./OPT_AAPL/Options_20171117.txt");
        op[6] = load("./OPT_AAPL/Options_2018119.txt");
    }

    int elements_per_proc = len / size;
    int difference = len - elements_per_proc * size;
    int* chunk_sizes = (int*)(malloc(sizeof(int) * size));
    int* displ = (int*)(malloc(sizeof(int) * size));
    for(i = 0; i < size; ++i) {
        chunk_sizes[i] = elements_per_proc;
        displ[i] = i * elements_per_proc;
    }
    chunk_sizes[size - 1] += difference;

    int current_recv_size = elements_per_proc + (rank == size - 1 ? difference : 0);
    struct option toReturn[current_recv_size];
    MPI_Scatterv(&op,chunk_sizes,displ,mpi_op_type,&toReturn,current_recv_size,mpi_op_type,0,MPI_COMM_WORLD);

    #pragma omp parallel for private(i) shared(low_vol, epsilon, toReturn, current_recv_size) schedule(guided)
    for(j=0; j<current_recv_size; j++){
        for(i=0; i<14; i++)
            toReturn[j].implied_vol[i] = newton_raphson(toReturn[j].V_market[i][0], low_vol, epsilon, toReturn[j].S, toReturn[j].K[i], toReturn[j].r, toReturn[j].T, toReturn[j].V_market[i][1]);
    }

    MPI_Gatherv(&toReturn,current_recv_size,mpi_op_type,&op,chunk_sizes,displ,mpi_op_type,0,MPI_COMM_WORLD);

    if(rank == 0){
        for(i=0; i<len; i++){
            for(j=0; j<14; j++)
                printf("Implied vol. for time %.2f is %.2f%% \n", op[i].T, 100.0*op[i].implied_vol[j]);
        }
        fflush(stdout);
    }

    free(chunk_sizes);
    free(displ);
    MPI_Type_free(&mpi_op_type);
    MPI_Finalize();
    return 0;
}
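The core of newton_raphson above is one Newton step on the Black-Scholes price with vega as the derivative. A minimal sketch of that single step, with price_fn/vega_fn as hypothetical stand-ins for call_price/call_vega with the remaining parameters (S, K, r, T, type) already bound:

static double newton_step(double sigma, double v_market,
                          double (*price_fn)(double), double (*vega_fn)(double)) {
    /* sigma_{k+1} = sigma_k - (BS(sigma_k) - V_market) / vega(sigma_k);
       the loop in newton_raphson() repeats this until the price error
       drops below epsilon or MAX_ITERATIONS is reached */
    return sigma - (price_fn(sigma) - v_market) / vega_fn(sigma);
}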
general_basis_rep.h
#ifndef _GENERAL_BASIS_REP_H
#define _GENERAL_BASIS_REP_H

#include <complex>
#include <limits>
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"

namespace basis_general {

template<class I,class J>
int general_normalization(general_basis_core<I> *B, I s[], J n[], const npy_intp Ns)
{
	int err = 0;
	#pragma omp parallel
	{
		const npy_intp chunk = std::max(Ns/(100*omp_get_num_threads()),(npy_intp)1); // check_state has variable workload
		#pragma omp for schedule(dynamic,chunk)
		for(npy_intp i=0;i<Ns;i++){
			if(err != 0){
				continue;
			}
			double norm = B->check_state(s[i]);
			npy_intp int_norm = norm;

			// checks if data type is large enough
			if(!check_nan(norm) && int_norm>0 ){
				if( (npy_uintp)int_norm > std::numeric_limits<J>::max() ){err = 1;}
				n[i] = (J)norm;
			}
			else{
				n[i] = 0;
			}
		}
	}
	return err;
}

template<class I>
void general_representative(general_basis_core<I> *B, const I s[], I r[], int *g_out_ptr, npy_int8 *sign_out_ptr, const npy_intp Ns)
{
	const int nt = B->get_nt();

	if(g_out_ptr && sign_out_ptr){
		#pragma omp parallel
		{
			#pragma omp for schedule(static) // NOTE: refstate time has a constant workload
			for(npy_intp i=0;i<Ns;i++){
				int temp_sign = 1;
				r[i] = B->ref_state(s[i],&g_out_ptr[i*nt],temp_sign);
				sign_out_ptr[i] = temp_sign;
			}
		}
	}
	else if(g_out_ptr){
		#pragma omp parallel
		{
			#pragma omp for schedule(static) // NOTE: refstate time has a constant workload
			for(npy_intp i=0;i<Ns;i++){
				int temp_sign = 1;
				r[i] = B->ref_state(s[i],&g_out_ptr[i*nt],temp_sign);
			}
		}
	}
	else if(sign_out_ptr){
		#pragma omp parallel
		{
			int g[__GENERAL_BASIS_CORE__max_nt];
			#pragma omp for schedule(static) // NOTE: refstate time has a constant workload
			for(npy_intp i=0;i<Ns;i++){
				int temp_sign = 1;
				r[i] = B->ref_state(s[i],g,temp_sign);
				sign_out_ptr[i] = temp_sign;
			}
		}
	}
	else{
		#pragma omp parallel
		{
			int g[__GENERAL_BASIS_CORE__max_nt];
			#pragma omp for schedule(static) // NOTE: refstate time has a constant workload
			for(npy_intp i=0;i<Ns;i++){
				int temp_sign = 1;
				r[i] = B->ref_state(s[i],g,temp_sign);
			}
		}
	}
}

}
#endif
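The worksharing idiom used throughout this header is a single parallel region with an omp for inside it; a minimal sketch of the same pattern (assuming only OpenMP and the npy_intp typedef):

template<class T>
void scale_all(T v[], const npy_intp Ns, const T a){
	#pragma omp parallel
	{
		// per-thread setup (e.g. a chunk size) goes here, as in general_normalization
		#pragma omp for schedule(static)
		for(npy_intp i=0;i<Ns;i++){
			v[i] *= a; // each index is visited by exactly one thread of the team
		}
	}
}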
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride); const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride); typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); std::size_t sizeA = kc*mc; std::size_t sizeW = kc*Traits::WorkSpaceFactor; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0); ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0); RhsScalar* blockB = blocking.blockB(); eigen_internal_assert(blockB!=0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing A'. pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc); // Pack B_k to B' in a parallel fashion: // each thread packs the sub block B_k,j to B'_j where j is the thread id. // However, before copying to B'_j, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += (int)threads; pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length); // Notify the other threads that the part B'_j is ready to go. info[tid].sync = (int)k; // Computes C_i += A' * B' per B'_j for(Index shift=0; shift<threads; ++shift) { Index j = (tid+shift)%threads; // At this point we have to make sure that B'_j has been updated by the thread j, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! if(shift>0) while(info[j].sync!=k) {} gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w); } // Then keep going as usual with the remaining A' for(Index i=mc; i<rows; i+=mc) { const Index actual_mc = (std::min)(i+mc,rows)-i; // pack A_i,k to A' pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc); // C_i += A' * B' gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w); } // Release all the sub blocks B'_j of B' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index j=0; j<threads; ++j) #pragma omp atomic --(info[j].users); } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*cols; std::size_t sizeW = kc*Traits::WorkSpaceFactor; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW()); // For each horizontal panel of the rhs, and corresponding panel of the lhs... // (==GEMM_VAR1) for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack rhs's panel into a sequential chunk of memory (L2 caching) // Note that this panel will be read as many times as the number of blocks in the lhs's // vertical panel which is, in practice, a very low number. pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols); // For each mc x kc block of the lhs's vertical panel... 
// (==GEPP_VAR1) for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; // We pack the lhs's block into a sequential chunk of memory (L1 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro vertical panel of the large rhs's panel (e.g., cols/4 times). pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc); // Everything is packed, we can now call the block * panel kernel: gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW); } } } } }; /********************************************************************************* * Specialization of GeneralProduct<> for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Lhs, typename Rhs> struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> > : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> > {}; template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession() const { m_blocking.allocateB(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(), /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; RhsScalar* m_blockW; DenseIndex m_mc; DenseIndex m_nc; DenseIndex m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {} inline DenseIndex mc() const { return m_mc; } inline DenseIndex nc() const { return m_nc; } inline DenseIndex kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } inline RhsScalar* blockW() { return m_blockW; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth, SizeW = MaxDepth * Traits::WorkSpaceFactor }; EIGEN_ALIGN16 LhsScalar m_staticA[SizeA]; EIGEN_ALIGN16 RhsScalar m_staticB[SizeB]; EIGEN_ALIGN16 RhsScalar m_staticW[SizeW]; public: gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; this->m_blockA = m_staticA; this->m_blockB = m_staticB; this->m_blockW = m_staticW; } inline void allocateA() {} inline void allocateB() {} inline void allocateW() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; DenseIndex m_sizeA; DenseIndex m_sizeB; DenseIndex m_sizeW; public: gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; m_sizeW = this->m_kc*Traits::WorkSpaceFactor; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateW() { if(this->m_blockW==0) this->m_blockW = aligned_new<RhsScalar>(m_sizeW); } void allocateAll() { allocateA(); allocateB(); allocateW(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); aligned_delete(this->m_blockW, m_sizeW); } }; } // end namespace internal template<typename Lhs, typename Rhs> class GeneralProduct<Lhs, Rhs, GemmProduct> : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> { enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef Scalar ResScalar; GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp; EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); } template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); 
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols()); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } }; } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
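The users/sync handshake described in the comments of the parallel branch above can be summarized in a few lines; this is a simplified sketch with std::atomic standing in for Eigen's raw spinning, not Eigen code:

#include <atomic>

struct PanelInfo {
  std::atomic<int> users{0};  // threads still reading this thread's packed B'_j
  std::atomic<int> sync{-1};  // last depth index k for which B'_j is ready
};

inline void publish_panel(PanelInfo& info, int k, int nthreads) {
  while (info.users.load() != 0) {}   // wait for readers of the previous panel
  info.users.store(nthreads);         // pre-register every thread as a reader
  /* ... pack B'_j here ... */
  info.sync.store(k);                 // announce that B'_j is ready for step k
}

inline void release_panel(PanelInfo& info) {
  --info.users;                       // one reader done (cf. the omp atomic above)
}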
test8.c
void foo (int a) { 0; if (1) { 2; #pragma omp barrier 3; } else { 4; foo(3); 5; } } int main() { #pragma omp parallel { int x; 6; if (7) { 8; foo(9); 10; } else { 11; #pragma omp barrier 12; #pragma omp barrier 13; } 14; } }
8608.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <[email protected]> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(1) { #pragma omp for schedule(static, 1) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ #pragma omp for schedule(static, 1) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 1) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. 
All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
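A hedged sequential reference for the per-column statistics the kernel computes (same float_n normalization and the same biased standard deviation), useful for checking one column at a time:

static DATA_TYPE column_stddev(DATA_TYPE *col, int n, DATA_TYPE float_n, DATA_TYPE *mean_out) {
    DATA_TYPE mean = 0.0, var = 0.0;
    int i;
    for (i = 0; i < n; i++) mean += col[i];
    mean /= float_n;
    for (i = 0; i < n; i++) var += (col[i] - mean) * (col[i] - mean);
    var /= float_n;
    *mean_out = mean;
    return sqrt(var);  /* the kernel additionally clamps near-zero values to 1.0 */
}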
SymbolicDerivatives.h
#ifndef _SymbolicDerivatives_H_
#define _SymbolicDerivatives_H_

using namespace std;

#ifdef _OPENMP
#include <omp.h>
#endif

#define WITH_MMVII false
#define WITH_EIGEN false

#if WITH_EIGEN
#include "ExternalInclude/Eigen/Dense"  // TODO => replace with standard eigen file
#define EIGEN_ALLIGNMENT_IN_MMVII EIGEN_MAKE_ALIGNED_OPERATOR_NEW
#else
#define EIGEN_ALLIGNMENT_IN_MMVII
#endif

/* */
/** \file SymbolicDerivatives.h
    \brief File for generating symbolic derivatives

    Classes for generating symbolic derivatives. All classes are single template
    classes. The template parameter indicates the numerical type used for
    storage/computation (float, double ...).

    This file is the only file to include. It contains:

      * declaration of operators
      * definition of the "main" classes: cFormula, cCoordinatorF, cImplemF;
      * the 3 classes for atomic formulas, which will (probably) stay the same:
        Unknown, Observation, Constants

    This file includes 2 files corresponding to the following types of formulas:

      * classes for "unary" formulas in "MMVII_FormDer_UnaryOp.h"
      * classes for "binary" formulas in "MMVII_FormDer_BinaryOp.h"

    These 2 files are meant to be extended in the future.

    -------------------------------------------------

    * cFormula<Type>: represents a mathematical formula; as in math:
        - if F is a formula, exp(F), log(F) ... are formulas
        - if F1 and F2 are formulas, F1+F2, F1*F2 ... are formulas
        - there exist some atomic formulas like constants, unknowns and observations
        - if F is a formula, F->Derivate(k) is a formula corresponding to its derivative dF/dXk
      Formulas are a complete algebraic type.

    * cCoordinatorF<Type>: is the "coordinator" class. This class has, among others,
      the responsibility of:
        - creating the initial atomic formulas corresponding to unknowns and observations
        - maintaining an inventory of existing formulas for efficiency purposes

    * Using this library mainly means:
        - creating a coordinator with a given number of unknowns and observations
        - creating a formula using atoms and operators; generally the user function
          creating a formula will be a template that can operate on any complete
          algebraic type (double, float, Formula, jets ...)
        - indicating to the coordinator the formula you want to work on, generally with its derivatives
        - evaluating the values of the formula for given unknowns and observations

    cFormula<Type> is no more than an encapsulation of a pointer on the "concrete" class cImplemF.

    * cImplemF<Type>: is the mother class of all the formulas. It is a pure abstract class;
      it contains several pure virtual methods. The two main methods are "Derivate" and
      "ComputeBuf": these are the two methods the user will have to define when extending
      the library with a new operator.

        - cFormula<Type> Derivate(int aK) returns the formula of its derivative w.r.t. Xk.
          Here are two examples extracted from the code, one for multiplication, the other for unknowns:

             o return mF2*mF1->Derivate(aK) + mF1*mF2->Derivate(aK);  // From cMulF : (FG)' = F'G + FG'
             o return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0(); // from cUnknownF

        - void ComputeBuf(int aK0,int aK1): updates the buffer of its data, once its
          subformulas have been updated; this is the method that does the real job.
          Here are extracts from cExpF and cDivF:

             o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = std::exp(mDataF[aK]);
             o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = mDataF1[aK] / mDataF2[aK];
*/

#include "SymbDer_Common.h"

#if (WITH_MMVII)
#include "include/MMVII_all.h"
#include "include/MMVII_Derivatives.h"
using namespace MMVII;
#else             //==========================================================  WITH_MMVI
class cMemCheck
{
};
#include <memory>
#include <map>
#include <iostream>
#include <cassert>
#include "memory.h"
#include <fstream>
#include <string>
#include <typeinfo>
#include <vector>
#include <list>
#include <ctime>
#include <chrono>
#include <math.h>
#include <cmath>
#include <algorithm>
#include <sstream>
#include <iomanip>
#endif            //==========================================================  WITH_MMVI

// REDUCTION RULES
// TODO => REPLACE BY METHOD ON COORDINATOR WHEN THEY IMPROVE THINGS ....
#define DOREDUCE false
#define REDUCE_CSTE    true      // Cste+Cste => cste
#define REDUCE_MM      DOREDUCE  // - - x => x ; a-(-b) => a+b
#define REDUCE_ASSOCP  DOREDUCE  /* B + (A + C) => A + (B + C), more generally order the + operator, could be done with '*' */
#define REDUCE_DISTRIB DOREDUCE  // A#B ~ A#C => A#(B~C) ; # in "*/" and ~ in "+-"
#define REDUCE_ApA     DOREDUCE  // A+A => 2*A, not good by itself, but may create other reductions
#define REDUCE_DIST1   DOREDUCE  // A + A*C => A*(1+C) if C is a cste, to keep all constants together

static inline void SHOW_REDUCE(const std::string & aMes) {} // std::cout << "REDUCE " << aMes << "\n";}

namespace NS_SymbolicDerivative
{

/* *************************************************** */
/*                                                     */
/*        P0-Definition of global functions            */
/*                                                     */
/* *************************************************** */

/// The CreateCste is required for formulas, so we need it also on numeric types
template <class Type> inline Type CreateCste(const Type & aV,const Type &) { return aV; }

/// because pow is defined in std and there is a cast int->float that would make it inaccessible
template <class Type> inline Type pow(const Type & aV,const int & aExp)
{
    return std::pow(aV,Type(aExp));
}

//=============  BASIC ERROR HANDLING ==============

/** This function computes derivatives by finite differences.
    It is used in the tests to check the correctness of symbolic derivatives.
    Also used in didactic parts.
*/
template <class Type,class TypeFct>
std::vector<Type> NumericalDerivate
                  (
                      TypeFct & aFctr,                  ///< Function
                      const std::vector<Type> & aVUk,   ///< Unknowns
                      const std::vector<Type> & aVObs,  ///< Observations
                      int aNumVar,                      ///< Num of the unknown we differentiate by
                      const Type & aEpsilon             ///< "Small" number to compute variations
                  )
{
    std::vector<Type> aVPlus = aVUk;
    aVPlus.at(aNumVar) += aEpsilon;
    std::vector<Type> aResPlus = aFctr( aVPlus,aVObs);

    std::vector<Type> aVMinus = aVUk;
    aVMinus.at(aNumVar) -= aEpsilon;
    std::vector<Type> aResMinus = aFctr( aVMinus,aVObs);

    std::vector<Type> aDerivate;
    for (size_t aK=0 ; aK<aResPlus.size() ; aK++)
        aDerivate.push_back((aResPlus.at(aK)-aResMinus.at(aK)) / (2*aEpsilon));

    return aDerivate;
}

/* *************************************************** */
/* *************************************************** */
/* *                                                 * */
/* *            Main user interface                  * */
/* *                                                 * */
/* *************************************************** */
/* *************************************************** */

// ------------- The two classes visible by the user are cFormula and cCoordinatorF ------

/** Abstraction of a mathematical formula; this is the object manipulated by the user,
    and it has all the algebraic operations required. This object is just an
    encapsulation of a pointer on cImplemF. */
template <class TypeElem> class cFormula ;

/** Class for managing the "context", i.e. coordinating all the formulas and their
    derivatives corresponding to a single use. */
template <class TypeElem> class cCoordinatorF;

// -------- Declaration of all binary operators ----------------

// For each operator we have the 3 versions "Formula x Formula",
// "Number x Formula" and "Formula x Number"; the last two are rather
// syntactic sugar (i.e. they make usage easier, but do not extend the library's power)

    // Operator +
template <class TypeElem> cFormula <TypeElem>  operator +(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator +(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator +(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
    // Operator *
template <class TypeElem> cFormula <TypeElem>  operator *(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator *(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator *(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
    // Operator -
template <class TypeElem> cFormula <TypeElem>  operator -(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator -(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator -(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
    // Operator /
template <class TypeElem> cFormula <TypeElem>  operator /(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator /(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  operator /(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
    // pow
template <class TypeElem> cFormula <TypeElem>  pow (const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem>  pow (const TypeElem & aV1,const cFormula <TypeElem> & aF2);
/// This one is defined in MMVII_FormDer_UnaryOp.h
template <class TypeElem> cFormula <TypeElem>  pow
                                               (const cFormula <TypeElem> & aF1,const TypeElem & aV2);
template <class TypeElem> cFormula <TypeElem>  pow (const cFormula <TypeElem> & aF1,const int & aV2);

    // -------- integer low powers ----------------
template <class TypeElem> cFormula <TypeElem>  square(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  cube(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow4(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow5(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow6(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow7(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow8(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  pow9(const cFormula <TypeElem> & aF);

    // --- other unary operators
template <class TypeElem> cFormula <TypeElem>  exp(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  operator - (const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem>  log(const cFormula <TypeElem> & aF);

    // ---- sometimes we need a templatized way to create constants
template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF);

/// --- powI: pow with an integral exponent
template <class Type> Type powI(const Type & aV,const int & aExp)
{
    switch (aExp)
    {
        // case 0 : return Type(1.0);
        case 0 : return CreateCste(1.0,aV);
        case 1 : return aV;
        case 2 : return square(aV);
        case 3 : return cube(aV);
        case 4 : return pow4(aV);
        case 5 : return pow5(aV);
        case 6 : return pow6(aV);
        case 7 : return pow7(aV);
        case 8 : return pow8(aV);
        case 9 : return pow9(aV);
    }
    // else use the classical pow
    return pow(aV,aExp);
}

    // -------- Declaration of the Coordinator class ----------------

template <class TypeElem> class cCoordinatorF : public cCalculator<TypeElem>,public cMemCheck
{
    public :
        typedef cFormula <TypeElem>   tFormula;
        typedef std::vector<TypeElem> tOneRes;

      // ---------------------------  Constructors / Destructor -------------------
        /// Constructor with explicit Ids for Unknowns/Observations. Used if we want to analyze the generated code
        inline cCoordinatorF(const string &aName, int SzBuf, const std::vector<std::string> & aVecUK, const std::vector<std::string> & aVecObs);
        /// Constructor with basic Ids (used if we don't generate code, or don't want a human to analyse it)
        inline cCoordinatorF(const string &aName, int SzBuf,int aNbUnknown,int aNbObservation);
        /// Destructor will free the allocated formulas
        virtual ~cCoordinatorF();
        /// Copies are not allowed on this kind of object.
        cCoordinatorF(const cCoordinatorF<TypeElem> &) = delete;

      // ---------------------------  Accessors to Atomic Formulas -------------------
        const std::vector<tFormula>& VUk()  const {return mVFormUnknowns;}     ///< Unknowns
        const std::vector<tFormula>& VObs() const {return mVFormObservations;} ///< Observations

      // ---------------------------  Manipulation  -------------------
        /// Set the formulas that will be used for computation
        void  SetCurFormulas(const std::vector<tFormula> &);
        /** SetCurFormulas + all its derivatives; the order of storage will be
            VF0 dVF0/dX0  dVF0/dX1 .... VF1 dVF1/dX0 ... */
        void  SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF);

      // ---------- Code generator ---------------
        /** Generate code, class cName, file cName.h, cName.cpp.
Return filename w/o ext, or "" if error */ std::string GenerateCode(const std::string &aFilePrefix="CodeGen_") const { return GenCodeShortExpr(aFilePrefix); } std::string GenerateCodeTemplate(const std::string &aFilePrefix="CodeGen_") const { return GenCodeShortExprTemplate(aFilePrefix); } std::string GenerateCodeForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const { return GenCodeShortExprForType(aTypeName,aFilePrefix); } std::string GenCodeShortExpr(const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, "", true); } std::string GenCodeLonExpr(const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, "", false); } std::string GenCodeShortExprTemplate(const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, "template<>", true); } std::string GenCodeLonExprTemplate(const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, "template<>", false); } std::string GenCodeShortExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, aTypeName, true); } std::string GenCodeLonExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const { return GenCodeCommon(aFilePrefix, aTypeName, false); } // =========== Parametrisation of the generated code ========= /// The default value is not always adequate "SymbDer/SymbDer_Common.h" void SetHeaderIncludeSymbDer(const std::string &aH) {mHeaderIncludeSymbDer= aH;} void SetDirGenCode(const std::string &aDir) {mDirGenCode= aDir;} void SetUseAllocByName(bool aUse) {mUseAllocByName= aUse;} private : // END-USER /* ================================================================================= ABOVE WAS THE REAL PUBLIC PART OF cCoordinatorF FOR USER OF LIBRARY. 
          THE REST IS PUBLIC FOR IMPLEMENTERS, BUT NOT NEEDED BY THE USER
   =====================================================================================*/

    public :
       // Results of several evaluations are stored in a buffer; Eigen vectors are used
       // as they implement arithmetic operations efficiently
       // typedef Eigen::Array<TypeElem, 1, Eigen::Dynamic>  tBuf;
          typedef std::vector<TypeElem>  tBuf;

      // ---------------------------  Access to functions from names, values -------------------
        /// Indicate whether the formula corresponding to a given string already exists
        inline bool  ExistFunc(const std::string & aName) const
        {
            return (mDicoFunc.find(aName) != mDicoFunc.end());
        }
        /// Func of given name; Error if it doesn't exist
        inline tFormula FuncOfName(const std::string & aName) const ;
        /// Add a function (put it in the dico); Error if it already exists
        inline void AddFormula(tFormula aPF)
        {
            if (ExistFunc(aPF->Name()))
                InternalError ("Multiple add of identic name :[" + aPF->Name() + "]",this->Name());
            mDicoFunc[aPF->Name()] = aPF;
            mVAllFormula.push_back(aPF);
            aPF->TryReducAssoc();
        }
        /// Func of a given constant; create it if it doesn't exist
        inline tFormula CsteOfVal(const TypeElem & aCste) ;
        tFormula Cste0() const {return mCste0;}  ///< Access to a current constant
        tFormula Cste1() const {return mCste1;}  ///< Another access to a current constant
        tFormula Cste2() const {return mCste2;}  ///< Yet another access to a current constant

        /// Tuning --- Print the stack of functions as a tree
        inline void ShowStackFunc() const;

        /// Formulas used for computation
        const std::vector<tFormula>& VReached() const {return mVReachedF;}
        // Current (top) formulas
        const std::vector<tFormula>& VCurrent() const {return mVCurF;}

        size_t NbCurFonc() const {return mVAllFormula.size();}
    private :
        /// Called by cCalculator::PushNewEvals to set Unknowns/Observations
        virtual void SetNewUks(const std::vector<TypeElem> &aVUks) override;
        virtual void SetNewObs(const std::vector<TypeElem> &aVObs) override;

        /** Make the evaluation of current functions on pushed values */
        virtual void DoEval() override;

        /// Used to automatically generate Ids for Unknowns/Observations, when we don't need to control them explicitly
        static std::vector<std::string>   MakeAutomId(const std::string & aPrefix,int aNb);

        std::string GenCodeCommon(const string &aPrefix, string aTypeName, bool isShortExpr) const;
        std::string TypeElemName() const;

        size_t                         mNbCste;            ///< Number of Cste
        std::vector<tFormula>          mVFormUnknowns;     ///< Vector of all Unknowns
        std::vector<tFormula>          mVFormObservations; ///< Vector of all Observations
        std::map<std::string,tFormula> mDicoFunc;          ///< Map Name => Func
        std::vector<tFormula>          mVAllFormula;       ///< Vector of all Funcs; allows parsing them in creation order
        std::map<TypeElem,tFormula>    mDicoCste;          ///< Map Value => Constant Func
        tFormula                       mCste0;             ///< Constant function 0
        tFormula                       mCste1;             ///< Constant function 1
        tFormula                       mCste2;             ///< Constant function 2
        std::vector<tFormula>          mVCurF;             ///< Currently evaluated formulas
        std::vector<tFormula>          mVReachedF;         ///< Formulas "reachable", i.e. necessary to compute mVCurF
        std::string                    mHeaderIncludeSymbDer; ///< Compilation environment may want to change it
        std::string                    mDirGenCode;        ///< Want to put generated code in a fixed folder?
bool mUseAllocByName;                      ///< Do we generate code for allocation from name (with cName2Calc)?
};

/* **************************************************
 *                                                  *
 *   Pre-Declaration of all classes                 *
 *   Not required by compilation                    *
 *   (except for cImplemF) but I like to have       *
 *   a quick view of all existing classes           *
 *                                                  *
 * **************************************************/

/** "Mother" Interface class of all classes implementing the service,
    abstract class with pure virtual methods */
template <class TypeElem> class cImplemF ;

// --------------- "Atomic" function : Unknown, constant, observation-----------------
template <class TypeElem> class cAtomicF ;  ///< Mother Class of all atomic formulas
/// "Observations" corresponding to user constants (changed for each evaluation)
template <class TypeElem> class cObservationF ;
/// "Constant" function
template <class TypeElem> class cConstantF ;
/// "Unknown" for representing the coordinate functions X0,X1,X2 ....
template <class TypeElem> class cUnknownF;

// ----------------------------- Unary operator ------------------------------------
template <class TypeElem> class cUnaryF ;   ///< Mother Class of all unary operators
template <class TypeElem> class cSquareF ;  ///< Class for square operator
template <class TypeElem> class cExpF ;     ///< Class for exponential operator
template <class TypeElem> class cMin1F ;    ///< Class for Unary Minus
template <class TypeElem> class cLogF ;     ///< Class for natural (Napierian) log

// -------------------------------- Binary operator -------------------------------------
template <class TypeElem> class cBinaryF ;  ///< Mother class of binary operators
template <class TypeElem> class cSumF ;     ///< Class for sum of 2 functions
template <class TypeElem> class cMulF ;     ///< Class for multiplication of 2 functions
template <class TypeElem> class cSubF ;     ///< Class for subtraction of 2 functions
template <class TypeElem> class cDivF ;     ///< Class for division of 2 functions
template <class TypeElem> class cPowF ;     ///< Class for power of 2 functions

/* *************************************************** */
/* *************************************************** */
/* *                                                 * */
/* *        Definition of all classes                * */
/* *                                                 * */
/* *************************************************** */
/* *************************************************** */

// ------------------- 2 "Main" Classes -------------------------
//   cFormula / cImplemF
// ----------------------------------------------------------------

template <class TypeElem> class cImplemF : public cMemCheck
{
    public :
      // See eigen documentation, this macro is mandatory for alignment reasons
      // EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_ALLIGNMENT_IN_MMVII
      typedef TypeElem tElem;
      typedef cCoordinatorF<TypeElem> tCoordF;
      typedef typename tCoordF::tBuf tBuf;
      typedef typename tCoordF::tFormula tFormula;

      //----------- For derivation and reduction--------------
      virtual bool IsCste(const TypeElem &) const {return false;}  ///< To redefine in constant func, used for simplification in "/ * + -"
      virtual bool IsDistribInt() const {return false;}  ///< To redefine in *,/ for distributivity
      virtual tFormula Derivate(int aK) const = 0;  ///< Compute the formula of its derivative w.r.t. the Kth unknown
      /** In this function we try to make reductions using associativity (and maybe other rules);
          as we want to do it only on maximal chains of + (or *), it has to be run by the parent of the chain */
      void TryReducAssoc();
      virtual cImplemF<TypeElem> * ReducAssoc() {return this;}
      virtual bool IsMult() const {return false;}
      virtual bool IsSum() const {return false;}
      bool ReducAssocTried() const {return mReducAssocTried;}
      virtual cFormula<TypeElem> VOper2(const tFormula &,const tFormula &) const;  ///< Used in distributive reduction to recall the binary operator if suitable

      // -------------- For Computation -------------------------
      /// Method that will compute data inside mBuf
      virtual void ComputeBuf(int aK0,int aK1) =0;
      /// Return "Sub"-formulas referenced
      virtual std::vector<tFormula> Ref() const =0;

      // ---------- Accessors ---------------
      const std::string & Name() const {return mName;}  ///< Standard accessor
      tCoordF * CoordF() const {return mCoordF;}  ///< Standard accessor
      int NumGlob() const {return mNumGlob;}  ///< Standard accessor

      // ---------- Access to Buf data ---------------
      void SetBuf(size_t anIndex,const TypeElem & aVal) {mBuf.at(anIndex) = aVal;}
      const TypeElem & GetBuf(size_t anIndex) {return mBuf.at(anIndex);}
      TypeElem * DataBuf() {return mDataBuf;}

      // ---------- Reached Flag ---------------
      bool Reached() const {return mReached;}  ///< Standard accessor
      void SetReached(bool IsReached) {mReached = IsReached;}  ///< Fix Reached
      /// Traverse the reference graph and put the explored formulas in VReached
      void CalcRecursiveDepth(std::vector<tFormula> & VReached) ;
      int Depth() const {return mDepth;}  ///< Standard accessor
      void SetDepth(int aDepth) {mDepth = aDepth;}  ///< Fix Depth

      // ---------- Code gen -----------------------
      virtual bool isAtomic() const { return false;}
      virtual std::string GenCodeFormName() const {return NameGlob();}  // Name of formula, referenced value for Atomic
      virtual std::string GenCodeShortExpr() const = 0;  // N-Addresses code generation
      virtual std::string GenCodeDef() const = 0;  // Formula definition generation
      virtual std::string GenCodeRef() const;  // Formula reference generation
      int UsedCnt() const {return mUsedCnt;}  ///< Standard accessor

      // ---------- Tuning / Debugging / Analysing ---------------
      /// Used to print constants from a generic formula
      virtual const TypeElem * ValCste() const {return nullptr;}
      /// Infixed "Pretty" Print. For tuning and checking (i.e. correctness of reduction, derivative, rewrite ...)
      virtual std::string InfixPPrint() const =0;
      /// Number of references that would occur without reduction of identical formulas (to test performance in the paper)
      int RecursiveRec() const;
      // Everywhere a reference name is needed
      std::string NameGlob() const { return "F" + std::to_string(NumGlob());}
      /// Accessed at global level for reduction; also used for implementation in Unary & Binary
      virtual const std::string & NameOperator() const = 0;

      // -------------------- Destructor / Constructor --------------------------
      virtual ~cImplemF () {}  ///< Add a virtual ~X() when we have virtual methods, who knows ...
    protected :
      inline cImplemF (tCoordF * aCoordF,const std::string & aName) :
          mCoordF   (aCoordF),
          mBuf      (mCoordF->SzBuf(),TypeElem(0.0)),
          mDataBuf  (mBuf.data()),
          mName     (aName),
          mNumGlob  (mCoordF->NbCurFonc()),
          mReached  (false),
          mDepth    (-1),
          mUsedCnt  (0),
          mReducAssocTried (false)
      { }

      tCoordF * mCoordF;        ///< Coordinator that manages all the cooperating functions
      tBuf mBuf;                ///< Buf to store values
      TypeElem * mDataBuf;      ///< Raw pointer
      const std::string mName;  ///< string representation of the formula, for ex : C2, X1, V0 , square F3, F18/F3 ...
      int mNumGlob;             ///< Global number (!= Num in class)
      bool mReached;            ///< Flag to know if a formula is useful to compute the current ones
      int mDepth;               ///< Used for topological sort
    private :
      cImplemF (const cImplemF<TypeElem> &) = delete;  ///< No Copy
      unsigned mUsedCnt;
      bool mReducAssocTried;
};

template <class TypeElem> class cFormula
{
    public :
      typedef cCoordinatorF<TypeElem> tCoordF;
      typedef cImplemF<TypeElem> tImplemF;
      typedef typename tCoordF::tFormula tFormula;

      // -------------------- constructor -------------------
      /// Construct from a pointer, standard
      cFormula (tImplemF * aRawPtr) : mPtr (aRawPtr) { }
      /// Default constructor, required by some code (vector ?)
      cFormula (): cFormula <TypeElem> (nullptr) { }

      // --------------- operator on pointer ---------------------
      // UNUSED 4 NOW tImplemF & operator*() const {return *mPtr;}  ///< Standard behaviour of a pointer
      tImplemF * operator->() const {return mPtr;}  ///< Standard behaviour of a pointer
      tImplemF * RawPtr() const {return mPtr;}  ///< Explicit access
      // DO NOT WORK const std::unique_ptr<tImplemF> operator->() const {return std::unique_ptr<mPtr>;}
      bool IsNull() const {return mPtr==nullptr;}  ///< Safer than giving access to the raw pointer

      // --------------- Naming ---------------------
      /// Generate the unique identifier of a binary expression
      std::string NameFormulaBin(const std::string & aNameOper,const tFormula & aF2) const
      {
          return (*this)->NameGlob() + aNameOper + aF2->NameGlob();
      }
      /// Generate the unique identifier of a unary expression
      std::string NameFormulaUn(const std::string & aNameOper) const
      {
          return aNameOper + " " + (*this)->NameGlob();
      }
      /// To allow destruction without giving access to the raw pointer
      void FreeMem() {delete mPtr; mPtr=nullptr;}
    private :
      tImplemF* mPtr;  ///< Faster than shared_ptr, and deallocation is easy as objects are controlled by the context
};

/* *************************************************** */
/* *************************************************** */
/* *                                                 * */
/* *        ATOMIC FORMULA                           * */
/* *                                                 * */
/* *************************************************** */
/* *************************************************** */

/* ----------------------------------------------------------
   Class for atomic formula
      MOTHER CLASS : cAtomicF
      DERIVED : cUnknownF / cObservationF / cConstantF
----------------------------------------------------------------*/

template <class TypeElem> class cAtomicF : public cImplemF<TypeElem>
{
     public :
       typedef cImplemF<TypeElem> tImplemF;
       typedef typename tImplemF::tCoordF tCoordF;
       typedef typename tCoordF::tFormula tFormula;

       /// Should always work
       std::string InfixPPrint() const override {return tImplemF::Name();}
       /// Rule deriv=0 works by default (constants and observations)
       tFormula Derivate(int aK) const override {return tImplemF::mCoordF->Cste0();}
       /// Generally nothing to do in atomic functions, their buffer has been filled with adequate values
       void ComputeBuf(int aK0,int aK1) override { }
       std::vector<tFormula> Ref() const override{return std::vector<tFormula>();}
     protected :
       bool isAtomic() const override { return true;}
       std::string GenCodeFormName() const override { return this->Name();}
       std::string GenCodeShortExpr() const override { return this->GenCodeFormName();}
       std::string GenCodeRef() const override { return this->GenCodeFormName();}
       std::string GenCodeDef() const override { return mCodeValue;}
       inline cAtomicF(tCoordF * aCoordF,const std::string& aName) :
           tImplemF (aCoordF,aName)
       { }
       std::string mCodeValue;
};

template <class TypeElem> class cUnknownF : public cAtomicF<TypeElem>
{
      public :
        typedef
cAtomicF<TypeElem> tAtom; typedef typename tAtom::tImplemF tImplemF; typedef typename tImplemF::tCoordF tCoordF; typedef typename tCoordF::tFormula tFormula; const std::string & NameOperator() const override {static std::string s("UK"); return s;} std::string InfixPPrint() const override {return tImplemF::Name();} /// rule : dXi/dXj = delta(i,j) tFormula Derivate(int aK) const override { return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0(); } friend tCoordF; private : inline cUnknownF(tCoordF * aCoordF,const std::string& aName,int aNum) : tAtom (aCoordF,aName), mNumUnk (aNum) { this->mCodeValue = "this->mVUk[aK][" + std::to_string(mNumUnk) + "]"; } int mNumUnk; ///< Number of the Unknown; like : 0 for X0, 1 for X1 ... }; template <class TypeElem> class cObservationF : public cAtomicF<TypeElem> { public : typedef cAtomicF<TypeElem> tAtom; typedef typename tAtom::tImplemF tImplemF; typedef typename tImplemF::tCoordF tCoordF; typedef typename tCoordF::tFormula tFormula; friend tCoordF; const std::string & NameOperator() const override {static std::string s("Obs"); return s;} private : inline cObservationF(tCoordF * aCoordF,const std::string & aName,int aNum) : tAtom (aCoordF,aName), mNum (aNum) { this->mCodeValue = "this->mVObs[aK][" + std::to_string(mNum) + "]"; } int mNum; ///< Number of the Observation; like : 0 for V0, 1 for V1 ... }; template <class TypeElem> class cConstantF : public cAtomicF<TypeElem> { public : typedef cAtomicF<TypeElem> tAtom; typedef typename tAtom::tImplemF tImplemF; typedef typename tImplemF::tCoordF tCoordF; typedef typename tCoordF::tFormula tFormula; typedef typename tCoordF::tBuf tBuf; friend tCoordF; bool IsCste(const TypeElem &K) const override {return mVal==K;} ///< Here we know if we are a constant of value K const TypeElem * ValCste() const override {return &mVal;} const std::string & NameOperator() const override {static std::string s("Cste"); return s;} protected : inline cConstantF(tCoordF * aCoordF,const std::string & aName,int aNum,const TypeElem& aVal) : tAtom (aCoordF,aName), mNum (aNum), mVal (aVal) { for (auto & aV : tImplemF::mBuf) aV = aVal; // Initialize buf with const val std::stringstream ss; // Precision that ensures that Num0 -> ASCII -> Num1 => Num1 == Num0 // May cause some odd but correct value for non exactly representable numbers ss << std::setprecision(std::numeric_limits<decltype(mVal)>::max_digits10) << mVal; this->mCodeValue = ss.str(); } std::string GenCodeFormName() const override { return this->mCodeValue;} int mNum; const TypeElem mVal; }; /* *************************************************** */ /* *************************************************** */ /* * * */ /* * cFormula / cImplemF / cCoordinatorF * */ /* * External Definition of methods * */ /* * * */ /* *************************************************** */ /* *************************************************** */ /* ---------------------- */ /* cFormula */ /* ---------------------- */ template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF) { return aF->CoordF()->CsteOfVal(aV); } /* ---------------------- */ /* cImplemF */ /* ---------------------- */ template <class TypeElem> int cImplemF<TypeElem>::RecursiveRec() const { int aRes = 1; for (const auto & aF : Ref()) { aRes += aF->RecursiveRec(); } return aRes; } template <class TypeElem> void cImplemF<TypeElem>::CalcRecursiveDepth(std::vector<tFormula> & aVReached) { if (mDepth != -1) { mUsedCnt++; return; // if we were already here , nothing to do } mUsedCnt = 1; for 
(const auto & aF : Ref())
    {
        aF->CalcRecursiveDepth(aVReached);   // parse sub formulas
        mDepth = std::max(mDepth,aF->mDepth); // memo max depth
    }
    mDepth++;  // my depth is 1 + max of the depths of my referenced formulas
    aVReached.push_back(tFormula(this));
}

template <class TypeElem> void cImplemF<TypeElem>::TryReducAssoc()
{
    for (auto & aF : Ref())
    {
        // aF will not belong to the terminal chain that will have to be reparsed
        // If we are in the config (A+B) + .. maybe the chain will grow later
        if (aF->NameOperator() != NameOperator())
        {
            aF = aF->ReducAssoc();
        }
        aF->mReducAssocTried = true;
    }
}

template <class TypeElem> cFormula<TypeElem> cImplemF<TypeElem>::VOper2(const tFormula & aF1,const tFormula &) const
{
    InternalError("Incorrect virtual binary operation",this->mCoordF->Name());
    return aF1;
}

template <class TypeElem> std::string cImplemF<TypeElem>::GenCodeRef() const
{
    if (UsedCnt() == 1)
    {
        return GenCodeDef();
    }
    else
    {
        return GenCodeFormName();
    }
}

/*       ----------------------     */
/*       cCoordinatorF              */
/*       ----------------------     */

template <class TypeElem>
std::vector<std::string> cCoordinatorF<TypeElem>::MakeAutomId(const std::string & aPrefix,int aNb)
{
    std::vector<std::string> aRes;
    for (int aK=0 ; aK<aNb ; aK++)
        aRes.push_back(aPrefix+ std::to_string(aK));
    return aRes;
}

template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF
(
    const std::string & aName,
    int aSzBuf,
    const std::vector<std::string> & aVNameUK,
    const std::vector<std::string> & aVNameObs
) :
    cCalculator<TypeElem>(aName,aSzBuf,aVNameUK.size(),aVNameObs.size()),
    mNbCste (0),
    mCste0  (CsteOfVal(0.0)),
    mCste1  (CsteOfVal(1.0)),
    mCste2  (CsteOfVal(2.0)),
    mHeaderIncludeSymbDer ("SymbDer/SymbDer_Common.h"),
    mDirGenCode (""),
    mUseAllocByName (false)  // For strict compatibility with previous Jo's code
{
    // Generate all the functions corresponding to unknowns
    for (size_t aNumUK=0 ; aNumUK<this->mNbUK ; aNumUK++)
    {
        tFormula aFuncUK(new cUnknownF<TypeElem>(this,aVNameUK[aNumUK],aNumUK));  // Create it
        mVFormUnknowns.push_back(aFuncUK);  // Push it in the vector of coordinator funcs
        AddFormula(aFuncUK);                // Add to all funcs
    }

    // Generate all the functions corresponding to observations
    for (size_t aNumObs=0 ; aNumObs<this->mNbObs ; aNumObs++)
    {
        tFormula aFuncObs(new cObservationF<TypeElem>(this,aVNameObs[aNumObs],aNumObs));  // Create it
        mVFormObservations.push_back(aFuncObs);  // Push it in the vector of coordinator funcs
        AddFormula(aFuncObs);                    // Add to all funcs
    }
}

template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF(const std::string &aName, int aSzBuf, int aNbUK, int aNbObs) :
    cCoordinatorF<TypeElem>(aName,aSzBuf,MakeAutomId("X",aNbUK),MakeAutomId("V",aNbObs))
{
}

template <class TypeElem> cCoordinatorF<TypeElem>::~cCoordinatorF()
{
    for (auto & aForm : mVAllFormula)
    {
        aForm.FreeMem();
    }
}

template <class TypeElem>
cFormula<TypeElem> cCoordinatorF<TypeElem>::CsteOfVal(const TypeElem & aCste)
{
    tFormula & aRef = mDicoCste[aCste];
    if (aRef.IsNull())  // If it did not exist, the map now contains a default element
    {
        // The prefix makes constants come first in alphabetical order, maybe used for reduction?
        aRef=tFormula(new cConstantF<TypeElem>(this,"_C"+std::to_string(mNbCste),mNbCste,aCste));
        mNbCste++;
        AddFormula(aRef);
    }
    return aRef;
}

template <class TypeElem>
cFormula <TypeElem> cCoordinatorF<TypeElem>::FuncOfName(const std::string & aName) const
{
    const auto & anIt = mDicoFunc.find(aName);
    if (anIt == mDicoFunc.end())
        InternalError ("Try to access non-existing name :[" + aName + "]",this->Name());
    return anIt->second;
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewUks(const std::vector<TypeElem> & aVUks)
{
    for (size_t aK=0 ; aK<aVUks.size() ; aK++)  // Init values of the formula buffers
    {
        mVFormUnknowns[aK]->SetBuf(this->mNbInBuf,aVUks[aK]);
    }
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewObs(const std::vector<TypeElem> & aVObs)
{
    for (size_t aK=0 ; aK<aVObs.size() ; aK++)  // Init values of the formula buffers
    {
        mVFormObservations[aK]->SetBuf(this->mNbInBuf,aVObs[aK]);
    }
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF)
{
    std::vector<tFormula> aVWDer;
    for (const auto & aF : aVF)
    {
        aVWDer.push_back(aF);
        for (size_t aUK=0 ; aUK<this->mNbUK ; aUK++)
        {
            aVWDer.push_back(aF->Derivate(aUK));
        }
    }
    SetCurFormulas(aVWDer);
    this->mWithDer    = true;
    this->mSzInterval = 1+this->mNbUK;
    this->mNbElem     = aVF.size();
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulas(const std::vector<tFormula> & aVF0)
{
    std::vector<tFormula> aVF;
    for(auto aF : aVF0)
    {
        if (! aF->ReducAssocTried())
        {
            aF = tFormula(aF->ReducAssoc());
            // std::cout << "GGGGGGGG " << aF->Name() << " \n";
        }
        aVF.push_back(aF);
    }
    this->mWithDer    = false;
    this->mSzInterval = 1;
    this->mNbElem     = aVF0.size();
    mVCurF            = aVF;

    // Erase previous
    for (auto & aF : mVReachedF)
        aF->SetDepth(-1);
    mVReachedF.clear();

    // Compute depth for topological sort
    for (auto & aF : mVCurF)
    {
        aF->CalcRecursiveDepth(mVReachedF);
    }

    // Use depth to get a topological sort
    // In fact this sort is probably not necessary, the initial reaching order
    // should work; in any case it does no damage ..
    std::sort
    (
        mVReachedF.begin(),
        mVReachedF.end(),
        [](const tFormula & aF1,const tFormula &aF2) {return aF1->Depth() < aF2->Depth();}
    );

    // Resize the lines of the result buffer to the right size
    for (auto & aLine : this->mBufLineRes)
    {
        aLine.resize(mVCurF.size());
    }
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::DoEval()
{
    // Do the real hard work: compute the data; the dependency ordering makes it coherent
#ifdef _OPENMP
#pragma omp parallel
    {
        size_t thread_num = omp_get_thread_num();
        size_t num_threads = omp_get_num_threads();
        size_t start = thread_num * this->mNbInBuf / num_threads;
        size_t end = (thread_num + 1) * this->mNbInBuf / num_threads;
        if (end>start)
        {
            for (auto & aF : mVReachedF)
            {
                aF->ComputeBuf(start,end);
            }
        }
    }
#else
    for (auto & aF : mVReachedF)
    {
        aF->ComputeBuf(0,this->mNbInBuf);
    }
#endif
    for (size_t aKLine=0 ; aKLine<this->mNbInBuf ; aKLine++)
    {
        std::vector<TypeElem> & aLine = this->mBufLineRes[aKLine];
        for (size_t aKFunc=0 ; aKFunc< mVCurF.size() ; aKFunc++)
            aLine[aKFunc] = mVCurF[aKFunc]->GetBuf(aKLine);
    }
}

template <class TypeElem>
void cCoordinatorF<TypeElem>::ShowStackFunc() const
{
    for (const auto & aForm : mVAllFormula)
    {
        if (aForm->Depth()==-1)
            std::cout << "---" ;
        else
            std::cout << "-" << aForm->Depth() << "-";
        std::cout << aForm->UsedCnt() << "- ";
        std::cout << aForm->NameGlob() << " => " << aForm->Name();
        const TypeElem * aPV = aForm->ValCste();
        if (aPV)
            std::cout << " ; Val=" << *aPV;
        std::cout << "\n";
    }

    std::cout << "REACHED ";
    for (const auto & aForm : mVReachedF)
    {
        std::cout << aForm->NumGlob() << " ";
    }
    std::cout << "\n";

    std::cout << "CUR ";
    for (const auto & aForm : mVCurF)
    {
        std::cout << aForm->NumGlob() << " ";
    }
    std::cout << "\n";
}

template <class TypeElem>
std::string cCoordinatorF<TypeElem>::GenCodeCommon(const std::string& aPrefix, std::string aTypeName, bool isShortExpr) const
{
    std::string aName = this->Name();
    if (aName.size() == 0)
        UserSError("Formula name is empty.",this->Name());
    for (auto &c : aName)
    {
        if (!std::isalnum(c) && c != '_')
            UserSError("Formula name is not a valid C++ identifier: '_,a..z,A..Z,0..9' only.",this->Name());
    }
    std::string aClassName = "c" + aName;
    if (aTypeName.size()==0)
        aTypeName = this->TypeElemName();
    bool isTemplated = aTypeName=="template<>";
    if (isTemplated)
        aTypeName = "TypeElem";
    std::string aVectorName = "std::vector<" + aTypeName + ">";
    if (!
isShortExpr) aClassName = aClassName + "LongExpr"; std::string aParentClass = "cCalculator<" + aTypeName + ">"; std::string aFileName = aPrefix + aClassName; std::ofstream aOs(mDirGenCode + aFileName + ".h"); if (!aOs) return ""; aOs << "#ifdef _OPENMP\n" "#include <omp.h>\n" "#endif\n" "#include \"" << mHeaderIncludeSymbDer << "\"\n" "\n" "namespace NS_SymbolicDerivative {\n\n"; if (isTemplated) { aOs << "template<typename TypeElem>\n"; } aOs << "class " << aClassName << " : public " << aParentClass << "\n" "{\n" "public:\n" " typedef " << aParentClass << " Super;\n" " " << aClassName << "(size_t aSzBuf) : \n" " Super(\"" << aName << "\", aSzBuf," << this->mNbUK << "," << this->mNbObs << "," << this->mWithDer << "," << this->mSzInterval << "),\n" " mVUk(aSzBuf),mVObs(aSzBuf)\n" " {\n" " this->mNbElem = " << this->mNbElem << ";\n" " for (auto& line : this->mBufLineRes)\n" " line.resize(" << mVCurF.size() << ");\n" " for (auto& aUk : this->mVUk)\n" " aUk.resize(this->NbUk());\n" " for (auto& aObs : this->mVObs)\n" " aObs.resize(this->NbObs());\n" " }\n" " static std::string FormulaName() { return \"" << aName << "\";}\n" "protected:\n" " virtual void SetNewUks(const " << aVectorName << " & aVUks) override\n" " {\n" " for (size_t i=0; i<this->NbUk(); i++)\n" " this->mVUk[this->mNbInBuf][i] = aVUks[i];\n" " }\n" " virtual void SetNewObs(const " << aVectorName << " & aVObs) override\n" " {\n" " for (size_t i=0; i<this->NbObs(); i++)\n" " this->mVObs[this->mNbInBuf][i] = aVObs[i];\n" " }\n" " virtual void DoEval() override;\n" " std::vector<" << aVectorName << "> mVUk;\n" " std::vector<" << aVectorName << "> mVObs;\n" "};\n" "\n"; if (! isTemplated) { aOs << "} // namespace NS_SymbolicDerivative\n"; aOs = std::ofstream(mDirGenCode+aFileName + ".cpp"); if (!aOs) return ""; aOs << "#include \"" + aFileName + ".h\"\n" "\n" "namespace NS_SymbolicDerivative {\n" "\n" "void " << aClassName << "::DoEval()\n"; } else { aOs << "\n" "template<typename TypeElem>\n" "void " << aClassName << "<TypeElem>::DoEval()\n"; } aOs << "{\n" "#ifdef _OPENMP\n" "#pragma omp parallel for\n" "#endif\n" " for (size_t aK=0; aK < this->mNbInBuf; aK++) {\n" "// Declare local vars in loop to make them per thread\n"; for (auto & aForm : mVFormUnknowns) aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n"; for (const auto & aForm : mVFormObservations) aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n"; if (isShortExpr) { for (const auto & aForm : mVReachedF) { if (!aForm->isAtomic()) aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeShortExpr() << ";\n"; } for (size_t i=0; i<mVCurF.size(); i++) aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeFormName() << ";\n"; } else { for (const auto & aForm : mVReachedF) { if (aForm->UsedCnt() != 1 && !aForm->isAtomic()) { aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n"; } } for (size_t i=0; i<mVCurF.size(); i++) aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeRef() << ";\n"; } aOs << " }\n" "}\n\n"; if (mUseAllocByName) { aOs << "cCalculator<" << aTypeName << "> * Alloc_" << aName << "(int aSzBuf)\n" << "{\n" << " return new c" << aName << "(aSzBuf);\n" << "}\n\n" << "cName2Calc<" << aTypeName << "> TheNameAlloc_" << aName <<"(\""<< aName <<"\",Alloc_" << aName<< ");\n\n"; } aOs << "} // namespace NS_SymbolicDerivative\n"; return aFileName; } template<> inline 
std::string cCoordinatorF<double>::TypeElemName() const {return "double";}

template<> inline std::string cCoordinatorF<float>::TypeElemName() const {return "float";}

template<typename T> struct Detect_if_TypeElemName_is_defined : std::false_type { };

template<class TypeElem>
inline std::string cCoordinatorF<TypeElem>::TypeElemName() const
{
    static_assert( Detect_if_TypeElemName_is_defined<TypeElem>::value , "** You must define cCoordinatorF::TypeElemName() for your type **");
    return "";
}

} // namespace NS_SymbolicDerivative

#include "SymbDer_UnaryOp.h"
#include "SymbDer_BinaryOp.h"

/*
https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
http://en.wikipedia.org/wiki/Automatic_differentiation
https://git.irc.umbc.edu/photorig/openMVG/blob/260584fda68dce095e279362efd24a2d7d7cf5d9/src/third_party/ceres-solver/include/ceres/jet.h
https://mc-stan.org/
http://www.met.reading.ac.uk/clouds/adept/array_features.html
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.89.7749&rep=rep1&type=pdf
http://www.autodiff.org/
*/

#endif // _SymbolicDerivatives_H_
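/* A minimal end-to-end usage sketch, NOT part of the library: the accessor
   names VUk()/VObs() for the unknown/observation formulas are assumptions
   (the public part of cCoordinatorF is not repeated here), while the +,*
   overloads on cFormula come from SymbDer_UnaryOp.h / SymbDer_BinaryOp.h
   included above:

       using namespace NS_SymbolicDerivative;

       // 2 unknowns (X0,X1), 1 observation (V0), buffer of 100 evaluations
       cCoordinatorF<double> aCoord("Ratio",100,2,1);
       auto aF = aCoord.VUk()[0] * aCoord.VUk()[1] + aCoord.VObs()[0]; // X0*X1 + V0
       aCoord.SetCurFormulasWithDerivative({aF});  // stores F, dF/dX0, dF/dX1
       aCoord.GenerateCode();                      // writes CodeGen_cRatio.h/.cpp

   Since SetCurFormulasWithDerivative sets mWithDer, each evaluation yields
   1 + NbUk values per input formula: the value followed by its partials. */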
plot.h
#ifndef OPENMC_PLOT_H #define OPENMC_PLOT_H #include <unordered_map> #include <sstream> #include "pugixml.hpp" #include "xtensor/xarray.hpp" #include "hdf5.h" #include "openmc/position.h" #include "openmc/constants.h" #include "openmc/cell.h" #include "openmc/error.h" #include "openmc/geometry.h" #include "openmc/particle.h" #include "openmc/xml_interface.h" #include "openmc/random_lcg.h" namespace openmc { //=============================================================================== // Global variables //=============================================================================== class Plot; namespace model { extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index extern std::vector<Plot> plots; //!< Plot instance container extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter extern int plotter_stream; // Stream index used by the plotter } // namespace model //=============================================================================== // RGBColor holds color information for plotted objects //=============================================================================== struct RGBColor { //Constructors RGBColor() : red(0), green(0), blue(0) { }; RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { }; RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { }; RGBColor(const std::vector<int> &v) { if (v.size() != 3) { throw std::out_of_range("Incorrect vector size for RGBColor."); } red = v[0]; green = v[1]; blue = v[2]; } bool operator ==(const RGBColor& other) { return red == other.red && green == other.green && blue == other.blue; } // Members uint8_t red, green, blue; }; // some default colors const RGBColor WHITE {255, 255, 255}; const RGBColor RED {255, 0, 0}; typedef xt::xtensor<RGBColor, 2> ImageData; struct IdData { // Constructor IdData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids }; struct PropertyData { // Constructor PropertyData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data }; enum class PlotType { slice = 1, voxel = 2 }; enum class PlotBasis { xy = 1, xz = 2, yz = 3 }; enum class PlotColorBy { cells = 0, mats = 1 }; //=============================================================================== // Plot class //=============================================================================== class PlotBase { public: template<class T> T get_map() const; // Members public: Position origin_; //!< Plot origin in geometry Position width_; //!< Plot width in geometry PlotBasis basis_; //!< Plot basis (XY/XZ/YZ) std::array<size_t, 3> pixels_; //!< Plot size in pixels bool color_overlaps_; //!< Show overlapping cells? 
  int level_; //!< Plot universe level
};

template<class T>
T PlotBase::get_map() const {
  size_t width = pixels_[0];
  size_t height = pixels_[1];

  // get pixel size
  double in_pixel = (width_[0])/static_cast<double>(width);
  double out_pixel = (width_[1])/static_cast<double>(height);

  // size data array
  T data(width, height);

  // setup basis indices and initial position centered on pixel
  int in_i, out_i;
  Position xyz = origin_;
  switch(basis_) {
  case PlotBasis::xy :
    in_i = 0;
    out_i = 1;
    break;
  case PlotBasis::xz :
    in_i = 0;
    out_i = 2;
    break;
  case PlotBasis::yz :
    in_i = 1;
    out_i = 2;
    break;
  default:
    UNREACHABLE();
  }

  // set initial position
  xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;

  // arbitrary direction
  Direction dir = {0.7071, 0.7071, 0.0};

#pragma omp parallel
  {
    Particle p;
    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j{};

#pragma omp for
    for (int y = 0; y < height; y++) {
      p.r()[out_i] = xyz[out_i] - out_pixel * y;
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;
        p.n_coord_ = 1;
        // local variables
        bool found_cell = find_cell(p, 0);
        j = p.n_coord_ - 1;
        if (level >=0) {j = level + 1;}
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
        if (color_overlaps_ && check_cell_overlap(p, false)) {
          data.set_overlap(y, x);
        }
      } // inner for
    } // outer for
  } // omp parallel

  return data;
}

class Plot : public PlotBase
{

public:
  // Constructor
  Plot(pugi::xml_node plot);

  // Methods
private:
  void set_id(pugi::xml_node plot_node);
  void set_type(pugi::xml_node plot_node);
  void set_output_path(pugi::xml_node plot_node);
  void set_bg_color(pugi::xml_node plot_node);
  void set_basis(pugi::xml_node plot_node);
  void set_origin(pugi::xml_node plot_node);
  void set_width(pugi::xml_node plot_node);
  void set_universe(pugi::xml_node plot_node);
  void set_default_colors(pugi::xml_node plot_node);
  void set_user_colors(pugi::xml_node plot_node);
  void set_meshlines(pugi::xml_node plot_node);
  void set_mask(pugi::xml_node plot_node);
  void set_overlap_color(pugi::xml_node plot_node);

  // Members
public:
  int id_; //!< Plot ID
  PlotType type_; //!< Plot type (Slice/Voxel)
  PlotColorBy color_by_; //!< Plot coloring (cell/material)
  int meshlines_width_; //!< Width of lines added to the plot
  int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
  RGBColor meshlines_color_; //!< Color of meshlines on the plot
  RGBColor not_found_ {WHITE}; //!< Plot background color
  RGBColor overlap_color_ {RED}; //!< Plot overlap color
  std::vector<RGBColor> colors_; //!< Plot colors
  std::string path_plot_; //!< Plot output filename
};

//===============================================================================
// Non-member functions
//===============================================================================

//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);

//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[in] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);

//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace, hid_t* dset,
                hid_t* memspace);

//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset, hid_t memspace,
                       void* buf);

//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);

//===============================================================================
// External functions
//===============================================================================

//! Read plot specifications from a plots.xml file
void read_plots_xml();

//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);

//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);

//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();

} // namespace openmc
#endif // OPENMC_PLOT_H
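// The voxel_* functions above form a small write protocol: voxel_init() once,
// voxel_write_slice() per x-plane, voxel_finalize() at the end.  A hedged
// sketch of the intended call sequence; the file name, dimensions nx/ny/nz,
// and the int32 slice payload are illustrative assumptions, not taken from
// this header:
//
//   hid_t file_id = H5Fcreate("plot_1_voxels.h5", H5F_ACC_TRUNC,
//                             H5P_DEFAULT, H5P_DEFAULT);
//   hsize_t dims[3] = {nx, ny, nz};
//   hid_t dspace, dset, memspace;
//   openmc::voxel_init(file_id, dims, &dspace, &dset, &memspace);
//   std::vector<int32_t> slice(ny * nz);
//   for (int x = 0; x < nx; ++x) {
//     // ... fill `slice` with cell or material ids for plane x ...
//     openmc::voxel_write_slice(x, dspace, dset, memspace, slice.data());
//   }
//   openmc::voxel_finalize(dspace, dset, memspace);
//   H5Fclose(file_id);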
010_pi.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

double compute_partial_pi(long nr_tries, unsigned int *seed);

int main() {
    const long nr_tries = 1000000;
    const int nr_blocks = 10;
    int num_threads = 1;
    double nr_success = 0.0;
#pragma omp parallel default(none) shared(nr_success) shared(num_threads)
    {
        int thread_num = 0;
        unsigned int seed = 0;
/* the standard OpenMP feature-test macro is _OPENMP (one leading
   underscore); __OPENMP is never defined by the compiler */
#ifdef _OPENMP
        thread_num = omp_get_thread_num();
        num_threads = omp_get_num_threads();
        seed = thread_num;
#endif
        printf("thread %d of %d\n", thread_num, num_threads);
        long partial_nr_tries = nr_tries/(num_threads*nr_blocks);
#pragma omp for reduction(+:nr_success)
        for (int i = 0; i < nr_blocks; i++)
            nr_success += compute_partial_pi(partial_nr_tries, &seed);
    }
    /* nr_success is the sum of nr_blocks success fractions (the omp for
       distributes the nr_blocks iterations over all threads), so the
       average is taken over nr_blocks, not num_threads*nr_blocks */
    printf("pi = %.15lf\n", 4.0*nr_success/nr_blocks);
    return 0;
}

double random_number(unsigned int *seed) {
    return ((double) rand_r(seed))/RAND_MAX;
}

double compute_partial_pi(long nr_tries, unsigned int *seed) {
    double x, y, nr_success = 0.0;
    for (long i = 0; i < nr_tries; i++) {
        x = random_number(seed);
        y = random_number(seed);
        if (x*x + y*y < 1.0)
            nr_success += 1.0;
    }
    return nr_success/nr_tries;
}
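/* Why this estimates pi (a note, not from the original source): for (x,y)
 * drawn uniformly from the unit square, the probability of landing inside
 * the quarter disc x^2 + y^2 < 1 equals its area,
 *
 *     P(x^2 + y^2 < 1) = (pi/4) / 1 = pi/4
 *
 * so 4 * (successes/tries) converges to pi by the law of large numbers.
 * rand_r() is used instead of rand() so that each thread owns its seed and
 * draws an independent, reproducible stream without shared hidden state. */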
GB_unaryop__minv_int32_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int32_int16 // op(A') function: GB_tran__minv_int32_int16 // C type: int32_t // A type: int16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int32_int16 ( int32_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int32_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
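// For reference (not part of the generated file): after macro substitution,
// one iteration GB_CAST_OP (p, p) of the loop in GB_unop__minv_int32_int16
// above expands to straight-line code equivalent to:
//
//   int16_t aij = Ax [p] ;               // GB_GETA: read the int16 entry
//   int32_t x = (int32_t) aij ;          // GB_CASTING: typecast to int32
//   Cx [p] = GB_IMINV_SIGNED (x, 32) ;   // GB_OP: 32-bit signed integer
//                                        //        multiplicative inverse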
GB_binop__bor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int16) // C=scalar+B GB (_bind1st__bor_int16) // C=scalar+B' GB (_bind1st_tran__bor_int16) // C=A+scalar GB (_bind2nd__bor_int16) // C=A'+scalar GB (_bind2nd_tran__bor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT16 || GxB_NO_BOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
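// Note (not in the generated source): in the bind1st/bind2nd kernels above,
// GBB (Bb, p) tests whether entry p is present (always true when the matrix
// is not in bitmap form, i.e. Bb == NULL), and GBX (Bx, p, iso) reads Bx [p],
// or Bx [0] when the matrix is iso-valued.  So for this bitwise-or operator,
// _bind1st computes Cx [p] = x | Bx [p] for every present entry, and
// _bind2nd computes Cx [p] = Ax [p] | y.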
LG_check_tri.c
//------------------------------------------------------------------------------
// LG_check_tri: compute the number of triangles in a graph (simple method)
//------------------------------------------------------------------------------

// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact [email protected] for the full terms.

//------------------------------------------------------------------------------

// A very slow, bare-bones triangle count using a parallel dot-product method.
// Computes the sum(sum((A'*A).*A)), in MATLAB notation, where A is symmetric
// and treated as binary (only the structure is used).  Diagonal entries are
// ignored.  In GraphBLAS notation, C{A} = A'*A followed by reduce(C) to scalar.

// This method is for testing only, to check the result of other, faster
// methods.  Do not benchmark this method; it is slow and simple by design.

#define LAGraph_FREE_WORK                   \
{                                           \
    LAGraph_Free ((void **) &Mark) ;        \
}

#define LAGraph_FREE_ALL                    \
{                                           \
    LAGraph_FREE_WORK ;                     \
    LAGraph_Free ((void **) &Ap) ;          \
    LAGraph_Free ((void **) &Aj) ;          \
    LAGraph_Free ((void **) &Ax) ;          \
}

#include "LG_internal.h"
#include "LG_test.h"

int LG_check_tri        // -1 if out of memory, 0 if successful
(
    // output
    uint64_t *ntri,     // # of triangles in A
    // input
    LAGraph_Graph G,    // the structure of G->A must be symmetric
    char *msg
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    bool *restrict Mark = NULL ;
    GrB_Index *Ap = NULL, *Aj = NULL, *Ai = NULL ;
    void *Ax = NULL ;
    GrB_Index Ap_size, Aj_size, Ax_size, n, ncols, Ap_len, Aj_len, Ax_len ;
    LG_CHECK (ntri == NULL, -1003, "ntri is NULL") ;
    LG_CHECK (LAGraph_CheckGraph (G, msg), -1002, "graph is invalid") ;
    LG_CHECK (G->ndiag != 0, -104, "G->ndiag must be zero") ;
    if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
       (G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
        G->A_structure_is_symmetric == LAGRAPH_TRUE))
    {
        // the structure of A is known to be symmetric
        ;
    }
    else
    {
        // A is not known to be symmetric
        LG_CHECK (false, -1005, "G->A must be symmetric") ;
    }
    GrB_TRY (GrB_Matrix_nrows (&n, G->A)) ;
    GrB_TRY (GrB_Matrix_ncols (&ncols, G->A)) ;
    LG_CHECK (n != ncols, -1001, "A must be square") ;

    //--------------------------------------------------------------------------
    // unpack/export the matrix in CSR form
    //--------------------------------------------------------------------------

    #if LG_SUITESPARSE
    bool iso, jumbled ;
    GrB_TRY (GxB_Matrix_unpack_CSR (G->A, &Ap, &Aj, &Ax,
        &Ap_size, &Aj_size, &Ax_size, &iso, &jumbled, NULL)) ;
    #else
    size_t typesize ;
    LAGraph_TRY (LG_check_export (G, &Ap, &Aj, &Ax, &Ap_len, &Aj_len,
        &Ax_len, &typesize, msg)) ;
    #endif

    //--------------------------------------------------------------------------
    // compute the # of triangles (each triangle counted 3 times: once per
    // lower-triangular edge)
    //--------------------------------------------------------------------------

    int64_t ntriangles = 0 ;
    Ai = Aj ;       // pretend A is symmetric and in CSC format instead

    // masked dot-product method
    #pragma omp parallel for reduction(+:ntriangles) schedule(dynamic,64)
    for (int64_t j = 0 ; j < n ; j++)
    {
        // for each entry in the lower triangular part of A
        for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
        {
            const int64_t i = Ai [p] ;
            if (i > j)
            {
                // ntriangles += A(:,i)' * A(:,j)
                int64_t p1 = Ap [i] ;
                int64_t p1_end = Ap [i+1] ;
                int64_t p2 = Ap [j] ;
int64_t p2_end = Ap [j+1] ; while (p1 < p1_end && p2 < p2_end) { int64_t i1 = Ai [p1] ; int64_t i2 = Ai [p2] ; if (i1 < i2) { // A(i1,i) appears before A(i2,j) p1++ ; } else if (i2 < i1) { // A(i2,j) appears before A(i1,i) p2++ ; } else // i1 == i2 == k { // A(k,i) and A(k,j) are the next entries to merge ntriangles++ ; p1++ ; p2++ ; } } } } } ntriangles = ntriangles / 3 ; #if 0 // saxpy-based method // The comments below are written as if A were in CSC format, but it's // symmetric, so the CSR and CSC formats are the same. Mark = (bool *) LAGraph_Calloc (n, sizeof (bool)) ; LG_CHECK (Mark == NULL, -1005, "out of memory") ; for (int64_t j = 0 ; j < n ; j++) { // scatter A(:,j) into Mark for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { Mark [Ai [p]] = 1 ; } // compute sum(C(:,j)) where C(:,j) = (A * A(:,j)) .* Mark for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { const int64_t k = Ai [p] ; // C(:,j) += (A(:,k) * A(k,j)) .* Mark for (int64_t pa = Ap [k] ; pa < Ap [k+1] ; pa++) { // C(i,j) += (A(i,k) * A(k,j)) .* Mark ntriangles += Mark [Ai [pa]] ; } } // clear A(:,j) from Mark for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { Mark [Ai [p]] = 0 ; } } ntriangles = ntriangles / 6 ; #endif //-------------------------------------------------------------------------- // repack the matrix in CSR form for SuiteSparse:GraphBLAS //-------------------------------------------------------------------------- #if LG_SUITESPARSE GrB_TRY (GxB_Matrix_pack_CSR (G->A, &Ap, &Aj, &Ax, Ap_size, Aj_size, Ax_size, iso, jumbled, NULL)) ; LG_CHECK (Ap != NULL || Aj != NULL || Ax != NULL, -1, "internal error") ; #endif //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- LAGraph_FREE_ALL ; (*ntri) = ntriangles ; return (0) ; }
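// A tiny worked example (hypothetical input, useful for checking the merge
// logic by hand): the 3-node triangle graph, with no self edges, in CSR form:
//
//   n  = 3
//   Ap = { 0, 2, 4, 6 }
//   Aj = { 1, 2,   0, 2,   0, 1 }
//
// The lower-triangular entries (i > j) are (1,0), (2,0) and (2,1); each merge
// of the two sorted adjacency lists finds exactly one common neighbor, so
// ntriangles = 3 before the division by 3, and the function reports ntri = 1.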
layer_example.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif #if defined(USE_BLAS) || defined(USE_IM2COL) #include <mkl.h> #endif #define CHANNEL_BLOCKING 64 #define LP_BLOCKING 2 /* function-pointer to LIBXSMM kernel */ libxsmm_gemmfunction fwd_brgemmz; libxsmm_gemmfunction fwd_brgemma; libxsmm_blasint prec_bf16 = 0; typedef struct { int nImg; int nIfm; int nOfm; int ifhp; int ifwp; int ifh; int ifw; int ofhp; int ofwp; int ofh; int ofw; int pad_h; int pad_w; int pad_h_in; int pad_w_in; int pad_h_out; int pad_w_out; int kh; int kw; int stride_h; int stride_w; int RK; int Mh; int Mw; } naive_conv_t; typedef struct { int nImg; int nBIfm; int nbIfm; int nBOfm; int nbOfm; int nlpb; int ifhp; int ifwp; int ifh; int ifw; int ofhp; int ofwp; int ofh; int ofw; int pad_h; int pad_w; int pad_h_in; int pad_w_in; int pad_h_out; int pad_w_out; int kh; int kw; int stride_h; int stride_w; int RK; int Mh; int Mw; unsigned long long brcount; } gemm_conv_t; typedef struct { double max_rel_err; double max_abs_err; double l2_rel_err; double one_norm_ref; double one_norm_test; } correctness_t; LIBXSMM_INLINE void zero_buf(float* buf, long size) { int i; #if defined(_OPENMP) #pragma omp parallel for private(i) #endif for (i = 0; i < size; ++i) { buf[i] = 0.0f; } } LIBXSMM_INLINE void zero_buf_bf16(libxsmm_bfloat16* buf, size_t size) { int i; #if defined(_OPENMP) # pragma omp parallel for private(i) #endif for (i = 0; i < (int)size; ++i) { buf[i] = 0; } } LIBXSMM_INLINE void copy_buf(float* src, float* dst, long size) { int i; #if defined(_OPENMP) #pragma omp parallel for private(i) #endif for (i = 0; i < size; ++i) { dst[i] = src[i]; } } LIBXSMM_INLINE void init_buf(float* buf, long size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? 
libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } LIBXSMM_INLINE void set_zeropad_nchw(float* nchw, int N, int C, int H, int W, int Mh, int RK, int pad_h, int pad_w) { LIBXSMM_VLA_DECL(6, float, input, nchw, C, H, W, Mh, RK); int n, h, w, c, m, rk; for ( n = 0; n < N; n++ ) { for ( c = 0; c < C; c++ ) { for ( h = 0; h < H; h++ ) { for ( w = 0; w < W; w++ ) { for ( m = 0; m < Mh; m++ ) { for ( rk = 0; rk < RK; rk++ ) { if(h < pad_h || h >= H-pad_h || w < pad_w || w >= W-pad_w) LIBXSMM_VLA_ACCESS(6, input, n, c, h, w, m, rk, C, H, W, Mh, RK) = 0.0; } } } } } } } LIBXSMM_INLINE void compare_buf(float* ref, float* test, long size, correctness_t* norms) { int i; double diff, rel_err; norms->max_rel_err = 0.; norms->max_abs_err = 0.; norms->l2_rel_err = 0.; norms->one_norm_ref = 0.; norms->one_norm_test = 0.; for (i = 0; i < size; ++i) { norms->one_norm_ref += (double)ref[i]; norms->one_norm_test += (double)test[i]; diff = fabs((double)ref[i] - (double)test[i]); norms->l2_rel_err += (diff*diff); rel_err = 0.0; if (diff > 0.0 ) { rel_err = diff/fabs((double)ref[i]); } if (rel_err > norms->max_rel_err) { norms->max_rel_err = rel_err; #if 0 printf("MISMATCH@ %3d: A=%12.8g B=%12.8g (E:%12.4e) (R:%12.4e)\n", i, ref[i], test[i], diff, rel_err); #endif } if (diff > norms->max_abs_err) { norms->max_abs_err = diff; } #if 0 if (diff > 1.0) { printf("MISMATCH@ %3d: A=%12.8g B=%12.8g (E:%12.4e)\n", i, ref[i], test[i], diff); } #endif } norms->l2_rel_err = sqrt(norms->l2_rel_err); } LIBXSMM_INLINE void copy_naiveP_to_GEMM(const float* nchw, float* gemm, int N, int H, int W, int C, int Mh, int RK) { LIBXSMM_VLA_DECL(7, float, output, gemm, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING); LIBXSMM_VLA_DECL(6, const float, input, nchw, H, W, C, Mh, RK); int n, h, w, c1, c2, m, rk; for ( n = 0; n < N; n++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( m = 0; m < Mh; m++ ) { for ( rk = 0; rk < RK; rk++ ) { for ( h = 0; h < H; h++ ) { for ( w = 0; w < W; w++ ) { for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) { LIBXSMM_VLA_ACCESS(7, output, n, c1, m, rk, h, w, c2, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING) = LIBXSMM_VLA_ACCESS(6, input, n, h, w, (c1*CHANNEL_BLOCKING)+c2, m, rk, H, W, C, Mh, RK); } } } } } } } } LIBXSMM_INLINE void copy_GEMM_to_naiveV(const float* gemm, float* nchw, int N, int H, int W, int C, int Mh, int Mw) { LIBXSMM_VLA_DECL(7, const float, input, gemm, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING); LIBXSMM_VLA_DECL(6, float, output, nchw, H, W, C, Mh, Mw); int n, h, w, c1, c2, mi, mj; for ( n = 0; n < N; n++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( mj = 0; mj < Mh; mj++) { for ( mi = 0; mi < Mw; mi++) { for ( h = 0; h < H; h++ ) { for ( w = 0; w < W; w++ ) { for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) { LIBXSMM_VLA_ACCESS(6, output, n, h, w, (c1*CHANNEL_BLOCKING)+c2, mj, mi, H, W, C, Mh, Mw) = LIBXSMM_VLA_ACCESS(7, input, n, c1, mj, mi, h, w, c2, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING); } } } } } } } } LIBXSMM_INLINE void copy_naiveF_to_GEMM(const float* kcrs, float* gemm, int R, int S, int C, int K, int RK, int Mw) { LIBXSMM_VLA_DECL(8, float, output, gemm, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING); LIBXSMM_VLA_DECL(6, const float, input, kcrs, K, R, S, RK, Mw); int r, s, c1, c2, k1, k2, rk, m; for ( k1 = 0; k1 < K/CHANNEL_BLOCKING; k1++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( m = 0; m < Mw; m++ ) { for ( rk = 0; rk < RK; rk++ ) { for ( r = 0; r < R; r++ ) { for ( s = 0; s < S; s++ ) { 
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) { for ( k2 = 0; k2 < CHANNEL_BLOCKING; k2++ ) { LIBXSMM_VLA_ACCESS(8, output, k1, c1, m, rk, r, s, c2, k2, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING) = LIBXSMM_VLA_ACCESS(6, input, (c1*CHANNEL_BLOCKING)+c2, (k1*CHANNEL_BLOCKING)+k2, r, s, rk, m, C, R, S, RK, Mw); } } } } } } } } } LIBXSMM_INLINE void copy_naiveP_to_GEMM_bf16(const libxsmm_bfloat16* nchw, libxsmm_bfloat16* gemm, int N, int H, int W, int C, int Mh, int RK) { LIBXSMM_VLA_DECL(7, libxsmm_bfloat16, output, gemm, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING); LIBXSMM_VLA_DECL(6, const libxsmm_bfloat16, input, nchw, H, W, C, Mh, RK); int n, h, w, c1, c2, m, rk; for ( n = 0; n < N; n++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( m = 0; m < Mh; m++ ) { for ( rk = 0; rk < RK; rk++ ) { for ( h = 0; h < H; h++ ) { for ( w = 0; w < W; w++ ) { for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) { LIBXSMM_VLA_ACCESS(7, output, n, c1, m, rk, h, w, c2, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING) = LIBXSMM_VLA_ACCESS(6, input, n, h, w, (c1*CHANNEL_BLOCKING)+c2, m, rk, H, W, C, Mh, RK); } } } } } } } } LIBXSMM_INLINE void copy_GEMM_to_naiveV_bf16(const libxsmm_bfloat16* gemm, libxsmm_bfloat16* nchw, int N, int H, int W, int C, int Mh, int Mw) { LIBXSMM_VLA_DECL(7, const libxsmm_bfloat16, input, gemm, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING); LIBXSMM_VLA_DECL(6, libxsmm_bfloat16, output, nchw, H, W, C, Mh, Mw); int n, h, w, c1, c2, mi, mj; for ( n = 0; n < N; n++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( mj = 0; mj < Mh; mj++) { for ( mi = 0; mi < Mw; mi++) { for ( h = 0; h < H; h++ ) { for ( w = 0; w < W; w++ ) { for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) { LIBXSMM_VLA_ACCESS(6, output, n, h, w, (c1*CHANNEL_BLOCKING)+c2, mj, mi, H, W, C, Mh, Mw) = LIBXSMM_VLA_ACCESS(7, input, n, c1, mj, mi, h, w, c2, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING); } } } } } } } } LIBXSMM_INLINE void copy_naiveF_to_GEMM_bf16(const libxsmm_bfloat16* kcrs, libxsmm_bfloat16* gemm, int R, int S, int C, int K, int RK, int Mw) { LIBXSMM_VLA_DECL(9, libxsmm_bfloat16, output, gemm, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING/LP_BLOCKING, CHANNEL_BLOCKING, LP_BLOCKING); LIBXSMM_VLA_DECL(6, const libxsmm_bfloat16, input, kcrs, K, R, S, RK, Mw); int r, s, c1, c2, c3, k1, k2, rk, m; for ( k1 = 0; k1 < K/CHANNEL_BLOCKING; k1++ ) { for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) { for ( m = 0; m < Mw; m++ ) { for ( rk = 0; rk < RK; rk++ ) { for ( r = 0; r < R; r++ ) { for ( s = 0; s < S; s++ ) { for ( c2 = 0; c2 < CHANNEL_BLOCKING/LP_BLOCKING; c2++ ) { for ( k2 = 0; k2 < CHANNEL_BLOCKING; k2++ ) { for ( c3 = 0; c3 < LP_BLOCKING; c3++ ) { LIBXSMM_VLA_ACCESS(9, output, k1, c1, m, rk, r, s, c2, k2, c3, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING/LP_BLOCKING, CHANNEL_BLOCKING, LP_BLOCKING) = LIBXSMM_VLA_ACCESS(6, input, (c1*CHANNEL_BLOCKING)+(c2*LP_BLOCKING)+c3, (k1*CHANNEL_BLOCKING)+k2, r, s, rk, m, C, R, S, RK, Mw); } } } } } } } } } } LIBXSMM_INLINE int is_a_ge_zero_and_a_lt_b(int a, int b) { return (unsigned int)a < (unsigned int)(b); } LIBXSMM_INLINE void naive_convcaps_fp(naive_conv_t* param, const float* input, float* output, const float* filter) { int nImg = param->nImg; int nIfm = param->nIfm; int nOfm = param->nOfm; int ifhp = param->ifhp; int ifwp = param->ifwp; int ofhp = param->ofhp; int ofwp = param->ofwp; int ofh = param->ofh; int ofw = param->ofw; int pad_h = param->pad_h; int pad_w = param->pad_w; int pad_h_in = 
param->pad_h_in; int pad_w_in = param->pad_w_in; int pad_h_out = param->pad_h_out; int pad_w_out = param->pad_w_out; int kh = param->kh; int kw = param->kw; int stride_h = param->stride_h; int stride_w = param->stride_w; int RK = param->RK; int Mh = param->Mh; int Mw = param->Mw; /* loop counters */ int img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi; LIBXSMM_VLA_DECL(6, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), ofhp, ofwp, nOfm, Mh, Mw); LIBXSMM_VLA_DECL(6, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), ifhp, ifwp, nIfm, Mh, RK); LIBXSMM_VLA_DECL(6, const float, filter_t, filter, nOfm, kh, kw, RK, Mw); #if defined(_OPENMP) # pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi) #endif for (img = 0; img < nImg; ++img) { for (ofm = 0; ofm < nOfm; ++ofm) { for (oj = 0; oj < ofh; ++oj) { ij = oj * stride_h - pad_h; for (oi = 0; oi < ofw; ++oi) { ii = oi * stride_w - pad_w; for (mj = 0; mj < Mh; ++mj ) { for (mi = 0; mi < Mw; ++mi ) { LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) = 0.0f; for (ifm = 0; ifm < nIfm; ++ifm) { for (kj = 0; kj < kh; ++kj) { /*if(ij+kj < 0 || ij+kj >= ifh) continue;*/ for (ki = 0; ki < kw; ++ki) { /*if(ii+ki < 0 || ii+ki >= ifw) continue;*/ for (rk = 0; rk < RK; ++rk ) { LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) += LIBXSMM_VLA_ACCESS( 6, poses_t, img, ij+kj, ii+ki, ifm, mj, rk, ifhp, ifwp, nIfm, Mh, RK) * LIBXSMM_VLA_ACCESS( 6, filter_t, ifm, ofm, kj, ki, rk, mi, nOfm, kh, kw, RK, Mw); } } } } } } } } } } } LIBXSMM_INLINE void gemm_convcaps_fp(gemm_conv_t* param, const float* input, float* output, const float* filter, unsigned long long* aoff, unsigned long long* boff) { int nImg = param->nImg; int nBIfm = param->nBIfm; int nbIfm = param->nbIfm; int nBOfm = param->nBOfm; int nbOfm = param->nbOfm; int ifhp = param->ifhp; int ifwp = param->ifwp; int ofhp = param->ofhp; int ofwp = param->ofwp; int ofh = param->ofh; int pad_h = param->pad_h; int pad_h_in = param->pad_h_in; int pad_w_in = param->pad_w_in; int pad_h_out = param->pad_h_out; int pad_w_out = param->pad_w_out; int kh = param->kh; int kw = param->kw; int stride_h = param->stride_h; int RK = param->RK; int Mh = param->Mh; int Mw = param->Mw; unsigned long long brcount = param->brcount; /* loop counters */ int img, ofm1, ifm1, oj, ij, rk, mj, mi; LIBXSMM_VLA_DECL(7, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), nBOfm, Mh, Mw, ofhp, ofwp, nbOfm); LIBXSMM_VLA_DECL(7, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), nBIfm, Mh, RK, ifhp, ifwp, nbIfm); LIBXSMM_VLA_DECL(8, const float, filter_t, filter, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm); #if defined(_OPENMP) # pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm1, ifm1, oj, ij, mj, mi, rk) #endif for (img = 0; img < nImg; ++img) { for (ofm1 = 0; ofm1 < nBOfm; ++ofm1) { for (mj = 0; mj < Mh; ++mj ) { for (mi = 0; mi < Mw; ++mi ) { for (ifm1 = 0; ifm1 < nBIfm; ++ifm1) { for (rk = 0; rk < RK; ++rk ) { for (oj = 0; oj < ofh; ++oj) { ij = oj * stride_h - pad_h; { libxsmm_gemm_param gemm_param; gemm_param.a.primary = (void*)&LIBXSMM_VLA_ACCESS(8, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm); gemm_param.a.secondary = aoff; gemm_param.b.primary = (void*)&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm); gemm_param.b.secondary = boff; gemm_param.c.primary = &LIBXSMM_VLA_ACCESS(7, votes_t, img, 
ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm); gemm_param.op.tertiary = &brcount; if ( rk == 0 && ifm1 == 0 ) { fwd_brgemmz( &gemm_param ); } else { fwd_brgemma( &gemm_param ); } } } } } } } } } } LIBXSMM_INLINE void gemm_convcaps_fp_bf16(gemm_conv_t* param, const libxsmm_bfloat16* input, libxsmm_bfloat16* output, const libxsmm_bfloat16* filter, unsigned long long* aoff, unsigned long long* boff) { int nImg = param->nImg; int nBIfm = param->nBIfm; int nbIfm = param->nbIfm; int nBOfm = param->nBOfm; int nbOfm = param->nbOfm; int nlpb = param->nlpb; int ifhp = param->ifhp; int ifwp = param->ifwp; int ofhp = param->ofhp; int ofwp = param->ofwp; int ofh = param->ofh; int pad_h = param->pad_h; int pad_h_in = param->pad_h_in; int pad_w_in = param->pad_w_in; int pad_h_out = param->pad_h_out; int pad_w_out = param->pad_w_out; int kh = param->kh; int kw = param->kw; int stride_h = param->stride_h; int RK = param->RK; int Mh = param->Mh; int Mw = param->Mw; unsigned long long brcount = param->brcount; /* loop counters */ int img, ofm1, ifm1, oj, ij, rk, mj, mi; LIBXSMM_VLA_DECL(7, libxsmm_bfloat16, votes_t, output + (pad_w_out * ofwp + pad_h_out), nBOfm, Mh, Mw, ofhp, ofwp, nbOfm); LIBXSMM_VLA_DECL(7, const libxsmm_bfloat16, poses_t, input + (pad_w_in * ifwp + pad_h_in), nBIfm, Mh, RK, ifhp, ifwp, nbIfm); LIBXSMM_VLA_DECL(9, const libxsmm_bfloat16, filter_t, filter, nBIfm, Mw, RK, kh, kw, nbIfm/nlpb, nbOfm, nlpb); #if defined(_OPENMP) # pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm1, ifm1, oj, ij, mj, mi, rk) #endif for (img = 0; img < nImg; ++img) { for (ofm1 = 0; ofm1 < nBOfm; ++ofm1) { for (mj = 0; mj < Mh; ++mj ) { for (mi = 0; mi < Mw; ++mi ) { for (ifm1 = 0; ifm1 < nBIfm; ++ifm1) { for (rk = 0; rk < RK; ++rk ) { for (oj = 0; oj < ofh; ++oj) { ij = oj * stride_h - pad_h; { libxsmm_gemm_param gemm_param; gemm_param.a.primary = (void*)&LIBXSMM_VLA_ACCESS(9, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm/nlpb, nbOfm, nlpb); gemm_param.a.secondary = aoff; gemm_param.b.primary = (void*)&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm); gemm_param.b.secondary = boff; gemm_param.c.primary = &LIBXSMM_VLA_ACCESS(7, votes_t, img, ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm); gemm_param.op.tertiary = &brcount; if ( rk == 0 && ifm1 == 0 ) { fwd_brgemmz( &gemm_param ); } else { fwd_brgemma( &gemm_param ); } } } } } } } } } } LIBXSMM_INLINE void compute_broff(gemm_conv_t* param, unsigned long long* aoff, unsigned long long* boff) { int nbIfm = param->nbIfm; int nbOfm = param->nbOfm; int ifwp = param->ifwp; int kh = param->kh; int kw = param->kw; /* loop counters */ int kj, ki, i; i = 0; for (kj = 0; kj < kh; ++kj) { for (ki = 0; ki < kw; ++ki) { aoff[i] = (kj*(kw*nbIfm*nbOfm) + ki*(nbIfm*nbOfm))*sizeof(float); boff[i] = (kj*(ifwp*nbIfm) + ki*(nbIfm))*sizeof(float); i++; } } } LIBXSMM_INLINE void compute_broff_bf16(gemm_conv_t* param, unsigned long long* aoff, unsigned long long* boff) { int nbIfm = param->nbIfm; int nbOfm = param->nbOfm; int ifwp = param->ifwp; int kh = param->kh; int kw = param->kw; /* loop counters */ int kj, ki, i; i = 0; for (kj = 0; kj < kh; ++kj) { for (ki = 0; ki < kw; ++ki) { aoff[i] = (kj*(kw*nbIfm*nbOfm) + ki*(nbIfm*nbOfm))*sizeof(libxsmm_bfloat16); boff[i] = (kj*(ifwp*nbIfm) + ki*(nbIfm))*sizeof(libxsmm_bfloat16); i++; } } } int main(int argc, char* argv[]) { float *naive_input, *naive_output, *naive_filter; libxsmm_bfloat16 *naive_input_bf16, *naive_output_bf16, 
*naive_filter_bf16;
  float *gemm_input, *gemm_output, *gemm_filter;
  libxsmm_bfloat16 *gemm_input_bf16, *gemm_output_bf16, *gemm_filter_bf16;
  float *check_output;
  libxsmm_bfloat16 *check_output_bf16;
  unsigned long long *aoff, *boff;
  int ifhp, ifwp, ofhp, ofwp, ofh, ofw;
  int stride_h, stride_w, pad_h_in, pad_w_in, pad_h_out, pad_w_out;
  int ldx;
  int brcount;
  libxsmm_gemm_shape l_shape;
  libxsmm_gemm_batch_reduce_config l_brconfig;
  libxsmm_bitfield l_flags = LIBXSMM_GEMM_FLAGS('N', 'N');
  libxsmm_bitfield l_prefetch_flags = LIBXSMM_PREFETCH_NONE;
  naive_conv_t naive_param;
  gemm_conv_t gemm_param;
  correctness_t norms_fwd;

  /* some parameters we can overwrite via cli, default is some inner layer of overfeat */
  int iters = 100;  /* repetitions of benchmark */
  int ifw = 16;     /* input width, "W" */
  int ifh = 16;     /* input height, "H" */
  int nImg = 128;   /* mini-batch size, "N" */
  int nIfm = 128;   /* number of input feature maps, "C" */
  int nOfm = 256;   /* number of output feature maps, "K" */
  int kh = 3;       /* filter height, "R" */
  int kw = 3;       /* filter width, "S" */
  int pad_h = 0;    /* padding in output */
  int pad_w = 0;    /* padding in output */
  int stride = 2;   /* stride when accessing inputs */
  int Mh = 4;
  int Mw = 4;
  int RK = 4;
  char type = 'F';  /* 'A': ALL, 'F': FP, 'B': BP, 'U': WU */
#if defined(_OPENMP)
  int nThreads = omp_get_max_threads(); /* number of threads */
#else
  int nThreads = 1; /* number of threads */
#endif
  unsigned long long l_start, l_end;
  double l_total = 0.0;
  double flops = 0.0;
  int i;

  memset(&norms_fwd, 0, sizeof(norms_fwd));

  naive_input = NULL;
  naive_output = NULL;
  naive_filter = NULL;
  naive_input_bf16 = NULL;
  naive_output_bf16 = NULL;
  naive_filter_bf16 = NULL;
  gemm_input = NULL;
  gemm_output = NULL;
  gemm_filter = NULL;
  gemm_input_bf16 = NULL;
  gemm_output_bf16 = NULL;
  gemm_filter_bf16 = NULL;
  check_output = NULL;
  check_output_bf16 = NULL;

  if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
    printf("\n\n\nUsage: %s iters W H N C K S R pad_w pad_h stride RK Mw Mh type(F,B,U,A)\n\n\n", argv[0]);
    return -1;
  }
  libxsmm_rng_set_seed(1);

  /* reading new values from cli */
  i = 1;
  if (argc > i) iters = atoi(argv[i++]);
  if (argc > i) ifw   = atoi(argv[i++]);
  if (argc > i) ifh   = atoi(argv[i++]);
  if (argc > i) nImg  = atoi(argv[i++]);
  if (argc > i) nIfm  = atoi(argv[i++]);
  if (argc > i) nOfm  = atoi(argv[i++]);
  if (argc > i) kw    = atoi(argv[i++]);
  if (argc > i) kh    = atoi(argv[i++]);
  if (argc > i) pad_w = atoi(argv[i++]);
  if (argc > i) pad_h = atoi(argv[i++]);
  if (argc > i) stride = atoi(argv[i++]);
  if (argc > i) RK    = atoi(argv[i++]);
  if (argc > i) Mw    = atoi(argv[i++]);
  if (argc > i) Mh    = atoi(argv[i++]);
  if (argc > i) type  = *(argv[i++]);

  /* apply stride in both dimensions */
  stride_w = stride;
  stride_h = stride;

  /* handle physical padding */
#ifdef USE_PHYSICAL_PADDING
#error "physical padding is not supported right now!"
pad_h_in = pad_h; pad_w_in = pad_w; pad_h_out = 0; pad_w_out = 0; #else pad_h_in = 0; pad_w_in = 0; pad_h_out = 0; pad_w_out = 0; #endif /* deriving some values image size */ ofh = (ifh + 2 * pad_h - kh) / stride_h + 1; ofw = (ifw + 2 * pad_w - kw) / stride_w + 1; ifhp = ifh + 2 * pad_h_in; ifwp = ifw + 2 * pad_w_in; ofhp = ofh + 2 * pad_h_out; ofwp = ofw + 2 * pad_w_out; /* set struct for naive convolution */ naive_param.nImg = nImg; naive_param.nIfm = nIfm; naive_param.nOfm = nOfm; naive_param.ifhp = ifhp; naive_param.ifwp = ifwp; naive_param.ofhp = ofhp; naive_param.ofwp = ofwp; naive_param.ifh = ifh; naive_param.ifw = ifw; naive_param.ofh = ofh; naive_param.ofw = ofw; naive_param.pad_h = pad_h; naive_param.pad_w = pad_w; naive_param.pad_h_in = pad_h_in; naive_param.pad_w_in = pad_w_in; naive_param.pad_h_out = pad_h_out; naive_param.pad_w_out = pad_w_out; naive_param.kh = kh; naive_param.kw = kw; naive_param.stride_h = stride_h; naive_param.stride_w = stride_w; naive_param.RK = RK; naive_param.Mh = Mh; naive_param.Mw = Mw; /* set struct for naive convolution */ gemm_param.nImg = nImg; gemm_param.nBIfm = nIfm/CHANNEL_BLOCKING; gemm_param.nbIfm = CHANNEL_BLOCKING; gemm_param.nBOfm = nOfm/CHANNEL_BLOCKING; gemm_param.nbOfm = CHANNEL_BLOCKING; if (prec_bf16 == 0 ) { gemm_param.nlpb = 1; } else { gemm_param.nlpb = LP_BLOCKING; } gemm_param.ifhp = ifhp; gemm_param.ifwp = ifwp; gemm_param.ofhp = ofhp; gemm_param.ofwp = ofwp; gemm_param.ifh = ifh; gemm_param.ifw = ifw; gemm_param.ofh = ofh; gemm_param.ofw = ofw; gemm_param.pad_h = pad_h; gemm_param.pad_w = pad_w; gemm_param.pad_h_in = pad_h_in; gemm_param.pad_w_in = pad_w_in; gemm_param.pad_h_out = pad_h_out; gemm_param.pad_w_out = pad_w_out; gemm_param.kh = kh; gemm_param.kw = kw; gemm_param.stride_h = stride_h; gemm_param.stride_w = stride_w; gemm_param.RK = RK; gemm_param.Mh = Mh; gemm_param.Mw = Mw; /* compute brcount */ brcount = kh*kw; gemm_param.brcount = brcount; /* some empty lines at the beginning */ printf("\n\n\n"); /* print some summary */ printf("##########################################\n"); printf("# Setting Up #\n"); printf("##########################################\n"); printf("PARAMS: W:%d H:%d N:%d C:%d K:%d R:%d S:%d P:%d Q:%d STRIDE: %d RK: %d Mh: %d Mw: %d\n", ifw, ifh, nImg, nIfm, nOfm, kw, kh, ofh, ofw, stride, RK, Mh, Mw); printf("PARAMS: ITERS:%d Threads:%d\n", iters, nThreads); printf(" InImg %dx%d Padded (%dx%d)\n", ifh, ifw, ifhp, ifwp); printf("OutImg %dx%d Padded (%dx%d)\n", ofh, ofw, ofhp, ofwp); printf("SIZE Poses (MB): %10.2f MiB\n", (double)(nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float))/(1024.0*1024.0) ); printf("SIZE Votes (MB): %10.2f MiB\n", (double)(nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float))/(1024.0*1024.0) ); printf("SIZE Poses (1): %10.2f MiB\n", (double)(1*nIfm*ifhp*ifwp*Mh*RK* sizeof(float))/(1024.0*1024.0) ); printf("SIZE Votes (1): %10.2f MiB\n", (double)(1*nOfm*ofhp*ofwp*Mh*Mw* sizeof(float))/(1024.0*1024.0) ); printf("SIZE Weight : %10.2f MiB\n", (double)(nIfm*nOfm*kw*kh*Mw*RK* sizeof(float))/(1024.0*1024.0) ); /* check for pass to run */ if (type != 'A' && type != 'F' && type != 'B' && type != 'U') { printf("\ntype needs to be 'A' (All), 'F' (FP only), 'B' (BP only), 'U' (WU only)\n\n\n"); return -1; } if ((nIfm % CHANNEL_BLOCKING != 0) || (nOfm % CHANNEL_BLOCKING != 0) ) { printf("\nThis code only works for ofm/ifm mod %i = 0!\n\n\n", CHANNEL_BLOCKING); return -1; } if (pad_w !=0 || pad_h !=0 || pad_h_in != 0 || pad_w_in != 0 || pad_h_out !=0 || pad_w_out != 0) { printf("\nThis code doesn't support 
padding right now\n!"); return -1; } /* apply stride in both dimensions */ /* JIT GEMM kernel */ ldx = stride_w*CHANNEL_BLOCKING; if ( prec_bf16 == 0 ) { l_shape = libxsmm_create_gemm_shape( CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, CHANNEL_BLOCKING, ldx, CHANNEL_BLOCKING, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 ); } else { l_flags |= LIBXSMM_GEMM_FLAG_VNNI_A; l_shape = libxsmm_create_gemm_shape( CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, CHANNEL_BLOCKING, ldx, CHANNEL_BLOCKING, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32 ); } l_brconfig = libxsmm_create_gemm_batch_reduce_config( LIBXSMM_GEMM_BATCH_REDUCE_OFFSET, 0, 0, brcount ); fwd_brgemma = libxsmm_dispatch_brgemm_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig ); l_flags |= LIBXSMM_GEMM_FLAG_BETA_0; fwd_brgemmz = libxsmm_dispatch_brgemm_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig ); printf("BRGEMM FWD col-major: m=%d, n=%d, k=%d, lda=%d, ldb=%d, ldc=%d, transa='n', transb='n', alpha=1.0, beta=1.0, brcount=%d\n", CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, CHANNEL_BLOCKING, stride_w*CHANNEL_BLOCKING, CHANNEL_BLOCKING, brcount); /* allocate data */ naive_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152); naive_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152); naive_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(float), 2097152); if (prec_bf16 == 0) { gemm_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152); gemm_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152); gemm_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(float), 2097152); } else { naive_input_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(libxsmm_bfloat16), 2097152); naive_output_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(libxsmm_bfloat16), 2097152); naive_filter_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(libxsmm_bfloat16), 2097152); gemm_input_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(libxsmm_bfloat16), 2097152); gemm_output_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(libxsmm_bfloat16), 2097152); gemm_filter_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(libxsmm_bfloat16), 2097152); check_output_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(libxsmm_bfloat16), 2097152); } check_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152); aoff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152); boff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152); /* initialize data */ init_buf(naive_input, nImg*nIfm*ifhp*ifwp*Mh*RK, 0, 0); set_zeropad_nchw(naive_input, nImg, nIfm, ifhp, ifwp, Mh, RK, pad_h_in, pad_w_in); init_buf(naive_filter, nOfm*nIfm*kh*kw*Mw*RK, 0, 0); zero_buf(naive_output, nImg*nOfm*ofhp*ofwp*Mw*Mh); if (prec_bf16 == 0) { /* copy data into GEMM optimized format */ copy_naiveP_to_GEMM(naive_input, gemm_input, nImg, ifhp, ifwp, nIfm, Mh, RK); copy_naiveF_to_GEMM(naive_filter, gemm_filter, kh, kw, nIfm, nOfm, RK, Mw); zero_buf(gemm_output, nImg*nOfm*ofhp*ofwp*Mw*Mh); /* compute BRGEMM offsets */ 
compute_broff( &gemm_param, aoff, boff ); } else { /* copy data to bf16 */ libxsmm_rne_convert_fp32_bf16( naive_input, naive_input_bf16, nImg*nIfm*ifhp*ifwp*Mh*RK ); libxsmm_rne_convert_fp32_bf16( naive_filter, naive_filter_bf16, nOfm*nIfm*kh*kw*Mw*RK ); /* copy data into GEMM optimized format */ copy_naiveP_to_GEMM_bf16(naive_input_bf16, gemm_input_bf16, nImg, ifhp, ifwp, nIfm, Mh, RK); copy_naiveF_to_GEMM_bf16(naive_filter_bf16, gemm_filter_bf16, kh, kw, nIfm, nOfm, RK, Mw); zero_buf_bf16(gemm_output_bf16, nImg*nOfm*ofhp*ofwp*Mw*Mh); /* compute BRGEMM offsets */ compute_broff_bf16( &gemm_param, aoff, boff ); } /* check correctness forward */ if (type == 'A' || type == 'F') { printf("##########################################\n"); printf("# Correctness - FWD (custom-Storage) #\n"); printf("##########################################\n"); /* run naive convolution */ naive_convcaps_fp(&naive_param, naive_input, naive_output, naive_filter); if (prec_bf16 == 0) { gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff); copy_GEMM_to_naiveV(gemm_output, check_output, nImg, ofhp, ofwp, nOfm, Mh, Mw); } else { gemm_convcaps_fp_bf16(&gemm_param, gemm_input_bf16, gemm_output_bf16, gemm_filter_bf16, aoff, boff); copy_GEMM_to_naiveV_bf16(gemm_output_bf16, check_output_bf16, nImg, ofhp, ofwp, nOfm, Mh, Mw); /* copy data to FP32 */ libxsmm_convert_bf16_f32( check_output_bf16, check_output, nImg*nOfm*ofhp*ofwp*Mh*Mw ); } /* compare */ compare_buf(naive_output, check_output, nImg*nOfm*ofhp*ofwp*Mh*Mw, &norms_fwd); printf(" 1-norm of reference: %f\n", norms_fwd.one_norm_ref); printf(" 1-norm of GEMM-code: %f\n", norms_fwd.one_norm_test); printf(" L2-error-norm of GEMM-code: %f\n", norms_fwd.l2_rel_err); printf(" inf-norm of comp. rel. error: %f\n", norms_fwd.max_rel_err); printf(" inf-norm of comp. abs. 
error: %f\n", norms_fwd.max_abs_err); } /* benchmark forward */ if (type == 'A' || type == 'F') { printf("##########################################\n"); printf("# Performance - FWD (custom-Storage) #\n"); printf("##########################################\n"); /* run LIBXSMM convolution for performance */ l_start = libxsmm_timer_tick(); for (i = 0; i < iters; ++i) { if (prec_bf16 == 0) { gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff); } else { gemm_convcaps_fp_bf16(&gemm_param, gemm_input_bf16, gemm_output_bf16, gemm_filter_bf16, aoff, boff); } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); flops = (double)nImg * (double)nIfm * (double)nOfm * (double)ofh * (double)ofw * (double)(2 * kh * kw) * (double)RK * (double)Mh * (double)Mw * (double)iters; printf("GFLOP = %.5g\n", flops*1e-9/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", (flops*1e-9)/l_total); printf("PERFDUMP,FP,%s,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%.5g,%.5g,%f,%f,%f,%f,%f\n", LIBXSMM_VERSION, nThreads, nImg, nIfm, nOfm, ifw, ifh, kw, kh, stride, pad_h, pad_w, RK, Mh, Mw, ((double)(l_total/iters)), (flops*1e-9)/l_total, norms_fwd.max_rel_err, norms_fwd.max_abs_err, norms_fwd.l2_rel_err, norms_fwd.one_norm_ref, norms_fwd.one_norm_test ); } /* deallocate data */ libxsmm_free(naive_input); libxsmm_free(naive_output); libxsmm_free(naive_filter); if (prec_bf16 == 0) { libxsmm_free(gemm_input); libxsmm_free(gemm_output); libxsmm_free(gemm_filter); } else { libxsmm_free(naive_input_bf16); libxsmm_free(naive_output_bf16); libxsmm_free(naive_filter_bf16); libxsmm_free(gemm_input_bf16); libxsmm_free(gemm_output_bf16); libxsmm_free(gemm_filter_bf16); libxsmm_free(check_output_bf16); } libxsmm_free(check_output); libxsmm_free(aoff); libxsmm_free(boff); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
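/* editor's sketch (not part of the original example): the FLOP count used for
 * the GFLOPS/PERFDUMP output above, factored into a helper. Each output vote
 * element V[n][k][oj][oi][mj][mi] accumulates nIfm*kh*kw*RK multiply-add
 * pairs, i.e. 2*kh*kw*nIfm*RK floating-point operations.
 * "convcaps_fwd_flops" is a hypothetical name. */
LIBXSMM_INLINE double convcaps_fwd_flops(int nImg, int nIfm, int nOfm, int ofh, int ofw,
                                         int kh, int kw, int RK, int Mh, int Mw) {
  return (double)nImg * (double)nIfm * (double)nOfm * (double)ofh * (double)ofw
       * (double)(2 * kh * kw) * (double)RK * (double)Mh * (double)Mw;
}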
mandel_par.c
/*
**  PROGRAM: Mandelbrot area (solution)
**
**  PURPOSE: Program to compute the area of a Mandelbrot set.
**           The correct answer should be around 1.510659.
**
**  USAGE:   Program runs without input ... just run the executable
**
**  ADDITIONAL EXERCISES: Experiment with the schedule clause to fix
**           the load imbalance. Experiment with atomic vs. critical vs.
**           reduction for numoutside.
**
**  HISTORY: Written: (Mark Bull, August 2011).
**
**           Changed "complex" to "d_complex" to avoid collision with
**           math.h complex type. Fixed data environment errors
**           (Tim Mattson, September 2011)
**
**           Changed "atomic" to "critical" to match Common Core
**           (Helen He, November 2020)
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define NPOINTS 1000
#define MXITR 1000

struct d_complex {
  double r;
  double i;
};

void testpoint(struct d_complex);

struct d_complex c;
int numoutside = 0;

int main() {
  int i, j;
  double area, error, eps = 1.0e-5;

  // Loop over grid of points in the complex plane which contains the
  // Mandelbrot set, testing each point to see whether it is inside or
  // outside the set.
  double initTime = omp_get_wtime();

#pragma omp parallel for private(c) firstprivate(eps) collapse(2) schedule(dynamic,100)
  for (i = 0; i < NPOINTS; i++) {
    for (j = 0; j < NPOINTS; j++) {
      c.r = -2.0 + 2.5 * (double)(i)/(double)(NPOINTS) + eps;
      c.i = 1.125 * (double)(j)/(double)(NPOINTS) + eps;
      testpoint(c);
    }
  }

  // Calculate area of set and error estimate and output the results
  area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - numoutside)/(double)(NPOINTS * NPOINTS);
  error = area / (double)NPOINTS;

  double runtime = omp_get_wtime() - initTime;
  /* omp_get_num_threads() returns 1 outside a parallel region, so query the
     thread count with omp_get_max_threads() instead */
  printf("runtime = %lf seconds with %d threads\n", runtime, omp_get_max_threads());
  printf("Area of Mandelbrot set = %12.8f +/- %12.8f\n", area, error);
  printf("Correct answer should be around 1.510659\n");
  return 0;
}

void testpoint(struct d_complex c) {
  // Does the iteration z=z*z+c, until |z| > 2 when point is known to be
  // outside the set. If the loop count reaches MXITR, the point is
  // considered to be inside the set.
  struct d_complex z;
  int iter;
  double temp;

  z = c;
  for (iter = 0; iter < MXITR; iter++) {
    temp = (z.r * z.r) - (z.i * z.i) + c.r;
    z.i = z.r * z.i * 2 + c.i;
    z.r = temp;
    if ((z.r * z.r + z.i * z.i) > 4.0) {
#pragma omp critical
      numoutside++;
      break;
    }
  }
}
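/*
**  EDITOR'S SKETCH (not part of the original solution): the "reduction"
**  alternative suggested in the ADDITIONAL EXERCISES note above. The global
**  counter and the critical section go away; a variant of testpoint returns
**  1 for an escaping point so partial sums combine with reduction(+:outside).
**  The names testpoint_ret and count_outside are hypothetical.
*/
static int testpoint_ret(struct d_complex cc) {
  struct d_complex z = cc;
  double temp;
  int iter;
  for (iter = 0; iter < MXITR; iter++) {
    temp = (z.r * z.r) - (z.i * z.i) + cc.r;
    z.i = z.r * z.i * 2 + cc.i;
    z.r = temp;
    if ((z.r * z.r + z.i * z.i) > 4.0) return 1;  /* escaped: outside the set */
  }
  return 0;  /* never escaped: counted as inside */
}

static int count_outside(void) {
  int i, j, outside = 0;
  const double eps = 1.0e-5;
#pragma omp parallel for reduction(+:outside) collapse(2) schedule(dynamic,100)
  for (i = 0; i < NPOINTS; i++) {
    for (j = 0; j < NPOINTS; j++) {
      struct d_complex cc;  /* local, so no private() clause is needed */
      cc.r = -2.0 + 2.5 * (double)(i)/(double)(NPOINTS) + eps;
      cc.i = 1.125 * (double)(j)/(double)(NPOINTS) + eps;
      outside += testpoint_ret(cc);
    }
  }
  return outside;
}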
MLDR.h
#include <cstring>   /* memset */
#include <cstdio>    /* printf */
#include <cmath>     /* sqrt */
#include <complex> #include <algorithm> class MLDR { int cnt = 0; int ref_cnt = 1; private: int nShift; int fftSize; int nFreq; int nOverlap; int nChannel; const double min_pos = 1e-32; const double eps = 1e-16; int power_method_iter; // 10->1 double gamma; // forgetting factor for inverse matrix double gammaPre, gammaPost; int initFrame; double alpha; // smoothing factor for phiY double delta_MLDR; // diagonal loading for inverse matrix double epsi_MLDR; // floor value for lambdaY double delta_SVE; double theta_SVE; double epsi_SVE; double *phiY; double **W, **XPsi, **WNumer; double ***numer, ***Rx, ***PsiRx, ***Psi, ***PsiTmp; double* YPred, *denom, *WDenom; // nFreq(complex) double* lambdaY, * lambdaYInv;//nFreq(double) double* normRxTmp; double*** Rn; double*** RxTmp, *** RnTmp, ***XFrameTmp,*** RxTmp2, *** RnTmp2,***eigTmp ; //Nfreq*Nch*Nch(complex) double**maxEigVec, ** st,**eigVecTmp; //Nch*Nfreq(complex) double*LambdaX,*LambdaN, *PhiYHat,*tmpRe,*tmpIm; //Nfreq(real) //MLDR v2 double*** Phi, *** PhiRxPhi, *** PhiHat, *** PhiHatTmp, *** PhiRxPhiTmp2;//nFreq*Nch*NCh(complex) double* XPhiX, * YHat;//nFreq (complex) double* PsiY, * PsiYHat; //nFreq(double) double** XPhiXTmp, ** WDenomTmp, ** PhiRxPhiTmp; //nFreq * ch (complex) //SVE v7 double*** RsTmp; int* RsIdx; double *SHat; double **steerVector; int frame; public: inline MLDR(int fftSize_, int nChannel_, // See https://github.com/kooBH/MLDR/issues/1 for parameters history /** 20201203 **/ // double gammaPre_ = 0.3, // double gammaPost_ = 0.995, // int initFrame_ = 0, // double alpha_MLDR_ = 0.2, // double delta_MLDR_ = 1, // double epsi_MLDR_ = 1e-3, // double delta_SVE_ = 1, // double epsi_SVE_ = 1e-3, // double theta_SVE_ = 0.995, // double power_method_iter_ = 1 // /** 20210304 **/ double gammaPre_=0.995, double gammaPost_=0.995, int initFrame_=0, double alpha_MLDR_=0.2, double delta_MLDR_=1e-2, double epsi_MLDR_=1e-6, double delta_SVE_=0.0, double epsi_SVE_=1e-6, double theta_SVE_=0.995, double power_method_iter_=1 ); inline ~MLDR(); inline void Process(double** X); inline void Process(double** X,int target_channels); // initial inline void SVE_routine(double** X, double* SHat, double** steerVector, int freq); // update 20210303 inline void SVE_routine_v7(double** X, double* SHat, double** steerVector, int freq); // initial inline void MLDR_routine(double** X, double* SHat, double** steerVector,int freq); // reimpelemnted inline void MLDR_routine_v2(double** X, double* SHat, double** steerVector, int freq); inline void Clear(); }; inline MLDR::MLDR(int fftSize_, int nChannel_, double gammaPre_, double gammaPost_, int initFrame_, double alpha_MLDR_, double delta_MLDR_, double epsi_MLDR_, double delta_SVE_,double epsi_SVE_,double theta_SVE_,double power_method_iter_) { int channel, channel2, sample, freq; fftSize = fftSize_; nFreq = fftSize / 2 + 1; nChannel = nChannel_; gamma = gammaPost_; gammaPre = gammaPre_; gammaPost = gammaPost_; initFrame = initFrame_; alpha = alpha_MLDR_; delta_MLDR = delta_MLDR_; epsi_MLDR = epsi_MLDR_; delta_SVE = delta_SVE_; epsi_SVE = epsi_SVE_; theta_SVE = theta_SVE_; power_method_iter = power_method_iter_; frame = 0; YPred = new double[nFreq * 2]; memset(YPred, 0, sizeof(double) * nFreq * 2); lambdaY = new double[nFreq]; memset(lambdaY, 0, sizeof(double) * nFreq); lambdaYInv = new double[nFreq]; memset(lambdaYInv, 0, sizeof(double) * nFreq); phiY = new double[nFreq]; memset(phiY, 0, (nFreq) * sizeof(double)); PhiYHat = new double[nFreq]; memset(PhiYHat, 0, sizeof(double) * nFreq); W = new double* 
[nChannel]; for (channel = 0; channel < nChannel; channel++) { W[channel] = new double[nFreq * 2]; memset(W[channel], 0, sizeof(double) * (nFreq * 2)); } WNumer = new double* [nFreq*2]; for (freq = 0; freq < nFreq * 2; freq++) { WNumer[freq] = new double[nChannel]; memset(WNumer[freq], 0, sizeof(double) * nChannel); } XPsi = new double* [nFreq *2]; for (freq = 0; freq < nFreq * 2; freq++) { XPsi[freq] = new double[nChannel]; memset(XPsi[freq], 0, sizeof(double) * nChannel); } PsiRx = new double** [nFreq *2]; for (freq = 0; freq < nFreq*2; freq++) { PsiRx[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PsiRx[freq][channel] = new double[nChannel]; memset(PsiRx[freq][channel], 0, sizeof(double) * nChannel); } } numer = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { numer[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { numer[freq][channel] = new double[nChannel]; memset(numer[freq][channel], 0, sizeof(double) * nChannel); } } PsiTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PsiTmp[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PsiTmp[freq][channel] = new double[nChannel]; memset(PsiTmp[freq][channel], 0, sizeof(double) * nChannel); } } normRxTmp = new double[nFreq]; // NOTE // Rx = repmat(deltaSt*eye(nSensor),[1 1 nFreq]); // Rn = repmat(deltaSt*eye(nSensor),[1 1 nFreq]); //================================================ Rx = new double** [nFreq*2]; for (freq = 0; freq < nFreq*2; freq++) { Rx[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { Rx[freq][channel] = new double[nChannel]; memset(Rx[freq][channel], 0, (nChannel) * sizeof(double)); for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel == channel2) { if(freq%2==0) Rx[freq][channel][channel2] = delta_SVE; } } } } Rn = new double** [nFreq*2]; for (freq = 0; freq < nFreq*2; freq++) { Rn[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { Rn[freq][channel] = new double[nChannel]; memset(Rn[freq][channel], 0, (nChannel) * sizeof(double)); for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel == channel2) { if (freq % 2 == 0) Rn[freq][channel][channel2] = delta_SVE; } } } } denom = new double[nFreq]; memset(denom, 0, sizeof(double) * nFreq); WDenom = new double[nFreq]; memset(WDenom, 0, sizeof(double) * nFreq); Psi = new double**[fftSize + 2]; for (freq = 0; freq < fftSize + 2; freq++) { Psi[freq] = new double*[nChannel]; for (channel = 0; channel < nChannel; channel++){ Psi[freq][channel] = new double[nChannel]; memset(Psi[freq][channel], 0, (nChannel) * sizeof(double)); if (freq % 2 == 0) { Psi[freq][channel][channel] = delta_MLDR; } } } RxTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { RxTmp[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { RxTmp[freq][channel] = new double[nChannel]; memset(RxTmp[freq][channel], 0, sizeof(double) * nChannel); } } RnTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { RnTmp[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { RnTmp[freq][channel] = new double[nChannel]; memset(RnTmp[freq][channel], 0, sizeof(double)* nChannel); } } // maxEigVec = ones(nSensor, nFreq)./nSensor; maxEigVec = new double*[nFreq * 2]; for (freq = 0; freq < nFreq; freq++) { maxEigVec[freq+ freq] = new double[nChannel]; maxEigVec[freq+ freq+1] = new double[nChannel]; for 
(channel = 0; channel < nChannel; channel++) { maxEigVec[freq+ freq][channel] = 1.0/nChannel; maxEigVec[freq+ freq+1][channel] = 0.0; } } eigVecTmp = new double* [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { eigVecTmp[freq] = new double[nChannel]; } st = new double* [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { st[freq] = new double[nChannel]; memset(st[freq], 0, sizeof(double) * nChannel); } XFrameTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { XFrameTmp[freq] = new double* [nChannel]; printf(""); for (channel = 0; channel < nChannel; channel++) { XFrameTmp[freq][channel] = new double[nChannel]; memset(XFrameTmp[freq][channel], 0, sizeof(double)); } } RxTmp2 = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { RxTmp2[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { RxTmp2[freq][channel] = new double[nChannel]; memset(RxTmp2[freq][channel], 0, sizeof(double) * nChannel); } } RnTmp2 = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { RnTmp2[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { RnTmp2[freq][channel] = new double[nChannel]; memset(RnTmp2[freq][channel], 0, sizeof(double) * nChannel); } printf(""); } eigTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { eigTmp[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { eigTmp[freq][channel] = new double[nChannel]; memset(eigTmp[freq][channel], 0, sizeof(double) * nChannel); } } LambdaX = new double[nFreq]; memset(LambdaX, 0, sizeof(double)* nFreq); LambdaN = new double[nFreq]; memset(LambdaN, 0, sizeof(double)* nFreq); tmpRe = new double[nFreq]; tmpIm = new double[nFreq]; SHat = new double [fftSize+2]; memset(SHat,0,sizeof(double)*(fftSize+2)); steerVector = new double*[nChannel]; for(channel=0;channel <nChannel;channel++){ steerVector[channel] = new double[nFreq *2]; for(freq=0;freq<nFreq;freq++){ steerVector[channel][freq+freq]=1.0/nChannel; steerVector[channel][freq+freq+1]=0.0; } } //MLDR v2 //double *** Phi,*** PhiRxPhi,*** PhiHat, *** PhiHatTmp;//nFreq*Nch*NCh(complex) PhiRxPhiTmp2 = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PhiRxPhiTmp2[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PhiRxPhiTmp2[freq][channel] = new double[nChannel]; memset(PhiRxPhiTmp2[freq][channel], 0, sizeof(double) * nChannel); } } Phi = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { Phi[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { Phi[freq][channel] = new double[nChannel]; memset(Phi[freq][channel], 0, sizeof(double) * nChannel); for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel == channel2) { if (freq % 2 == 0) Phi[freq][channel][channel2] = delta_MLDR; } } } } PhiRxPhi = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PhiRxPhi[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PhiRxPhi[freq][channel] = new double[nChannel]; memset(PhiRxPhi[freq][channel], 0, sizeof(double) * nChannel); } } PhiHat = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PhiHat[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PhiHat[freq][channel] = new double[nChannel]; memset(PhiHat[freq][channel], 0, sizeof(double) * nChannel); } } PhiHatTmp = new double** [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PhiHatTmp[freq] = new 
double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { PhiHatTmp[freq][channel] = new double[nChannel]; memset(PhiHatTmp[freq][channel], 0, sizeof(double) * nChannel); } } // nFreq * ch PhiRxPhiTmp = new double* [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { PhiRxPhiTmp[freq] = new double[nChannel]; memset(PhiRxPhiTmp[freq], 0, sizeof(double) * nChannel); } XPhiXTmp = new double* [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { XPhiXTmp[freq] = new double[nChannel]; memset(XPhiXTmp[freq], 0, sizeof(double) * nChannel); } WDenomTmp = new double* [nFreq * 2]; for (freq = 0; freq < nFreq * 2; freq++) { WDenomTmp[freq] = new double[nChannel]; memset(WDenomTmp[freq], 0, sizeof(double) * nChannel); } //double* XPhiX, * YHat;//nFreq (complex) //double* PsiY, * PsiYHat;; //nFreq XPhiX = new double[nFreq * 2]; memset(XPhiX, 0, sizeof(double) * (nFreq * 2)); YHat = new double[nFreq * 2]; memset(YHat, 0, sizeof(double) * (nFreq * 2)); PsiY = new double[nFreq]; memset(PsiY, 0, sizeof(double) * (nFreq)); PsiYHat = new double[nFreq]; memset(PsiYHat, 0, sizeof(double) * (nFreq)); // SVE v7 RsTmp = new double** [nFreq]; for (freq = 0; freq < nFreq; freq++) { RsTmp[freq] = new double* [nChannel]; for (channel = 0; channel < nChannel; channel++) { RsTmp[freq][channel] = new double[nChannel]; memset(RsTmp[freq][channel], 0, sizeof(double) * nChannel); } } RsIdx = new int[nFreq]; memset(RsIdx, 0, sizeof(int) * nFreq); } inline MLDR::~MLDR() { int channel, freq; delete[] YPred; delete[] lambdaY; delete[] lambdaYInv; delete[] phiY; delete[] denom; delete[] WDenom; delete[] normRxTmp; for (freq = 0; freq < fftSize + 2; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] Psi[freq][channel]; } delete[] Psi[freq]; } delete[] Psi; delete[] PhiYHat; for (channel = 0; channel < nChannel; channel++){ delete[] W[channel]; } delete[] W; for (freq = 0; freq < nFreq*2; freq++) { delete[] WNumer[freq]; delete[] XPsi[freq]; } delete[] WNumer; delete[] XPsi; for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] PsiRx[freq][channel]; delete[] numer[freq][channel]; delete[] PsiTmp[freq][channel]; } delete[] PsiRx[freq]; delete[] numer[freq]; delete[] PsiTmp[freq]; } delete[] PsiRx; delete[] numer; delete[] PsiTmp; for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] RxTmp[freq][channel]; delete[] RnTmp[freq][channel]; delete[] XFrameTmp[freq][channel]; delete[] RxTmp2[freq][channel]; delete[] RnTmp2[freq][channel]; delete[] eigTmp[freq][channel]; } delete[] RxTmp[freq]; delete[] RnTmp[freq]; delete[] XFrameTmp[freq]; delete[] RxTmp2[freq]; delete[] RnTmp2[freq]; delete[] eigTmp[freq]; } delete[] RxTmp; delete[] RnTmp; delete[] XFrameTmp; delete[] RxTmp2; delete[] RnTmp2; delete[] eigTmp; for (freq = 0; freq < nFreq*2; freq++) { delete[] maxEigVec[freq]; delete[] st[freq]; delete[] eigVecTmp[freq]; } delete[] maxEigVec; delete[] st; delete[] eigVecTmp; for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] Rx[freq][channel]; delete[] Rn[freq][channel]; } delete[] Rx[freq]; delete[] Rn[freq]; } delete[] Rx; delete[] Rn; delete[] LambdaN; delete[] LambdaX; delete[] tmpRe; delete[] tmpIm; delete[] SHat; for(channel=0;channel<nChannel;channel++) delete[] steerVector[channel]; delete[] steerVector; //ver 2 for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] Phi[freq][channel]; delete[] 
PhiRxPhi[freq][channel]; delete[] PhiHat[freq][channel]; delete[] PhiHatTmp[freq][channel]; delete[] PhiRxPhiTmp2[freq][channel]; } delete[] Phi[freq]; delete[] PhiRxPhi[freq]; delete[] PhiHat[freq]; delete[] PhiHatTmp[freq]; delete[] PhiRxPhiTmp2[freq]; } delete[] Phi; delete[] PhiRxPhi; delete[] PhiHat; delete[] PhiHatTmp; delete[] PhiRxPhiTmp2; delete[] XPhiX; delete[] YHat; delete[] PsiY; delete[] PsiYHat; for (freq = 0; freq < nFreq * 2; freq++) { delete[] XPhiXTmp[freq]; delete[] WDenomTmp[freq]; delete[] PhiRxPhiTmp[freq]; } delete[] XPhiXTmp; delete[] WDenomTmp; delete[] PhiRxPhiTmp; //ver 7 for (freq = 0; freq < nFreq; freq++) { for (channel = 0; channel < nChannel; channel++) { delete[] RsTmp[freq][channel]; } delete[] RsTmp[freq]; } delete[] RsTmp; delete[] RsIdx; } inline void MLDR::Process(double** X){ cnt++; if (frame < initFrame) gamma = gammaPre; else gamma = gammaPost; int freq; //#pragma omp parallel for schedule(static,32) for (freq = 0; freq < nFreq; freq++){ int channel, channel2, channel3, re, im; re = freq + freq; im = freq + freq + 1; //[M] YHat = W(:,freq)'*XFrame(:,freq); YPred[re] = 0.0; YPred[im] = 0.0; for (channel = 0; channel < nChannel; channel++){ YPred[re] += W[channel][re] * X[channel][re] + W[channel][im] * X[channel][im]; YPred[im] += W[channel][re] * X[channel][im] - W[channel][im] * X[channel][re]; } /********** steering vector estimation *********/ // SVE_routine(X, SHat, steerVector, freq); SVE_routine_v7(X, SHat, steerVector, freq); /********** MLDR beamforming *********/ //MLDR_routine(X, SHat, steerVector, freq); MLDR_routine_v2(X, SHat, steerVector, freq); X[0][re] = SHat[re]; X[0][im] = SHat[im]; } } inline void MLDR::Process(double** X, int target_channels){ int tmp = nChannel; nChannel = target_channels; Process(X); nChannel = tmp; } inline void MLDR::SVE_routine_v7(double** X, double* SHat, double** steerVector, int freq) { int channel, channel2, channel3, re, im; re = freq + freq; im = freq + freq + 1; //[M]PhiYHat = abs(YHat). 
^ 2; PhiYHat[freq] = YPred[re] * YPred[re] + YPred[im] * YPred[im]; //[M]lambdaY = max(PhiYHat, epsiSt); lambdaY[freq] = std::max(PhiYHat[freq], epsi_SVE); //[M]lambdaYInv = 1 ./ lambdaY; lambdaYInv[freq] = 1.0 / (lambdaY[freq]); // XFrameTmp = (XFrame(:,freq)*XFrame(:,freq)') for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { XFrameTmp[re][channel][channel2] = X[channel][re] * X[channel2][re] + X[channel][im] * X[channel2][im]; XFrameTmp[im][channel][channel2] = -X[channel][re] * X[channel2][im] + X[channel][im] * X[channel2][re]; } } //RxTmp = Rx(:,:,freq) * LambdaX(freq) / (LambdaX(freq) + 1) + (XFrame(:,freq)*XFrame(:,freq)') / (LambdaX(freq) + 1); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { // real RxTmp[re][channel][channel2] = Rx[re][channel][channel2] * LambdaX[freq] / (LambdaX[freq] + 1.0) + XFrameTmp[re][channel][channel2] / (LambdaX[freq] + 1.0); // imag RxTmp[im][channel][channel2] = Rx[im][channel][channel2] * LambdaX[freq] / (LambdaX[freq] + 1.0) + XFrameTmp[im][channel][channel2] / (LambdaX[freq] + 1.0); } } // RnTmp = Rn(:,:,freq) * LambdaN(freq) / (LambdaN(freq) + lambdaYInv) + lambdaYInv.*(XFrame(:,freq) * XFrame(:,freq)') / (LambdaN(freq) + lambdaYInv); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { //real RnTmp[re][channel][channel2] = Rn[re][channel][channel2] * LambdaN[freq] / (LambdaN[freq] + lambdaYInv[freq]) + lambdaYInv[freq] * XFrameTmp[re][channel][channel2] / (LambdaN[freq] + lambdaYInv[freq]); //imag RnTmp[im][channel][channel2] = Rn[im][channel][channel2] * LambdaN[freq] / (LambdaN[freq] + lambdaYInv[freq]) + lambdaYInv[freq] * XFrameTmp[im][channel][channel2] / (LambdaN[freq] + lambdaYInv[freq]); } } // RxTmp = 0.5 * (RxTmp + RxTmp'); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { RxTmp2[re][channel][channel2] = RxTmp[re][channel][channel2] + RxTmp[re][channel2][channel]; RxTmp2[re][channel][channel2] *= 0.5; if (channel != channel2) { RxTmp2[im][channel][channel2] = RxTmp[im][channel][channel2] - RxTmp[im][channel2][channel]; RxTmp2[im][channel][channel2] *= 0.5; } else { RxTmp2[im][channel][channel2] = 0.0; } } } // RnTmp = 0.5 * (RnTmp + RnTmp'); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel != channel2) { RnTmp2[re][channel][channel2] = RnTmp[re][channel][channel2] + RnTmp[re][channel2][channel]; RnTmp2[re][channel][channel2] *= 0.5; RnTmp2[im][channel][channel2] = RnTmp[im][channel][channel2] - RnTmp[im][channel2][channel]; RnTmp2[im][channel][channel2] *= 0.5; } else { RnTmp2[re][channel][channel2] = RnTmp[re][channel][channel2]; RnTmp2[im][channel][channel2] = 0.0; } } } //norm(RxTmp) normRxTmp[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { normRxTmp[freq] += RxTmp2[re][channel][channel2] * RxTmp2[re][channel][channel2] + RxTmp2[im][channel][channel2] * RxTmp2[im][channel][channel2]; } } //if norm(RxTmp) ~= 0 % to prevent division by zero (20201213) if (normRxTmp[freq] != 0) { // RsTmp = RxTmp - theta*RnTmp; for (channel = 0; channel < nChannel; channel++) memset(RsTmp[freq][channel], 0, sizeof(double) * nChannel); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { RsTmp[freq][channel][channel2] = RxTmp2[re][channel][channel2] - 
theta_SVE * RnTmp2[re][channel][channel2]; } } // findIndex = find(diag(RsTmp) < 0); RsIdx[freq] = 0; for (channel = 0; channel < nChannel; channel++) { if (RsTmp[freq][channel][channel] < 0) RsIdx[freq]++; } // if sum(findIndex) == 0 if (RsIdx[freq] == 0) { // for iterPM = 1 : maxIterPM // maxEigVec(:,freq) = (RxTmp - theta*RnTmp)*maxEigVec(:,freq); // maxEigVec(:, freq) = maxEigVec(:, freq). / norm(maxEigVec(:, freq)); // end for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { eigTmp[re][channel][channel2] = RxTmp2[re][channel][channel2] - theta_SVE * RnTmp2[re][channel][channel2]; eigTmp[im][channel][channel2] = RxTmp2[im][channel][channel2] - theta_SVE * RnTmp2[im][channel][channel2]; } } for (int i = 0; i < power_method_iter; i++) { for (channel = 0; channel < nChannel; channel++) { tmpRe[freq] = 0.0; tmpIm[freq] = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { tmpRe[freq] += eigTmp[re][channel][channel2] * maxEigVec[re][channel2] - eigTmp[im][channel][channel2] * maxEigVec[im][channel2]; tmpIm[freq] += eigTmp[im][channel][channel2] * maxEigVec[re][channel2] + eigTmp[re][channel][channel2] * maxEigVec[im][channel2]; } eigVecTmp[re][channel] = tmpRe[freq]; eigVecTmp[im][channel] = tmpIm[freq]; } for (channel = 0; channel < nChannel; channel++) { maxEigVec[re][channel] = eigVecTmp[re][channel]; maxEigVec[im][channel] = eigVecTmp[im][channel]; } //norm tmpRe[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) tmpRe[freq] += maxEigVec[re][channel] * maxEigVec[re][channel] + maxEigVec[im][channel] * maxEigVec[im][channel]; tmpRe[freq] = sqrt(tmpRe[freq]); for (channel = 0; channel < nChannel; channel++) { maxEigVec[re][channel] /= (tmpRe[freq] + min_pos); maxEigVec[im][channel] /= (tmpRe[freq] + min_pos); } } // st(:,freq) = maxEigVec(:,freq)./maxEigVec(1,freq); tmpRe[freq] = maxEigVec[re][0] * maxEigVec[re][0] + maxEigVec[im][0] * maxEigVec[im][0]; for (channel = 0; channel < nChannel; channel++) { steerVector[channel][re] = (maxEigVec[re][channel] * maxEigVec[re][0] + maxEigVec[im][channel] * maxEigVec[im][0]) / (tmpRe[freq] + min_pos); steerVector[channel][im] = (maxEigVec[im][channel] * maxEigVec[re][0] - maxEigVec[re][channel] * maxEigVec[im][0]) / (tmpRe[freq] + min_pos); } // st(:, freq) = st(:, freq).*sqrt(nSensor / norm(st(:, freq))); tmpRe[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) tmpRe[freq] += steerVector[channel][re] * steerVector[channel][re] + steerVector[channel][im] * steerVector[channel][im]; tmpRe[freq] = sqrt(tmpRe[freq]); tmpRe[freq] = sqrt(nChannel / (tmpRe[freq] + min_pos)); for (channel = 0; channel < nChannel; channel++) { steerVector[channel][re] = steerVector[channel][re] * tmpRe[freq]; steerVector[channel][im] = steerVector[channel][im] * tmpRe[freq]; } //Rn(:, : , freq) = RnTmp; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Rn[re][channel][channel2] = RnTmp2[re][channel][channel2]; Rn[im][channel][channel2] = RnTmp2[im][channel][channel2]; } } // LambdaN(freq) = gamma * (LambdaN(freq) + lambdaYInv); LambdaN[freq] = gamma * (LambdaN[freq] + lambdaYInv[freq]); }//if (RsIdx == 0) // Rx(:,:,freq) = RxTmp; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Rx[re][channel][channel2] = RxTmp2[re][channel][channel2]; Rx[im][channel][channel2] = RxTmp2[im][channel][channel2]; } } // LambdaX(freq) = gamma * (LambdaX(freq) + 1); LambdaX[freq] = gamma * (LambdaX[freq] 
+ 1.0); }//if (normRxTmp[freq] != 0) } inline void MLDR::Clear(){ int channel, channel2, sample, freq; memset(phiY, 0, (nFreq) * sizeof(double)); for (freq = 0; freq < nFreq * 2; freq++) { memset(WNumer[freq], 0, sizeof(double) * nChannel); } for (freq = 0; freq < nFreq * 2; freq++) { memset(XPsi[freq], 0, sizeof(double) * nChannel); } for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(PsiRx[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(numer[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(PsiTmp[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(Rx[freq][channel], 0, (nChannel) * sizeof(double)); for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel == channel2) { if(freq%2==0) Rx[freq][channel][channel2] = delta_SVE; } } } } for (freq = 0; freq < nFreq*2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(Rn[freq][channel], 0, (nChannel) * sizeof(double)); for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel == channel2) { if (freq % 2 == 0) Rn[freq][channel][channel2] = delta_SVE; } } } } memset(denom, 0, sizeof(double) * nFreq); memset(WDenom, 0, sizeof(double) * nFreq); for (freq = 0; freq < fftSize + 2; freq++) { for (channel = 0; channel < nChannel; channel++){ memset(Psi[freq][channel], 0, (nChannel) * sizeof(double)); if (freq % 2 == 0) { Psi[freq][channel][channel] = delta_MLDR; } } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(RxTmp[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(RnTmp[freq][channel], 0, sizeof(double)* nChannel); } } for (freq = 0; freq < nFreq; freq++) { for (channel = 0; channel < nChannel; channel++) { maxEigVec[freq+ freq][channel] = 1.0/nChannel; maxEigVec[freq+ freq+1][channel] = 0.0; } } for (freq = 0; freq < nFreq * 2; freq++) { memset(st[freq], 0, sizeof(double) * nChannel); } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(XFrameTmp[freq][channel], 0, sizeof(double)); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(RxTmp2[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(RnTmp2[freq][channel], 0, sizeof(double) * nChannel); } } for (freq = 0; freq < nFreq * 2; freq++) { for (channel = 0; channel < nChannel; channel++) { memset(eigTmp[freq][channel], 0, sizeof(double) * nChannel); } } memset(LambdaX, 0, sizeof(double)* nFreq); memset(LambdaN, 0, sizeof(double)* nFreq); memset(SHat,0,sizeof(double)*(fftSize+2)); for(channel=0;channel <nChannel;channel++){ for(freq=0;freq<nFreq;freq++){ steerVector[channel][freq+freq]=1.0/nChannel; steerVector[channel][freq+freq+1]=0.0; } } } void MLDR::MLDR_routine_v2(double** X, double* SHat, double** steerVector, int freq) { int channel, channel2, channel3, re, im; double t_re, t_im; re = freq + freq; im = re + 1; //[M] PhiRxPhi = Phi(:, : , freq) * XFrame(:, freq) // * (XFrame(:, freq)')*Phi(:,:,freq); for (channel = 0; channel < nChannel; channel++) { t_re = 
0.0; t_im = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { t_re += Phi[re][channel][channel2] * X[channel2][re] - Phi[im][channel][channel2] * X[channel2][im]; t_im += Phi[re][channel][channel2] * X[channel2][im] + Phi[im][channel][channel2] * X[channel2][re]; } PhiRxPhiTmp[re][channel] = t_re; PhiRxPhiTmp[im][channel] = t_im; } for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { PhiRxPhiTmp2[re][channel][channel2] = PhiRxPhiTmp[re][channel] * X[channel2][re] + PhiRxPhiTmp[im][channel] * X[channel2][im]; PhiRxPhiTmp2[im][channel][channel2] = -PhiRxPhiTmp[re][channel] * X[channel2][im] + PhiRxPhiTmp[im][channel] * X[channel2][re]; } } for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { t_re = 0.0; t_im = 0.0; for (channel3 = 0; channel3 < nChannel; channel3++) { t_re += PhiRxPhiTmp2[re][channel][channel3] * Phi[re][channel3][channel2] - PhiRxPhiTmp2[im][channel][channel3] * Phi[im][channel3][channel2]; t_im += PhiRxPhiTmp2[re][channel][channel3] * Phi[im][channel3][channel2] + PhiRxPhiTmp2[im][channel][channel3] * Phi[re][channel3][channel2]; } PhiRxPhi[re][channel][channel2] = t_re; PhiRxPhi[im][channel][channel2] = t_im; } } //[M] XPhiX = (XFrame(:, freq)')*Phi(:,:,freq)*XFrame(:,freq); 1x6 6x6 6x1 : 1x1 for (channel = 0; channel < nChannel; channel++) { t_re = 0.0; t_im = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { t_re += X[channel2][re] * Phi[re][channel2][channel] + X[channel2][im] * Phi[im][channel2][channel]; t_im += X[channel2][re] * Phi[im][channel2][channel] - X[channel2][im] * Phi[re][channel2][channel]; } XPhiXTmp[re][channel] = t_re; XPhiXTmp[im][channel] = t_im; } XPhiX[re] = 0.0; XPhiX[im] = 0.0; for (channel = 0; channel < nChannel; channel++) { XPhiX[re] += XPhiXTmp[re][channel] * X[channel][re] - XPhiXTmp[im][channel] * X[channel][im]; XPhiX[im] += XPhiXTmp[re][channel] * X[channel][im] + XPhiXTmp[im][channel] * X[channel][re]; } //[M] YHat = W(:, freq)'*XFrame(:,freq); 1x6 6x1 : 1x1 YHat[re] = 0.0; YHat[im] = 0.0; for (channel = 0; channel < nChannel; channel++) { YHat[re] += W[channel][re] * X[channel][re] + W[channel][im] * X[channel][im]; YHat[im] += +W[channel][re] * X[channel][im] - W[channel][im] * X[channel][re]; } //[M] PsiYHat = alpha * PsiY(freq) + (1 - alpha) * abs(YHat). ^ 2; PsiYHat[freq] = alpha * PsiY[freq] + (1 - alpha) * (YHat[re] * YHat[re] + YHat[im] * YHat[im]); //[M] lambdaY = max(PsiYHat, epsi); lambdaY[freq] = std::max(PsiYHat[freq], epsi_MLDR); //[M] denom = gamma * lambdaY + XPhiX; denom[freq] = gamma * lambdaY[freq] + XPhiX[re]; //[M] denom = real(denom); //[M] PhiHat = (Phi(:, : , freq) - (PhiRxPhi. / denom)). 
/ gamma; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { PhiHatTmp[re][channel][channel2] = (Phi[re][channel][channel2] - (PhiRxPhi[re][channel][channel2] / (denom[freq] + min_pos))) / gamma; PhiHatTmp[im][channel][channel2] = (Phi[im][channel][channel2] - (PhiRxPhi[im][channel][channel2] / (denom[freq] + min_pos))) / gamma; } } //[M] PhiHat = 0.5.*(PhiHat + PhiHat'); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { PhiHat[re][channel][channel2] = PhiHatTmp[re][channel][channel2] + PhiHatTmp[re][channel2][channel]; PhiHat[re][channel][channel2] *= 0.5; PhiHat[im][channel][channel2] = PhiHatTmp[im][channel][channel2] - PhiHatTmp[im][channel2][channel]; PhiHat[im][channel][channel2] *= 0.5; } } //[M] W(:, freq) = PhiHat * st(:, freq) // ./ real(st(:, freq)'*PhiHat*st(:,freq)); for (channel = 0; channel < nChannel; channel++) { t_re = 0.0; t_im = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { t_re += PhiHat[re][channel][channel2] * steerVector[channel2][re] - PhiHat[im][channel][channel2] * steerVector[channel2][im]; t_im += PhiHat[re][channel][channel2] * steerVector[channel2][im] + PhiHat[im][channel][channel2] * steerVector[channel2][re]; } WNumer[re][channel] = t_re; WNumer[im][channel] = t_im; } for (channel = 0; channel < nChannel; channel++) { t_re = 0.0; t_im = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { t_re += steerVector[channel2][re] * PhiHat[re][channel2][channel] + steerVector[channel2][im] * PhiHat[im][channel2][channel]; t_im += steerVector[channel2][re] * PhiHat[im][channel2][channel] - steerVector[channel2][im] * PhiHat[re][channel2][channel]; } WDenomTmp[re][channel] = t_re; WDenomTmp[im][channel] = t_im; } WDenom[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) { WDenom[freq] += WDenomTmp[re][channel] * steerVector[channel][re] - WDenomTmp[im][channel] * steerVector[channel][im]; } for (channel = 0; channel < nChannel; channel++) { W[channel][re] = (WNumer[re][channel]+min_pos) / (WDenom[freq] + min_pos); W[channel][im] = (WNumer[im][channel]+min_pos) / (WDenom[freq] + min_pos); } //[M] YFrame(freq) = W(:, freq)'*XFrame(:,freq); SHat[re] = 0.0; SHat[im] = 0.0; for (channel = 0; channel < nChannel; channel++) { SHat[re] += W[channel][re] * X[channel][re] + W[channel][im] * X[channel][im]; SHat[im] += W[channel][re] * X[channel][im] - W[channel][im] * X[channel][re]; } //[M] PsiY(freq) = alpha * PsiY(freq) + (1 - alpha) * abs(YFrame(freq)).^2; PsiY[freq] = alpha * PsiY[freq] + (1 - alpha) * (SHat[re] * SHat[re] + SHat[im] * SHat[im]); //[M] Phi(:, : , freq) = PhiHat; note (translated from Korean): could probably skip PhiHat and just update Phi in place.
for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Phi[re][channel][channel2] = PhiHat[re][channel][channel2]; Phi[im][channel][channel2] = PhiHat[im][channel][channel2]; } } } inline void MLDR::MLDR_routine(double** X, double* SHat, double** steerVector, int freq) { int channel, channel2, channel3, re, im; re = freq + freq; im = re + 1; phiY[freq] = alpha * phiY[freq] + (1 - alpha) * (YPred[re] * YPred[re] + YPred[im] * YPred[im]); lambdaY[freq] = std::max(phiY[freq], epsi_MLDR); #if !_SVE for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { XFrameTmp[re][channel][channel2] = X[channel][re] * X[channel2][re] + X[channel][im] * X[channel2][im]; XFrameTmp[im][channel][channel2] = -X[channel][re] * X[channel2][im] + X[channel][im] * X[channel2][re]; } } #endif // numer for (channel = 0; channel < nChannel; channel++) { memset(PsiRx[re][channel], 0, (nChannel) * sizeof(double)); memset(PsiRx[im][channel], 0, (nChannel) * sizeof(double)); memset(numer[re][channel], 0, (nChannel) * sizeof(double)); memset(numer[im][channel], 0, (nChannel) * sizeof(double)); // Psi * Rx => PsiRx for (channel2 = 0; channel2 < nChannel; channel2++) { for (channel3 = 0; channel3 < nChannel; channel3++) { PsiRx[re][channel][channel2] += Psi[re][channel][channel3] * XFrameTmp[re][channel3][channel2] - Psi[im][channel][channel3] * XFrameTmp[im][channel3][channel2]; PsiRx[im][channel][channel2] += Psi[re][channel][channel3] * XFrameTmp[im][channel3][channel2] + Psi[im][channel][channel3] * XFrameTmp[re][channel3][channel2]; //PsiRx[re][channel][channel2] += Psi[re][channel][channel3] * XFrameTmp[freq][channel3][channel2+ channel2] // - Psi[im][channel][channel3 + channel3+1] * XFrameTmp[freq][channel3][channel2+channel2+1]; //PsiRx[im][channel][channel2] += Psi[re][channel][channel3] * XFrameTmp[freq][channel3][channel2+ channel2+1] // + Psi[im][channel][channel3] * XFrameTmp[freq][channel3][channel2+ channel2]; } } // PsiRx * Psi => numer for (channel2 = 0; channel2 < nChannel; channel2++) { for (channel3 = 0; channel3 < nChannel; channel3++) { numer[re][channel][channel2] += PsiRx[re][channel][channel3] * Psi[re][channel3][channel2] - PsiRx[im][channel][channel3] * Psi[im][channel3][channel2]; numer[im][channel][channel2] += PsiRx[re][channel][channel3] * Psi[im][channel3][channel2] + PsiRx[im][channel][channel3] * Psi[re][channel3][channel2]; } } } //[M] denom = gamma * lambdaY + XPhiX; // denom // X' * Psi = XPsi memset(XPsi[re], 0, (nChannel) * sizeof(double)); memset(XPsi[im], 0, (nChannel) * sizeof(double)); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { XPsi[re][channel] += X[channel2][re] * Psi[re][channel2][channel] + X[channel2][im] * Psi[im][channel2][channel]; XPsi[im][channel] += X[channel2][re] * Psi[im][channel2][channel] - X[channel2][im] * Psi[re][channel2][channel]; } } // XPsi * X = denom (real) denom[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) { denom[freq] += XPsi[re][channel] * X[channel][re] - XPsi[im][channel] * X[channel][im]; } denom[freq] += gamma * lambdaY[freq]; //[M]PsiHat = (Psi(:,:,freq) - (PhiRxPhi./denom))./gamma; // Psi = PsiTmp for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { PsiTmp[re][channel][channel2] = (Psi[re][channel][channel2] - numer[re][channel][channel2] / denom[freq]) / gamma; PsiTmp[im][channel][channel2] = (Psi[im][channel][channel2] - 
numer[im][channel][channel2] / denom[freq]) / gamma; } } //[M]PsiHat = 0.5.*(PsiHat+PsiHat'); // Psi hermitian symmetry for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Psi[re][channel][channel2] = 0.5 * (PsiTmp[re][channel][channel2] + PsiTmp[re][channel2][channel]); if (channel != channel2) { Psi[im][channel][channel2] = 0.5 * (PsiTmp[im][channel][channel2] - PsiTmp[im][channel2][channel]); } else { Psi[im][channel][channel2] = 0; } } } // Phi * steerVector => WNumer memset(WNumer[re], 0, (nChannel) * sizeof(double)); memset(WNumer[im], 0, (nChannel) * sizeof(double)); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { WNumer[re][channel] += Psi[re][channel][channel2] * steerVector[channel2][re] - Psi[im][channel][channel2] * steerVector[channel2][im]; WNumer[im][channel] += Psi[re][channel][channel2] * steerVector[channel2][im] + Psi[im][channel][channel2] * steerVector[channel2][re]; } } // steerVector' * WNumer(Phi * steerVector) => WDenom (real) WDenom[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) { WDenom[freq] += steerVector[channel][re] * WNumer[re][channel] + steerVector[channel][im] * WNumer[im][channel]; } // W for (channel = 0; channel < nChannel; channel++) { W[channel][re] = (WNumer[re][channel] + min_pos) / (WDenom[freq] + min_pos); W[channel][im] = (WNumer[im][channel] + min_pos) / (WDenom[freq] + min_pos); } // W ' X = SHat SHat[re] = 0.0; SHat[im] = 0.0; for (channel = 0; channel < nChannel; channel++) { SHat[re] += W[channel][re] * X[channel][re] + W[channel][im] * X[channel][im]; SHat[im] += W[channel][re] * X[channel][im] - W[channel][im] * X[channel][re]; } X[0][re] = SHat[re]; X[0][im] = SHat[im]; } inline void MLDR::SVE_routine(double** X, double* SHat, double** steerVector, int freq) { int channel, channel2, channel3, re, im; re = freq + freq; im = freq + freq + 1; //[M]PhiYHat = abs(YHat). 
^ 2; PhiYHat[freq] = YPred[re] * YPred[re] + YPred[im] * YPred[im]; //[M]lambdaY = max(PhiYHat, epsiSt); lambdaY[freq] = std::max(PhiYHat[freq], epsi_SVE); //[M]lambdaYInv = 1 ./ lambdaY; lambdaYInv[freq] = 1.0 / (lambdaY[freq]); // XFrameTmp = (XFrame(:,freq)*XFrame(:,freq)') for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { XFrameTmp[re][channel][channel2] = X[channel][re] * X[channel2][re] + X[channel][im] * X[channel2][im]; XFrameTmp[im][channel][channel2] = -X[channel][re] * X[channel2][im] + X[channel][im] * X[channel2][re]; } } //RxTmp = Rx(:,:,freq) * LambdaX(freq) / (LambdaX(freq) + 1) + (XFrame(:,freq)*XFrame(:,freq)') / (LambdaX(freq) + 1); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { //real RxTmp[re][channel][channel2] = Rx[re][channel][channel2] * LambdaX[freq] / (LambdaX[freq] + 1.0) + XFrameTmp[re][channel][channel2] / (LambdaX[freq] + 1.0); //imag RxTmp[im][channel][channel2] = Rx[im][channel][channel2] * LambdaX[freq] / (LambdaX[freq] + 1.0) + XFrameTmp[im][channel][channel2] / (LambdaX[freq] + 1.0); } } //RnTmp = Rn(:,:,freq) * LambdaN(freq) / (LambdaN(freq) + lambdaYInv) + (XFrame(:,freq) * XFrame(:,freq)') / (LambdaN(freq) + lambdaYInv); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { //real RnTmp[re][channel][channel2] = Rn[re][channel][channel2] * LambdaN[freq] / (LambdaN[freq] + lambdaYInv[freq]) + XFrameTmp[re][channel][channel2] / (LambdaN[freq] + lambdaYInv[freq]); //imag RnTmp[im][channel][channel2] = Rn[im][channel][channel2] * LambdaN[freq] / (LambdaN[freq] + lambdaYInv[freq]) + XFrameTmp[im][channel][channel2] / (LambdaN[freq] + lambdaYInv[freq]); // printf("%e %+ei ", RnTmp[channel][channel2 + channel2], RnTmp[channel][channel2 + channel2 + 1]); } // printf("\n"); } // RxTmp = 0.5 * (RxTmp + RxTmp'); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { RxTmp2[re][channel][channel2] = RxTmp[re][channel][channel2] + RxTmp[re][channel2][channel]; RxTmp2[re][channel][channel2] *= 0.5; if (channel != channel2) { RxTmp2[im][channel][channel2] = RxTmp[im][channel][channel2] - RxTmp[im][channel2][channel]; RxTmp2[im][channel][channel2] *= 0.5; } else { RxTmp2[im][channel][channel2] = 0.0; } } } // RnTmp = 0.5 * (RnTmp + RnTmp'); for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { if (channel != channel2) { RnTmp2[re][channel][channel2] = RnTmp[re][channel][channel2] + RnTmp[re][channel2][channel]; RnTmp2[re][channel][channel2] *= 0.5; RnTmp2[im][channel][channel2] = RnTmp[im][channel][channel2] - RnTmp[im][channel2][channel]; RnTmp2[im][channel][channel2] *= 0.5; } else { RnTmp2[re][channel][channel2] = RnTmp[re][channel][channel2]; RnTmp2[im][channel][channel2] = 0.0; } } } //norm(RxTmp) normRxTmp[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { normRxTmp[freq] += RxTmp[re][channel][channel2] * RxTmp[re][channel][channel2] + RxTmp[im][channel][channel2] * RxTmp[im][channel][channel2]; } } //if norm(RxTmp) ~= 0 % to prevent division by zero (20201213) if (normRxTmp[freq] != 0) { // for iterPM = 1 : maxIterPM // maxEigVec(:,freq) = (RxTmp - theta*RnTmp)*maxEigVec(:,freq); // maxEigVec(:, freq) = maxEigVec(:, freq). 
/ norm(maxEigVec(:, freq)); // end for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { eigTmp[re][channel][channel2] = RxTmp2[re][channel][channel2] - theta_SVE * RnTmp2[re][channel][channel2]; eigTmp[im][channel][channel2] = RxTmp2[im][channel][channel2] - theta_SVE * RnTmp2[im][channel][channel2]; } } for (int i = 0; i < power_method_iter; i++) { for (channel = 0; channel < nChannel; channel++) { tmpRe[freq] = 0.0; tmpIm[freq] = 0.0; for (channel2 = 0; channel2 < nChannel; channel2++) { tmpRe[freq] += eigTmp[re][channel][channel2] * maxEigVec[re][channel2] - eigTmp[im][channel][channel2] * maxEigVec[im][channel2]; tmpIm[freq] += eigTmp[im][channel][channel2] * maxEigVec[re][channel2] + eigTmp[re][channel][channel2] * maxEigVec[im][channel2]; } eigVecTmp[re][channel] = tmpRe[freq]; eigVecTmp[im][channel] = tmpIm[freq]; } for (channel = 0; channel < nChannel; channel++) { maxEigVec[re][channel] = eigVecTmp[re][channel]; maxEigVec[im][channel] = eigVecTmp[im][channel]; } //norm tmpRe[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) tmpRe[freq] += maxEigVec[re][channel] * maxEigVec[re][channel] + maxEigVec[im][channel] * maxEigVec[im][channel]; tmpRe[freq] = sqrt(tmpRe[freq]); for (channel = 0; channel < nChannel; channel++) { maxEigVec[re][channel] /= (tmpRe[freq] + min_pos); maxEigVec[im][channel] /= (tmpRe[freq] + min_pos); } } // st(:,freq) = maxEigVec(:,freq)./maxEigVec(1,freq); tmpRe[freq] = maxEigVec[re][0] * maxEigVec[re][0] + maxEigVec[im][0] * maxEigVec[im][0]; for (channel = 0; channel < nChannel; channel++) { steerVector[channel][re] = (maxEigVec[re][channel] * maxEigVec[re][0] + maxEigVec[im][channel] * maxEigVec[im][0]) / (tmpRe[freq] + min_pos); steerVector[channel][im] = (maxEigVec[im][channel] * maxEigVec[re][0] - maxEigVec[re][channel] * maxEigVec[im][0]) / (tmpRe[freq] + min_pos); } // st(:, freq) = st(:, freq).*sqrt(nSensor / norm(st(:, freq))); tmpRe[freq] = 0.0; for (channel = 0; channel < nChannel; channel++) tmpRe[freq] += steerVector[channel][re] * steerVector[channel][re] + steerVector[channel][im] * steerVector[channel][im]; tmpRe[freq] = sqrt(tmpRe[freq]); tmpRe[freq] = sqrt(nChannel / (tmpRe[freq] + min_pos)); for (channel = 0; channel < nChannel; channel++) { steerVector[channel][re] = steerVector[channel][re] * tmpRe[freq]; steerVector[channel][im] = steerVector[channel][im] * tmpRe[freq]; } } // else // st(:, freq) = ones(nSensor, 1) ./ nSensor; else { for (channel = 0; channel < nChannel; channel++) { steerVector[channel][re] = 1.0 / nChannel; /* floating-point division; integer 1/nChannel would always yield 0 */ steerVector[channel][im] = 0.0; } } //Rx(:,:,freq) = RxTmp; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Rx[re][channel][channel2] = RxTmp2[re][channel][channel2]; Rx[im][channel][channel2] = RxTmp2[im][channel][channel2]; } } //Rn(:, : , freq) = RnTmp; for (channel = 0; channel < nChannel; channel++) { for (channel2 = 0; channel2 < nChannel; channel2++) { Rn[re][channel][channel2] = RnTmp2[re][channel][channel2]; Rn[im][channel][channel2] = RnTmp2[im][channel][channel2]; } } LambdaN[freq] = gamma * (LambdaN[freq] + lambdaYInv[freq]); LambdaX[freq] = gamma * (LambdaX[freq] + 1.0); }
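/*
 * A minimal illustrative sketch (not part of the original source): throughout this
 * file a complex spectrum is stored interleaved per bin, with re = freq + freq
 * indexing the real part and im = re + 1 the imaginary part, and every complex
 * multiply-accumulate is expanded by hand as below. The helper name is hypothetical.
 */
static inline void complex_mac(double* acc_re, double* acc_im,
                               double a_re, double a_im,
                               double b_re, double b_im) {
    // acc += a * b for complex a and b: (ar*br - ai*bi) + j(ar*bi + ai*br)
    *acc_re += a_re * b_re - a_im * b_im;
    *acc_im += a_re * b_im + a_im * b_re;
}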
memdbg.h
/* * Based on John the Ripper and modified to integrate with aircrack * * John the Ripper copyright and license. * * John the Ripper password cracker, * Copyright (c) 1996-2013 by Solar Designer. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * As a special exception to the GNU General Public License terms, * permission is hereby granted to link the code of this program, with or * without modification, with any version of the OpenSSL library and/or any * version of unRAR, and to distribute such linked combinations. You must * obey the GNU GPL in all respects for all of the code used other than * OpenSSL and unRAR. If you modify this program, you may extend this * exception to your version of the program, but you are not obligated to * do so. (In other words, you may release your derived work under pure * GNU GPL version 2 or later as published by the FSF.) * * (This exception from the GNU GPL is not required for the core tree of * John the Ripper, but arguably it is required for -jumbo.) * * Relaxed terms for certain components. * * In addition or alternatively to the license above, many components are * available to you under more relaxed terms (most commonly under cut-down * BSD license) as specified in the corresponding source files. * * For more information on John the Ripper licensing please visit: * * http://www.openwall.com/john/doc/LICENSE.shtml * * This header file should be the LAST header file included within every * .c file within the project. If there are .h files that have actual * code in them, then this header should be the last include within that * .h file, and that .h file should be the last one included within the * .c file. * ****** NOTE ***** */ #if !defined (__MEM_DBG_H_) #define __MEM_DBG_H_ // values to use within the MemDbg_Validate() function. #define MEMDBG_VALIDATE_MIN 0 #define MEMDBG_VALIDATE_DEEP 1 #define MEMDBG_VALIDATE_DEEPER 2 #define MEMDBG_VALIDATE_DEEPEST 3 #include <stdio.h> #include <stdlib.h> #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #include <string.h> #if defined (MEMDBG_ON) /* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ /* * memdbg.h * Memory management debugging (at runtime) * * memdbg contains routines to detect and report memory * problems, such as double frees, passing bad pointers to * free, and most buffer overwrites. Tracking of non-freed * data can also be reported, exposing memory leaks.
* * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. */ /* these functions can be called by client code. Normally MemDbg_Used() and * MemDbg_Display() would be called at program exit. That will dump a list * of any memory that was not released. MemDbg_Validate() can be called * pretty much any time. That function will walk the memory allocation linked * lists, and squawk if there are problems, such as overwrites, freed memory that * has been written to, etc. It would likely be good to call MemDbg_Validate() * within benchmarking, after every format is tested. * * TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display() * and a function to get the 'current' state of memory as a handle. Thus, a * format self test could get a handle BEFORE starting, and then check after, and * ONLY show leaked memory from the time the handle was obtained, which was at the * start of the self test. Thus it would only show leaks from that format test. * * These functions are NOT thread safe. Do not call them within OMP blocks of code. * Normally, these would be called at program exit, or within things like format * self test code, etc, and not within OMP. But this warning is here, so that * it is known NOT to call them within OMP. */ extern size_t MemDbg_Used(int show_freed); extern void MemDbg_Display(FILE *); extern void MemDbg_Validate(int level); extern void MemDbg_Validate_msg(int level, const char *pMsg); extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData); /* these functions should almost NEVER be called by any client code. They * are listed here, because the macros need to know their names. Client code * should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc(). * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this * function would not be declared here, AND at link time, the function would * not be found. * NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic) * also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is * very slow, and the thread safety required makes it even slower. This is not only talking * about these functions here, BUT malloc/free in general in OMP blocks. AVOID doing that * at almost all costs, and performance will usually go up.
*/ extern void *MEMDBG_alloc(size_t, char *, int); extern void *MEMDBG_realloc(const void *, size_t, char *, int); extern void MEMDBG_free(const void *, char *, int); extern char *MEMDBG_strdup(const char *, char *, int); #if !defined(__MEMDBG__) /* we get here on every file compiled EXCEPT memdbg.c */ #undef malloc #undef realloc #undef free #undef strdup #undef libc_free #undef libc_calloc #undef libc_malloc #define libc_free(a) do {if(a) MEMDBG_libc_free(a); a=0; } while(0) #define libc_malloc(a) MEMDBG_libc_alloc(a) #define libc_calloc(a) MEMDBG_libc_calloc(a) #define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__) #define calloc(a) MEMDBG_calloc((a),__FILE__,__LINE__) #define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__) /* this code mimics JtR's FREE_MEM(a) but does it for any MEMDBG_free(a,F,L) call (a hooked free(a) call) */ #define free(a) do { if (a) MEMDBG_free((a),__FILE__,__LINE__); a=0; } while(0) #define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__) #endif /* pass the file handle to write to (normally stderr) */ #define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \ if (MemDbg_Used(0) > 0) MemDbg_Display(a); \ MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0) typedef struct MEMDBG_HANDLE_t { unsigned id; unsigned alloc_cnt; size_t mem_size; } MEMDBG_HANDLE; /* * these functions allow taking a memory snapshot, calling some code, then validating that memory * is the same after the code. This will help catch memory leaks and other such problems, within * formats and such. Simply get the snapshot, run self tests (or other), and when it exits, check * the snapshot to make sure nothing leaked. */ /* returning a struct (or passing one as a param) is not super efficient, but this is done so infrequently that it is not an issue. */ MEMDBG_HANDLE MEMDBG_getSnapshot(int id); /* will not exit on leaks. Does exit on memory overwrite corruption. */ void MEMDBG_checkSnapshot(MEMDBG_HANDLE); /* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */ void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks); /* * the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate * so it is not flagged as a leak by these HANDLE snapshot functions. 'tiny' memory is expected * to leak, until program exit. At that time, any that was not freed, will be shown as leaked. * THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe. */ void MEMDBG_tag_mem_from_alloc_tiny(void *); #else /* NOTE, we DO keep one special function here. We make free a little * smarter. This function gets used, even when we do NOT compile with * any memory debugging on. This makes free work more like C++ delete, * in that it is valid to call it on a NULL. Also, it sets the pointer * to NULL, so that we can call free(x) on x multiple times, without * causing a crash. NOTE, the multiple frees SHOULD be caught when * someone builds and runs with MEMDBG_ON. But when it is off, we do * try to protect the program.
*/ #undef libc_free #undef libc_calloc #undef libc_malloc #define libc_free(a) do {if(a) MEMDBG_libc_free(a); a=0; } while(0) #define libc_malloc(a) MEMDBG_libc_alloc(a) #define libc_calloc(a) MEMDBG_libc_calloc(a) #if !defined(__MEMDBG__) /* this code mimics JtR's FREE_MEM(a) but does it for any normal free(a) call */ //extern void MEMDBG_off_free(void *a); //#define free(a) do { if(a) MEMDBG_off_free(a); a=0; } while(0) #endif #define MemDbg_Used(a) 0 #define MemDbg_Display(a) #define MemDbg_Validate(a) #define MemDbg_Validate_msg(a,b) #define MemDbg_Validate_msg2(a,b,c) #define MEMDBG_PROGRAM_EXIT_CHECKS(a) #define MEMDBG_tag_mem_from_alloc_tiny(a) #define MEMDBG_HANDLE int #define MEMDBG_getSnapshot(a) 0 #define MEMDBG_checkSnapshot(a) if(a) printf(" \b") #define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b") #endif /* MEMDBG_ON */ extern void MEMDBG_libc_free(void *); extern void *MEMDBG_libc_alloc(size_t size); extern void *MEMDBG_libc_calloc(size_t size); #endif /* __MEM_DBG_H_ */
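/*
 * A hedged usage sketch (not part of the original header), kept under #if 0.
 * It follows the pattern the comments above describe: include memdbg.h last,
 * allocate through the hooked macros, and run the exit checks once at shutdown.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "memdbg.h" /* must be the LAST include, per the note at the top */

int example_main(void)
{
	char *p = malloc(32); /* with MEMDBG_ON, expands to MEMDBG_alloc(32, __FILE__, __LINE__) */
	free(p);              /* hooked free(); with MEMDBG_ON it also sets p to NULL */
	MEMDBG_PROGRAM_EXIT_CHECKS(stderr); /* dump leaks, then run the deepest validation */
	return 0;
}
#endif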
ast-dump-openmp-begin-declare-variant_6.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 0; } #pragma omp begin declare variant match(implementation={vendor(ibm)}) int also_after(void) { return 1; } int also_before(void) { return 2; } #pragma omp end declare variant int also_after(void) { return 0; } int main(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we see the specialization in the AST // - we do use the original pointers for the calls as the variants are not applicable (this is not the ibm compiler). // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(ibm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(ibm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 main 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:16, line:25:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr 
[[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_31:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
opencl_office2010_fmt_plug.c
/* MS Office 2010 cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> * * OpenCL support by magnum. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_office2010; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_office2010); #else #include "sha.h" #include "aes.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "unicode.h" #include "common-opencl.h" #include "office_common.h" #include "config.h" #define PLAINTEXT_LENGTH 51 #define UNICODE_LENGTH 104 /* In octets, including 0x80 */ #define FORMAT_LABEL "office2010-opencl" #define FORMAT_NAME "MS Office 2010" #define OCL_ALGORITHM_NAME "SHA1 OpenCL" #define CPU_ALGORITHM_NAME " AES" #define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME #define BENCHMARK_COMMENT " (100,000 iterations)" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_LENGTH 16 #define SALT_SIZE sizeof(*cur_salt) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { /* 2010-Default_myhovercraftisfullofeels_.docx */ {"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"}, /* 2010-Default_myhovercraftisfullofeels_.dotx */ {"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"}, /* 2010-Default_myhovercraftisfullofeels_.xlsb */ {"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"}, /* 2010-Default_myhovercraftisfullofeels_.xlsx */ {"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"}, {NULL} }; static ms_office_custom_salt *cur_salt; static int *cracked, any_cracked; static char *saved_key; /* Password encoded in UCS-2 */ static int *saved_len; /* UCS-2 password length, in octets */ static char *saved_salt; static unsigned char *key; /* Output key from kernel */ static int new_keys, spincount; static struct fmt_main *self; static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key, cl_spincount; static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key; static cl_kernel GenerateSHA1pwhash, Generate2010key; #define HASH_LOOPS 500 /* Lower figure gives less X hogging */ #define ITERATIONS 100000 #define STEP 0 #define SEED 128 static const char * warn[] = { "xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: " }; static int split_events[] = { 3, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t s; s = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel)); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, Generate2010key)); return s; } static void create_clobj(size_t gws, struct fmt_main *self) { int i; int bench_len = strlen(tests[0].plaintext) * 2; gws *= ocl_v_width; pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key"); memset(saved_key, 0, UNICODE_LENGTH * gws); pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len"); for (i = 0; i < gws; i++) saved_len[i] = bench_len; pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt"); memset(saved_salt, 0, SALT_LENGTH); cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device state buffer"); pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 32 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 32 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 32 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory verifier keys"); memset(key, 0, 32 * gws); cl_spincount = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(cl_int), &spincount, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping spincount"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error 
setting argument 1"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(Generate2010key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(Generate2010key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(Generate2010key, 2, sizeof(cl_mem), (void*)&cl_spincount), "Error setting argument 2"); cracked = mem_alloc(sizeof(*cracked) * gws); } static void release_clobj(void) { if (cracked) { HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings"); HANDLE_CLERROR(clReleaseMemObject(cl_spincount), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer"); HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer"); MEM_FREE(cracked); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(Generate2010key), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void clear_keys(void) { memset(saved_key, 0, UNICODE_LENGTH * global_work_size * ocl_v_width); memset(saved_len, 0, sizeof(*saved_len) * global_work_size * ocl_v_width); } static void set_key(char *key, int index) { UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH]; /* convert key to UTF-16LE */ saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (saved_len[index] < 0) saved_len[index] = strlen16(utfkey); /* Prepare for GPU */ utfkey[saved_len[index]] = 0x80; saved_len[index] <<= 1; new_keys = 1; } static void set_salt(void *salt) { cur_salt = (ms_office_custom_salt *)salt; memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH); spincount = cur_salt->spinCount; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt"); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_spincount, CL_FALSE, 0, 4, &spincount, 0, NULL, NULL), "failed in clEnqueueWriteBuffer spincount"); } static void init(struct fmt_main *_self) { static char 
valgo[32] = ""; self = _self; opencl_prepare_dev(gpu_id); if ((ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int))) > 1) { /* Run vectorized kernel */ snprintf(valgo, sizeof(valgo), OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width); self->params.algorithm_name = valgo; } if (options.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u", HASH_LOOPS, UNICODE_LENGTH, ocl_v_width); opencl_init("$JOHN/kernels/office2010_kernel.cl", gpu_id, build_opts); // create kernel to execute GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); Generate2010key = clCreateKernel(program[gpu_id], "Generate2010key", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 3, self, create_clobj, release_clobj, 2 * ocl_v_width * UNICODE_LENGTH, 0, db); // Auto tune execution from shared/included code. autotune_run(self, ITERATIONS + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL)); } } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t gws, scalar_gws; size_t *lws = local_work_size ? &local_work_size : NULL; gws = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size); scalar_gws = gws * ocl_v_width; if (any_cracked) { memset(cracked, 0, count * sizeof(*cracked)); any_cracked = 0; } if (ocl_autotune_running || new_keys) { BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key"); BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len"); new_keys = 0; } BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel"); for (index = 0; index < (ocl_autotune_running ? 
1 : spincount / HASH_LOOPS); index++) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel"); // read back verifier keys BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 32 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back"); if (!ocl_autotune_running) { #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { SHA_CTX ctx; unsigned char hash[20]; unsigned char decryptedVerifierHashInputBytes[16]; unsigned char decryptedVerifierHashBytes[32]; ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[32*index], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16); ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[32*index+16], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32); SHA1_Init(&ctx); SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16); SHA1_Final(hash, &ctx); if (!memcmp(hash, decryptedVerifierHashBytes, 20)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static char *get_key(int index) { UTF16 buf[PLAINTEXT_LENGTH + 1]; memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]); buf[saved_len[index] >> 1] = 0; return (char*)utf16_to_enc(buf); } struct fmt_main fmt_opencl_office2010 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP, { "iteration count", }, { FORMAT_TAG_OFFICE_2010 }, tests }, { init, done, reset, fmt_default_prepare, ms_office_common_valid_2010, fmt_default_split, fmt_default_binary, ms_office_common_get_salt, { ms_office_common_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
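/*
 * A hedged note (inferred from the test vectors and ms_office_common_* helpers
 * above, not stated explicitly in this file): the ciphertext layout appears to be
 *   $office$*2010*<spinCount>*<keyBits>*<saltLen>*<salt>*<encVerifier>*<encVerifierHash>
 * where spinCount = 100000 matches the ITERATIONS constant used for auto-tuning.
 * Illustrative extraction of the numeric fields:
 */
#if 0
unsigned int spincnt, keybits, saltlen;
sscanf(ciphertext, "$office$*2010*%u*%u*%u*", &spincnt, &keybits, &saltlen);
#endif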
mergesort.c
#include "stdio.h" #include <omp.h> void merge(int a[], int l, int m, int r){ int temp1[m - l +1], temp2[r - m]; for(int i = 0; i < (m-l+1); i++){ temp1[i] = a[l + i]; } for(int i = 0; i < (r-m); i++){ temp2[i] = a[m + i +1]; } int i = 0, j = 0, k = l; while(i < (m-l+1) && j < (r-m)){ if(temp1[i] > temp2[j]){ a[k++] = temp2[j++]; } if(temp1[i] < temp2[j]){ a[k++] = temp1[i++]; } } while(i < (m-l+1)){ a[k++] = temp1[i++]; } while(j < (r-m)){ a[k++] = temp2[j++]; } } void mergesort(int a[], int l, int r){ if(l < r){ int mid = (l + r) / 2; #pragma omp parallel sections num_threads(1) { #pragma omp section { mergesort(a, l, mid); } #pragma omp section { mergesort(a, mid+1, r); } } merge(a, l, mid, r); } } int main(){ double start = omp_get_wtime(); int n = 1000000; int a[n]; for(int i = 0; i < n; i++){ a[i] = n - i; } mergesort(a, 0, n-1); printf("Time required = %f\n", omp_get_wtime() - start); }
GB_binop__band_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__band_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__band_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__band_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__band_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint8) // A*D function (colscale): GB (_AxD__band_uint8) // D*A function (rowscale): GB (_DxB__band_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__band_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__band_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint8) // C=scalar+B GB (_bind1st__band_uint8) // C=scalar+B' GB (_bind1st_tran__band_uint8) // C=A+scalar GB (_bind2nd__band_uint8) // C=A'+scalar GB (_bind2nd_tran__band_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) & (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__band_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__band_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__band_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__band_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__band_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__band_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__band_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__band_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__band_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__band_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__band_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x) & (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__band_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) & (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) & (aij) ; \ } GrB_Info GB (_bind1st_tran__band_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB (_bind2nd_tran__band_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
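
//------------------------------------------------------------------------------
// usage sketch (not part of the generated file): every kernel above applies
// z = x & y, the bitwise AND on uint8_t. Assuming the GxB_BAND_UINT8 binary
// operator of SuiteSparse:GraphBLAS, a user-level call that reaches this
// family looks roughly like the following (error handling elided):
//
//      GrB_Matrix C, A, B ;    // all of type GrB_UINT8, same dimensions
//      GrB_eWiseMult (C, NULL, NULL, GxB_BAND_UINT8, A, B, NULL) ;
//
// Which kernel runs (_AemultB_08, _02, _04, _bitmap, ...) depends on the
// sparsity formats of C, M, A, and B.
//------------------------------------------------------------------------------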
work.c
/********************************************************************
 * BenchIT - Performance Measurement for Scientific Applications
 * Contact: [email protected]
 *
 * $Id: work.c 1 2009-09-11 12:26:19Z william $
 * $URL: svn+ssh://[email protected]/svn-base/benchit-root/BenchITv6/kernel/numerical/gemv/C/OpenMP/0/double/work.c $
 * For license details see COPYING in the package base directory
 *******************************************************************/
/* Kernel: C DGEMV kernel
 *******************************************************************/

#include "work.h"

/* y = beta*y + alpha*A^T*x for the row-major sizeVector x sizeAusgabe
   array A, traversing A row by row (i outer, j inner) */
void ij_(int sizeVector, int sizeAusgabe, double alpha, double beta,
         double *x, double *A, double *y)
{
  int i, j;
  double temp = 0.0;

  #pragma omp parallel for private(j)
  for (j = 0; j < sizeAusgabe; j++)
  {
    y[j] = beta * y[j];
  }
  //
  // now : x=x, A=A, y=beta*y
  //
  #pragma omp parallel for private(temp, j, i)
  for (i = 0; i < sizeVector; i++)
  {
    temp = alpha * x[i];
    for (j = 0; j < sizeAusgabe; j++)
    {
      /* different i-iterations (threads) update the same y[j], so the
         update must be atomic to avoid a data race */
      #pragma omp atomic
      y[j] += A[i * sizeAusgabe + j] * temp;
    }
  }
}

/* same result, but one output element per outer iteration (j outer),
   so each y[j] is written by exactly one thread */
void ji_(int sizeVector, int sizeAusgabe, double alpha, double beta,
         double *x, double *A, double *y)
{
  int i, j;
  double temp = 0.0;

  #pragma omp parallel for private(j)
  for (j = 0; j < sizeAusgabe; j++)
  {
    y[j] = beta * y[j];
  }
  //
  // now : x=x, A=A, y=beta*y
  //
  #pragma omp parallel for private(temp, j, i)
  for (j = 0; j < sizeAusgabe; j++)
  {
    temp = 0.0;
    for (i = 0; i < sizeVector; i++)
    {
      temp = temp + A[i * sizeAusgabe + j] * x[i];
    }
    temp = temp * alpha;
    y[j] = y[j] + temp;
  }
}
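
/*
 * A minimal hypothetical driver for the kernels above (not part of the
 * BenchIT harness; values chosen only to show the indexing convention):
 *
 *     double x[2] = {1.0, 2.0};
 *     double A[2*3] = {1, 2, 3, 4, 5, 6};   // row i starts at A[i*3]
 *     double y[3] = {0.0, 0.0, 0.0};
 *     ji_(2, 3, 1.0, 0.0, x, A, y);         // y becomes {9, 12, 15}
 */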
jacobi.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <omp.h>

static int N;
static int IT_MAXIMAS;
static int SEED;
static int verbose, threads;
static double CONVERGENCIA;

#define SEPARADOR "------------------------------------\n"

// Returns the time in seconds
double get_timestamp();

// Parse the command-line arguments
void parse_arguments(int argc, char *argv[]);

// Sequential execution
// Returns the number of iterations performed
int run(double *A, double *b, double *x_old, double *x_new)
{
  int itr;
  int row, col;
  double dot;
  double dif;
  double sqdif;
  double *xtemp;

  // Loop until convergence or the iteration limit is reached
  itr = 0;
  do
  {
    // Jacobi iteration:
    // x_new[i] = (b[i] - sum_{j != i} A[i][j]*x_old[j]) / A[i][i]
    for (row = 0; row < N; row++)
    {
      dot = 0.0;
      for (col = 0; col < N; col++)
      {
        if (row != col)
          dot += A[row + col*N] * x_old[col];
      }
      x_new[row] = (b[row] - dot) / A[row + row*N];
    }

    // Swap the pointers (local to this function)
    xtemp = x_old;
    x_old = x_new;
    x_new = xtemp;

    // Convergence check: distance between consecutive iterates
    sqdif = 0.0;
    for (row = 0; row < N; row++)
    {
      dif = x_new[row] - x_old[row];
      sqdif += dif * dif;
    }

    itr++;
  } while ((itr < IT_MAXIMAS) && (sqrt(sqdif) > CONVERGENCIA));

  return itr;
}

// Parallel execution
// Returns the number of iterations performed
int run_parallel(double *A, double *b, double *x_old, double *x_new, int threads)
{
  int itr;
  int row, col;
  double dot;
  double dif;
  double sqdif;
  double *xtemp;

  omp_set_num_threads(threads);

  // Loop until convergence or the iteration limit is reached
  itr = 0;
  do
  {
    // Jacobi iteration
    #pragma omp parallel private(row, col, dot)
    {
      #pragma omp for
      for (row = 0; row < N; row++)
      {
        dot = 0.0;
        for (col = 0; col < N; col++)
        {
          if (row != col)
            dot += A[row + col*N] * x_old[col];
        }
        x_new[row] = (b[row] - dot) / A[row + row*N];
      }
    }

    // Swap the pointers (local to this function)
    xtemp = x_old;
    x_old = x_new;
    x_new = xtemp;

    // Convergence check
    sqdif = 0.0;
    for (row = 0; row < N; row++)
    {
      dif = x_new[row] - x_old[row];
      sqdif += dif * dif;
    }

    itr++;
  } while ((itr < IT_MAXIMAS) && (sqrt(sqdif) > CONVERGENCIA));

  return itr;
}

int main(int argc, char *argv[])
{
  // Parse the input arguments
  parse_arguments(argc, argv);

  double *A     = malloc(N*N*sizeof(double));
  double *b     = malloc(N*sizeof(double));
  double *x_old = malloc(N*sizeof(double));
  double *x_new = malloc(N*sizeof(double));

  if (verbose == 1)
  {
    printf(SEPARADOR);
    printf("matrix size: %dx%d\n", N, N);
    printf("maximum iterations: %d\n", IT_MAXIMAS);
    printf("convergence threshold: %lf\n", CONVERGENCIA);
    printf(SEPARADOR);
  }

  double err;
  double inicio_abs;
  double final_abs;
  double final_sol;
  int itr;
  double inicio_sol;

  // verbose = 0 -> no output
  // verbose = 1 -> all output
  // verbose = 2 -> parallel output only
  // verbose = 3 -> sequential output only
  if (verbose == 1 || verbose == 3 || verbose == 0)
  {
    inicio_abs = get_timestamp();

    // SEQUENTIAL
    // Initialize the data
    srand(SEED);
    for (int row = 0; row < N; row++)
    {
      double rowsum = 0.0;
      for (int col = 0; col < N; col++)
      {
        double value = rand()/(double)RAND_MAX;
        A[row + col*N] = value;
        rowsum += value;
      }
      // Make the matrix diagonally dominant so Jacobi converges
      A[row + row*N] += rowsum;
      b[row] = rand()/(double)RAND_MAX;
      x_old[row] = 0.0;
    }

    // Run Jacobi
    inicio_sol = get_timestamp();
    itr = run(A, b, x_old, x_new);
    final_sol = get_timestamp();

    // Check the error. The pointer swap inside run() is local to the
    // callee, so x_old here may hold the second-to-last iterate; after
    // convergence the two buffers differ by less than the threshold.
    err = 0.0;
    for (int row = 0; row < N; row++)
    {
      double tmp = 0.0;
      for (int col = 0; col < N; col++)
      {
        tmp += A[row + col*N] * x_old[col];
      }
      tmp = b[row] - tmp;
      err += tmp*tmp;
    }
    err = sqrt(err);
    final_abs = get_timestamp();
  }

  if (verbose == 1)
  {
    printf("SEQUENTIAL\n");
    printf("Error          = %lf\n", err);
    printf("Iterations     = %d\n", itr);
    printf("Total time     = %lf seconds\n", (final_abs-inicio_abs));
    printf("Solution time  = %lf seconds\n", (final_sol-inicio_sol));
    if (itr == IT_MAXIMAS)
      printf("DID NOT CONVERGE\n");
    printf(SEPARADOR);
  }
  if (verbose == 3)
  {
    printf("size: \t %d \t iterations: \t %d \t threads: \t %d \t time: \t %lf \t seconds\n",
           (N*N), itr, threads, (final_sol-inicio_sol));
  }

  if (verbose == 1 || verbose == 2 || verbose == 0)
  {
    // PARALLEL
    // Initialize the data (same seed, hence the same system as above)
    srand(SEED);
    for (int row = 0; row < N; row++)
    {
      double rowsum = 0.0;
      for (int col = 0; col < N; col++)
      {
        double value = rand()/(double)RAND_MAX;
        A[row + col*N] = value;
        rowsum += value;
      }
      // Make the matrix diagonally dominant so Jacobi converges
      A[row + row*N] += rowsum;
      b[row] = rand()/(double)RAND_MAX;
      x_old[row] = 0.0;
    }

    // Run parallel Jacobi
    inicio_sol = get_timestamp();
    itr = run_parallel(A, b, x_old, x_new, threads);
    final_sol = get_timestamp();

    // Check the error
    err = 0.0;
    for (int row = 0; row < N; row++)
    {
      double tmp = 0.0;
      for (int col = 0; col < N; col++)
      {
        tmp += A[row + col*N] * x_old[col];
      }
      tmp = b[row] - tmp;
      err += tmp*tmp;
    }
    err = sqrt(err);
    final_abs = get_timestamp();
  }

  if (verbose == 1)
  {
    printf("PARALLEL\n");
    printf("Error          = %lf\n", err);
    printf("Iterations     = %d\n", itr);
    printf("Total time     = %lf seconds\n", (final_abs-inicio_abs));
    printf("Solution time  = %lf seconds\n", (final_sol-inicio_sol));
    if (itr == IT_MAXIMAS)
      printf("DID NOT CONVERGE\n");
    printf(SEPARADOR);
  }
  if (verbose == 2)
  {
    printf("size: \t %d \t iterations: \t %d \t threads: \t %d \t time: \t %lf \t seconds\n",
           (N*N), itr, threads, (final_sol-inicio_sol));
  }

  // Free memory
  free(A);
  free(b);
  free(x_old);
  free(x_new);

  return 0;
}

// Input helpers
double get_timestamp()
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec*1e-6;
}

int parse_int(const char *str)
{
  char *next;
  // strtol (not strtoul) so that negative input is reported as invalid
  int value = strtol(str, &next, 10);
  return strlen(next) ? -1 : value;
}

double parse_double(const char *str)
{
  char *next;
  double value = strtod(str, &next);
  return strlen(next) ? -1 : value;
}

void parse_arguments(int argc, char *argv[])
{
  // Default values
  N = 1000;
  IT_MAXIMAS = 20000;
  CONVERGENCIA = 0.0001;
  SEED = 0;
  verbose = 1;
  threads = 4;

  for (int i = 1; i < argc; i++)
  {
    if (!strcmp(argv[i], "--convergencia") || !strcmp(argv[i], "-c"))
    {
      if (++i >= argc || (CONVERGENCIA = parse_double(argv[i])) < 0)
      {
        printf("Invalid convergence threshold\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--iteraciones") || !strcmp(argv[i], "-i"))
    {
      if (++i >= argc || (IT_MAXIMAS = parse_int(argv[i])) < 0)
      {
        printf("Invalid number of iterations\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--norden") || !strcmp(argv[i], "-n"))
    {
      if (++i >= argc || (N = parse_int(argv[i])) < 0)
      {
        printf("Invalid matrix order\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--seed") || !strcmp(argv[i], "-s"))
    {
      if (++i >= argc || (SEED = parse_int(argv[i])) < 0)
      {
        printf("Invalid seed\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--verbose") || !strcmp(argv[i], "-v"))
    {
      if (++i >= argc || (verbose = parse_int(argv[i])) < 0)
      {
        printf("Invalid verbosity level\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--threads") || !strcmp(argv[i], "-t"))
    {
      if (++i >= argc || (threads = parse_int(argv[i])) < 0)
      {
        printf("Invalid number of threads\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
    {
      printf("\n");
      printf("Usage: ./jacobi_omp [OPTIONS]\n\n");
      printf("Options:\n");
      printf("  -h  --help              Print this message\n");
      printf("  -c  --convergencia  C   Set the convergence threshold\n");
      printf("  -i  --iteraciones   I   Set the maximum number of iterations\n");
      printf("  -n  --norden        N   Set the order of the matrix (n)\n");
      printf("  -s  --seed          S   Set the random seed\n");
      printf("  -v  --verbose       V   Set the verbosity level\n");
      printf("  -t  --threads       T   Set the number of threads\n");
      printf("\n");
      exit(0);
    }
    else
    {
      printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
      exit(1);
    }
  }
}
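
/*
 * Example invocations (the flags are defined in parse_arguments above; the
 * numbers are illustrative only):
 *
 *   ./jacobi_omp                      defaults: n=1000, 4 threads
 *   ./jacobi_omp -n 2000 -t 8 -v 2    2000x2000 system, 8 threads, print
 *                                     only the parallel timing line
 */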
GB_unop__tgamma_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__tgamma_fp64_fp64) // op(A') function: GB (_unop_tran__tgamma_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = tgamma (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = tgamma (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = tgamma (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TGAMMA || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__tgamma_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = tgamma (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = tgamma (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__tgamma_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
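
//------------------------------------------------------------------------------
// usage sketch (not part of the generated file): assuming the GxB_TGAMMA_FP64
// unary operator of SuiteSparse:GraphBLAS, the kernels above are reached by a
// call of roughly this form (error handling elided):
//
//      GrB_Matrix C, A ;       // both of type GrB_FP64
//      GrB_apply (C, NULL, NULL, GxB_TGAMMA_FP64, A, NULL) ;
//------------------------------------------------------------------------------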
diffusion_grid.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef CORE_DIFFUSION_GRID_H_
#define CORE_DIFFUSION_GRID_H_

#include <assert.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "core/util/root.h"

#include "core/container/math_array.h"
#include "core/container/parallel_resize_vector.h"
#include "core/param/param.h"
#include "core/simulation.h"
#include "core/util/log.h"
#include "core/util/math.h"

namespace bdm {

/// A class that computes the diffusion of extracellular substances.
/// It maintains the concentration and gradient of a single substance.
class DiffusionGrid {
 public:
  explicit DiffusionGrid(TRootIOCtor* p) {}
  DiffusionGrid(int substance_id, std::string substance_name, double dc,
                double mu, int resolution = 11, unsigned int diffusion_step = 1)
      : substance_(substance_id),
        substance_name_(substance_name),
        dc_({{1 - dc, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6}}),
        mu_(mu),
        resolution_(resolution),
        diffusion_step_(diffusion_step) {}

  virtual ~DiffusionGrid() {}

  /// @brief      Initializes the grid by calculating the grid dimensions
  ///             and number of boxes along the axis from the input arguments
  ///
  /// @param[in]  grid_dimensions  The grid dimensions
  ///
  void Initialize(const std::array<int32_t, 6>& grid_dimensions) {
    // Get grid properties from neighbor grid
    grid_dimensions_ = grid_dimensions;

    assert(resolution_ > 0 && "The resolution cannot be zero!");
    num_boxes_axis_[0] = resolution_;
    num_boxes_axis_[1] = resolution_;
    num_boxes_axis_[2] = resolution_;

    // Example: diffusion grid dimensions from 0-40 and resolution
    // of 4. Resolution must be adjusted otherwise one data point will be
    // missing.
    // Without adjustment:
    //   box_length_: 10
    //   data points {0, 10, 20, 30} - 40 will be missing!
    // With adjustment:
    //   box_length_: 13.3
    //   data points: {0, 13.3, 26.6, 39.9}
    box_length_ = (grid_dimensions_[1] - grid_dimensions_[0]) /
                  static_cast<double>(resolution_ - 1);

    ParametersCheck();

    box_volume_ = box_length_ * box_length_ * box_length_;

    assert(box_length_ > 0 &&
           "Box length of diffusion grid must be greater than zero!");

    // Set the parity of the number of boxes along the dimensions (since all
    // dimensions are the same, we just take the x-axis here)
    parity_ = num_boxes_axis_[0] % 2;

    total_num_boxes_ =
        num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

    // Allocate memory for the concentration and gradient arrays
    c1_.resize(total_num_boxes_);
    c2_.resize(total_num_boxes_);
    gradients_.resize(3 * total_num_boxes_);

    // If we are utilising the Runge-Kutta method we need to resize an
    // additional vector; this will be used in estimating the concentration
    // between diffusion steps.
auto* param = Simulation::GetActive()->GetParam(); if (param->diffusion_type == "RK") { r1_.resize(total_num_boxes_); } initialized_ = true; } void ParametersCheck() { if (((1 - dc_[0]) * dt_) / (box_length_ * box_length_) >= (1.0 / 6)) { Log::Fatal( "DiffusionGrid", "The specified parameters of the diffusion grid with substance [", substance_name_, "] will result in unphysical behavior (diffusion coefficient = ", (1 - dc_[0]), ", resolution = ", resolution_, "). Please refer to the user guide for more information."); } else if (diffusion_step_ == 0) { Log::Fatal("DiffusionGrid", " The specified amount of diffusion steps for the grid with " "substance [", substance_name_, "] is not greater than or equal to 1, " "correct this and run the simulation again."); } } void RunInitializers() { assert(num_boxes_axis_[0] > 0 && "The number of boxes along an axis was found to be zero!"); if (initializers_.empty()) { return; } auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; // Apply all functions that initialize this diffusion grid for (size_t f = 0; f < initializers_.size(); f++) { for (size_t x = 0; x < nx; x++) { double real_x = grid_dimensions_[0] + x * box_length_; for (size_t y = 0; y < ny; y++) { double real_y = grid_dimensions_[2] + y * box_length_; for (size_t z = 0; z < nz; z++) { double real_z = grid_dimensions_[4] + z * box_length_; std::array<uint32_t, 3> box_coord; box_coord[0] = x; box_coord[1] = y; box_coord[2] = z; size_t idx = GetBoxIndex(box_coord); ChangeConcentrationBy(idx, initializers_[f](real_x, real_y, real_z)); } } } } // Clear the initializer to free up space initializers_.clear(); initializers_.shrink_to_fit(); } /// @brief Updates the grid dimensions, based on the given threshold /// values. 
/// The diffusion grid dimensions must always be larger
  ///             than the neighbor grid dimensions, so that each simulation
  ///             object can obtain its local concentration / gradient
  ///
  /// @param[in]  threshold_dimensions  The threshold values
  ///
  void Update(const std::array<int32_t, 2>& threshold_dimensions) {
    // Update the grid dimensions such that each dimension ranges from
    // {threshold_dimensions[0] - threshold_dimensions[1]}
    auto min_gd = threshold_dimensions[0];
    auto max_gd = threshold_dimensions[1];
    grid_dimensions_ = {min_gd, max_gd, min_gd, max_gd, min_gd, max_gd};

    // If the grid is not perfectly divisible along each dimension by the
    // box length, extend the grid so that it is
    int dimension_length = max_gd - min_gd;
    for (int i = 0; i < 3; i++) {
      // fmod returns a double, so the 1e-9 tolerance below is meaningful
      double r = fmod(dimension_length, box_length_);
      if (r > 1e-9) {
        // std::abs for the case that box_length_ > dimension_length
        grid_dimensions_[2 * i + 1] += (box_length_ - r);
      }
    }

    // Calculate by how many boxes each dimension has grown
    int new_dimension_length = grid_dimensions_[1] - grid_dimensions_[0];
    int new_num_boxes = std::ceil(new_dimension_length / box_length_);
    int growth = new_num_boxes - num_boxes_axis_[0];

    if (growth > 0) {
      // Store the old number of boxes along each axis for comparison
      std::array<size_t, 3> tmp_num_boxes_axis = num_boxes_axis_;

      // Increase number of boxes along axis accordingly
      num_boxes_axis_[0] += growth;
      num_boxes_axis_[1] += growth;
      num_boxes_axis_[2] += growth;

      // We need to maintain the parity of the number of boxes along each
      // dimension, otherwise copying of the substances to the increased grid
      // will not be symmetrically done, resulting in shifting of boxes.
      // We add a box in the negative direction, because the only way the
      // parity could have changed is because of adding a box in the positive
      // direction (due to the grid not being perfectly divisible; see above)
      if (num_boxes_axis_[0] % 2 != parity_) {
        for (int i = 0; i < 3; i++) {
          grid_dimensions_[2 * i] -= box_length_;
          num_boxes_axis_[i]++;
        }
      }

      // Temporarily save previous grid data
      auto tmp_c1 = c1_;
      auto tmp_gradients = gradients_;

      c1_.clear();
      c2_.clear();
      gradients_.clear();

      total_num_boxes_ =
          num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

      CopyOldData(tmp_c1, tmp_gradients, tmp_num_boxes_axis);

      assert(total_num_boxes_ >= tmp_num_boxes_axis[0] *
                                     tmp_num_boxes_axis[1] *
                                     tmp_num_boxes_axis[2] &&
             "The diffusion grid tried to shrink! It can only become larger");
    }

    // If we are utilising the Runge-Kutta method we need to resize an
    // additional vector; this will be used in estimating the concentration
    // between diffusion steps.
    auto* param = Simulation::GetActive()->GetParam();
    if (param->diffusion_type == "RK") {
      r1_.resize(total_num_boxes_);
    }
  }

  /// Copies the concentration and gradient values to the new
  /// (larger) grid.
In the 2D case it looks like the following: /// /// [0 0 0 0] /// [v1 v2] --> [0 v1 v2 0] /// [v3 v4] --> [0 v3 v4 0] /// [0 0 0 0] /// /// The dimensions are doubled in this case from 2x2 to 4x4 /// If the dimensions would be increased from 2x2 to 3x3, it will still /// be increased to 4x4 in order for GetBoxIndex to function correctly /// void CopyOldData(const ParallelResizeVector<double>& old_c1, const ParallelResizeVector<double>& old_gradients, const std::array<size_t, 3>& old_num_boxes_axis) { // Allocate more memory for the grid data arrays c1_.resize(total_num_boxes_); c2_.resize(total_num_boxes_); gradients_.resize(3 * total_num_boxes_); auto incr_dim_x = num_boxes_axis_[0] - old_num_boxes_axis[0]; auto incr_dim_y = num_boxes_axis_[1] - old_num_boxes_axis[1]; auto incr_dim_z = num_boxes_axis_[2] - old_num_boxes_axis[2]; int off_x = incr_dim_x / 2; int off_y = incr_dim_y / 2; int off_z = incr_dim_z / 2; int num_box_xy = num_boxes_axis_[0] * num_boxes_axis_[1]; int old_box_xy = old_num_boxes_axis[0] * old_num_boxes_axis[1]; int new_origin = off_z * (num_boxes_axis_[0] * num_boxes_axis_[1]) + off_y * num_boxes_axis_[0] + off_x; for (size_t k = 0; k < old_num_boxes_axis[2]; k++) { int offset = new_origin + k * num_box_xy; for (size_t j = 0; j < old_num_boxes_axis[1]; j++) { if (j != 0) { offset += num_boxes_axis_[0]; } for (size_t i = 0; i < old_num_boxes_axis[0]; i++) { auto idx = k * old_box_xy + j * old_num_boxes_axis[0] + i; c1_[offset + i] = old_c1[idx]; gradients_[3 * (offset + i)] = old_gradients[3 * idx]; gradients_[3 * (offset + i) + 1] = old_gradients[3 * idx + 1]; gradients_[3 * (offset + i) + 2] = old_gradients[3 * idx + 2]; } } } } /// Solves a 5-point stencil diffusion equation, with leaking-edge /// boundary conditions. Substances are allowed to leave the simulation /// space. This prevents building up concentration at the edges /// void DiffuseWithLeakingEdge() { int nx = num_boxes_axis_[0]; int ny = num_boxes_axis_[1]; int nz = num_boxes_axis_[2]; #define YBF 16 #pragma omp parallel for collapse(2) for (int yy = 0; yy < ny; yy += YBF) { for (int z = 0; z < nz; z++) { // To let the edges bleed we set some diffusion coefficients // to zero. 
This prevents substance building up at the edges auto dc_2_ = dc_; int ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (int y = yy; y < ymax; y++) { dc_2_ = dc_; int x; int c, n, s, b, t; x = 0; c = x + y * nx + z * nx * ny; if (y == 0) { n = c; dc_2_[4] = 0; } else { n = c - nx; } if (y == (ny - 1)) { s = c; dc_2_[3] = 0; } else { s = c + nx; } if (z == 0) { b = c; dc_2_[5] = 0; } else { b = c - nx * ny; } if (z == (nz - 1)) { t = c; dc_2_[6] = 0; } else { t = c + nx * ny; } // x = 0; we leak out substances past this edge (so multiply by 0) c2_[c] = (dc_2_[0] * c1_[c] + 0 * c1_[c] + dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; // x = nx-1; we leak out substances past this edge (so multiply by 0) c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + 0 * c1_[c] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); } // tile ny } // tile nz } // block ny c1_.swap(c2_); } /// Solves a 5-point stencil diffusion equation, with closed-edge /// boundary conditions. Substances are not allowed to leave the simulation /// space. Keep in mind that the concentration can build up at the edges /// void DiffuseWithClosedEdge() { auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x; int c, n, s, b, t; x = 0; c = x + y * nx + z * nx * ny; n = (y == 0) ? c : c - nx; s = (y == ny - 1) ? c : c + nx; b = (z == 0) ? c : c - nx * ny; t = (z == nz - 1) ? c : c + nx * ny; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c] + dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); } // tile ny } // tile nz } // block ny c1_.swap(c2_); } void DiffuseEuler() { // check if diffusion coefficient and decay constant are 0 // i.e. 
if we don't need to calculate diffusion update if (IsFixedSubstance()) { return; } const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, n, s, b, t; c = x + y * nx + z * nx * ny; #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; if (y == 0 || y == (ny - 1) || z == 0 || z == (nz - 1)) { continue; } n = c - nx; s = c + nx; b = c - nx * ny; t = c + nx * ny; c2_[c] = (c1_[c] + d * dt_ * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; } // tile ny } // tile nz } // block ny c1_.swap(c2_); } void DiffuseEulerLeakingEdge() { // check if diffusion coefficient and decay constant are 0 // i.e. if we don't need to calculate diffusion update if (IsFixedSubstance()) { return; } const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; std::array<int, 4> l; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, n, s, b, t; c = x + y * nx + z * nx * ny; l.fill(1); if (y == 0) { n = c; l[0] = 0; } else { n = c - nx; } if (y == ny - 1) { s = c; l[1] = 0; } else { s = c + nx; } if (z == 0) { b = c; l[2] = 0; } else { b = c - nx * ny; } if (z == nz - 1) { t = c; l[3] = 0; } else { t = c + nx * ny; } c2_[c] = (c1_[c] + d * dt_ * (0 - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; c2_[c] = (c1_[c] + d * dt_ * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt_ * (l[0] * c1_[s] - 2 * c1_[c] + l[1] * c1_[n]) * ibl2 + d * dt_ * (l[2] * c1_[b] - 2 * c1_[c] + l[3] * c1_[t]) * ibl2) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; c2_[c] = (c1_[c] + d * dt_ * (c1_[c - 1] - 2 * c1_[c] + 0) * ibl2 + d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); } // tile ny } // tile nz } // block ny c1_.swap(c2_); } void RK() { // check if diffusion coefficient and decay constant are 0 // i.e. 
if we don't need to calculate diffusion update if (IsFixedSubstance()) { return; } const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; double step = diffusion_step_; double h = dt_ / step; #define YBF 16 for (size_t i = 0; i < step; i++) { for (size_t order = 0; order < 2; order++) { #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, n, s, b, t; c = x + y * nx + z * nx * ny; #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; if (y == 0 || y == (ny - 1) || z == 0 || z == (nz - 1)) { continue; } n = c - nx; s = c + nx; b = c - nx * ny; t = c + nx * ny; double h2 = h / 2.0; if (order == 0) { k_[0] = (d * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2); r1_[c] = c1_[c] + (k_[0] * h2); } else if (order == 1) { k_[1] = (d * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2); c2_[c] = c1_[c] + (k_[1] * h); } } ++c; ++n; ++s; ++b; ++t; } // tile ny } // tile nz } // block ny } c1_.swap(c2_); } } void RKLeaking() { // check if diffusion coefficient and decay constant are 0 // i.e. if we don't need to calculate diffusion update if (IsFixedSubstance()) { return; } const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; std::array<int, 6> l; double step = diffusion_step_; double h = dt_ / step; #define YBF 16 for (size_t i = 0; i < step; i++) { for (size_t order = 0; order < 2; order++) { #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, cm, cp, n, s, b, t; c = x + y * nx + z * nx * ny; l.fill(1); if (y == 0) { n = c; l[2] = 0; } else { n = c - nx; } if (y == ny - 1) { s = c; l[3] = 0; } else { s = c + nx; } if (z == 0) { b = c; l[4] = 0; } else { b = c - nx * ny; } if (z == nz - 1) { t = c; l[5] = 0; } else { t = c + nx * ny; } #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; if (x == 0) { cm = c; l[0] = 0; } else { cm = c - 1; } if (y == ny - 1) { cp = c; l[1] = 0; } else { cp = c + 1; } double h2 = h / 2.0; if (order == 0) { k_[0] = d * (l[0] * c1_[cm] - 2 * c1_[c] + l[1] * c1_[cp]) * ibl2 + d * (l[2] * c1_[s] - 2 * c1_[c] + l[3] * c1_[n]) * ibl2 + d * (l[4] * c1_[b] - 2 * c1_[c] + l[5] * c1_[t]) * ibl2; r1_[c] = c1_[c] + (k_[0] * h2); } else if (order == 1) { k_[1] = d * (l[0] * c1_[cm] - 2 * c1_[c] + l[1] * c1_[cp]) * ibl2 + d * (l[2] * c1_[s] - 2 * c1_[c] + l[3] * c1_[n]) * ibl2 + d * (l[4] * c1_[b] - 2 * c1_[c] + l[5] * c1_[t]) * ibl2; c2_[c] = c1_[c] + (k_[1] * h); } } ++c; ++n; ++s; ++b; ++t; } // tile ny } // tile nz } // block ny } c1_.swap(c2_); } } /// Calculates the gradient for each box in the diffusion grid. 
/// The gradient is calculated in each direction (x, y, z) as following: /// /// c(x + box_length_) - c(x - box_length) / (2 * box_length_), /// /// where c(x) implies the concentration at position x /// /// At the edges the gradient is the same as the box next to it void CalculateGradient() { // check if gradient has been calculated once // and if diffusion coefficient and decay constant are 0 // i.e. if we don't need to calculate gradient update if (init_gradient_ && IsFixedSubstance()) { return; } double gd = 1 / (box_length_ * 2); auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; #pragma omp parallel for collapse(2) for (size_t z = 0; z < nz; z++) { for (size_t y = 0; y < ny; y++) { for (size_t x = 0; x < nx; x++) { int c, e, w, n, s, b, t; c = x + y * nx + z * nx * ny; if (x == 0) { e = c; w = c + 2; } else if (x == nx - 1) { e = c - 2; w = c; } else { e = c - 1; w = c + 1; } if (y == 0) { n = c + 2 * nx; s = c; } else if (y == ny - 1) { n = c; s = c - 2 * nx; } else { n = c + nx; s = c - nx; } if (z == 0) { t = c + 2 * nx * ny; b = c; } else if (z == nz - 1) { t = c; b = c - 2 * nx * ny; } else { t = c + nx * ny; b = c - nx * ny; } // Let the gradient point from low to high concentration gradients_[3 * c + 0] = (c1_[w] - c1_[e]) * gd; gradients_[3 * c + 1] = (c1_[n] - c1_[s]) * gd; gradients_[3 * c + 2] = (c1_[t] - c1_[b]) * gd; } } } if (!init_gradient_) { init_gradient_ = true; } } /// Increase the concentration at specified position with specified amount virtual void ChangeConcentrationBy(const Double3& position, double amount) { auto idx = GetBoxIndex(position); ChangeConcentrationBy(idx, amount); } /// Increase the concentration at specified box with specified amount void ChangeConcentrationBy(size_t idx, double amount) { assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); c1_[idx] += amount; if (c1_[idx] > concentration_threshold_) { c1_[idx] = concentration_threshold_; } } /// Get the concentration at specified position double GetConcentration(const Double3& position) const { return c1_[GetBoxIndex(position)]; } /// Get the (normalized) gradient at specified position virtual void GetGradient(const Double3& position, Double3* gradient) const { auto idx = GetBoxIndex(position); assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); (*gradient)[0] = gradients_[3 * idx]; (*gradient)[1] = gradients_[3 * idx + 1]; (*gradient)[2] = gradients_[3 * idx + 2]; auto norm = std::sqrt((*gradient)[0] * (*gradient)[0] + (*gradient)[1] * (*gradient)[1] + (*gradient)[2] * (*gradient)[2]); if (norm > 1e-10) { (*gradient)[0] /= norm; (*gradient)[1] /= norm; (*gradient)[2] /= norm; } } std::array<uint32_t, 3> GetBoxCoordinates(const Double3& position) const { std::array<uint32_t, 3> box_coord; box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_; box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_; box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_; return box_coord; } size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const { size_t ret = box_coord[2] * num_boxes_axis_[0] * num_boxes_axis_[1] + box_coord[1] * num_boxes_axis_[0] + box_coord[0]; return ret; } /// Calculates the box index of the substance at specified position size_t GetBoxIndex(const Double3& position) const { auto box_coord = GetBoxCoordinates(position); return GetBoxIndex(box_coord); } void SetDiffusionSteps(int diffusion_step) { diffusion_step_ = diffusion_step; 
}

  void SetDecayConstant(double mu) { mu_ = mu; }

  void SetConcentrationThreshold(double t) { concentration_threshold_ = t; }

  double GetConcentrationThreshold() const { return concentration_threshold_; }

  const double* GetAllConcentrations() const { return c1_.data(); }

  const double* GetAllGradients() const { return gradients_.data(); }

  const std::array<size_t, 3>& GetNumBoxesArray() const {
    return num_boxes_axis_;
  }

  size_t GetNumBoxes() const { return total_num_boxes_; }

  double GetBoxLength() const { return box_length_; }

  int GetSubstanceId() const { return substance_; }

  const std::string& GetSubstanceName() const { return substance_name_; }

  double GetDecayConstant() const { return mu_; }

  const int32_t* GetDimensionsPtr() const { return grid_dimensions_.data(); }

  const std::array<int32_t, 6>& GetDimensions() const {
    return grid_dimensions_;
  }

  std::array<int32_t, 3> GetGridSize() const {
    std::array<int32_t, 3> ret;
    ret[0] = grid_dimensions_[1] - grid_dimensions_[0];
    ret[1] = grid_dimensions_[3] - grid_dimensions_[2];
    ret[2] = grid_dimensions_[5] - grid_dimensions_[4];
    return ret;
  }

  const std::array<double, 7>& GetDiffusionCoefficients() const { return dc_; }

  bool IsInitialized() const { return initialized_; }

  int GetResolution() const { return resolution_; }

  double GetBoxVolume() const { return box_volume_; }

  template <typename F>
  void AddInitializer(F function) {
    initializers_.push_back(function);
  }

  // return true if substance concentration and gradient don't evolve over time
  bool IsFixedSubstance() {
    return (mu_ == 0 && dc_[1] == 0 && dc_[2] == 0 && dc_[3] == 0 &&
            dc_[4] == 0 && dc_[5] == 0 && dc_[6] == 0);
  }

 private:
  /// The id of the substance of this grid
  int substance_ = 0;
  /// The name of the substance of this grid
  std::string substance_name_ = "";
  /// The side length of each box
  double box_length_ = 0;
  /// the volume of each box
  double box_volume_ = 0;
  /// The array of concentration values
  ParallelResizeVector<double> c1_ = {};
  /// An extra concentration data buffer for faster value updating
  ParallelResizeVector<double> c2_ = {};
  /// Buffers for Runge-Kutta
  ParallelResizeVector<double> r1_ = {};
  /// k array for Runge-Kutta
  std::array<double, 2> k_ = {};
  /// The array of gradients (x, y, z)
  ParallelResizeVector<double> gradients_ = {};
  /// The maximum concentration value that a box can have
  double concentration_threshold_ = 1e15;
  /// The diffusion coefficients [cc, cw, ce, cs, cn, cb, ct]
  std::array<double, 7> dc_ = {{0}};
  /// The timestep resolution of the diffusion grid
  // TODO(ahmad): this probably needs to scale with Param::simulation_timestep
  double dt_ = 1.0;
  /// The decay constant
  double mu_ = 0;
  /// The grid dimensions of the diffusion grid
  std::array<int32_t, 6> grid_dimensions_ = {{0}};
  /// The number of boxes at each axis [x, y, z]
  std::array<size_t, 3> num_boxes_axis_ = {{0}};
  /// The total number of boxes in the diffusion grid
  size_t total_num_boxes_ = 0;
  /// Flag to determine if this grid has been initialized
  bool initialized_ = false;
  /// The resolution of the diffusion grid
  int resolution_ = 0;
  /// Number of steps for the RK diffusion grid
  unsigned int diffusion_step_ = 1;
  /// If false, grid dimensions are even; if true, they are odd
  bool parity_ = false;
  /// A list of functions that initialize this diffusion grid
  /// ROOT currently doesn't support IO of std::function
  std::vector<std::function<double(double, double, double)>> initializers_ =
      {};  //!
// turn to true after gradient initialization bool init_gradient_ = false; BDM_CLASS_DEF_NV(DiffusionGrid, 1); }; } // namespace bdm #endif // CORE_DIFFUSION_GRID_H_
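
// A minimal usage sketch of this class (hypothetical snippet, assuming a
// running bdm::Simulation; the substance id, name, and all numeric values
// below are made up for illustration):
//
//     bdm::DiffusionGrid grid(0, "substance_0", 0.4, 0.1, /*resolution=*/32);
//     grid.Initialize({-50, 50, -50, 50, -50, 50});
//     grid.ChangeConcentrationBy({0, 0, 0}, 1.0);  // seed the center box
//     grid.DiffuseEuler();                         // one explicit Euler step
//     grid.CalculateGradient();
//     double c = grid.GetConcentration({0, 0, 0});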
GB_unaryop__ainv_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_int16 // op(A') function: GB_tran__ainv_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
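
//------------------------------------------------------------------------------
// note: for a bool result the additive inverse is the identity (see GB_OP
// above: z = x), so after the int16_t -> bool typecast this kernel reduces to
// Cx [p] = (bool) Ax [p], i.e. cij is true iff aij is nonzero.
//------------------------------------------------------------------------------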
core_zlanhe.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" #include <math.h> /******************************************************************************/ __attribute__((weak)) void plasma_core_zlanhe(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex64_t *A, int lda, double *work, double *value) { *value = LAPACKE_zlanhe_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), n, A, lda, work); } /******************************************************************************/ void plasma_core_omp_zlanhe(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex64_t *A, int lda, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:1]) { if (sequence->status == PlasmaSuccess) plasma_core_zlanhe(norm, uplo, n, A, lda, work, value); } } /******************************************************************************/ void plasma_core_omp_zlanhe_aux(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex64_t *A, int lda, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { switch (norm) { case PlasmaOneNorm: case PlasmaInfNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:n]) { if (sequence->status == PlasmaSuccess) { if (uplo == PlasmaUpper) { for (int i = 0; i < n; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { for (int i = 0; i < j; i++) { value[i] += cabs(A[lda*j+i]); value[j] += cabs(A[lda*j+i]); } value[j] += fabs(creal(A[lda*j+j])); } } else { // PlasmaLower for (int i = 0; i < n; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { value[j] += fabs(creal(A[lda*j+j])); for (int i = j+1; i < n; i++) { value[i] += cabs(A[lda*j+i]); value[j] += cabs(A[lda*j+i]); } } } } } break; } }
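
/******************************************************************************/
// Note on the aux kernel above: it exploits Hermitian symmetry. Only the
// stored triangle is traversed; each off-diagonal |A(i,j)| is added to both
// value[i] and value[j], and the diagonal contributes fabs(creal(A(j,j)))
// because the diagonal of a Hermitian matrix is real. Since the one-norm and
// the infinity-norm coincide for Hermitian matrices, the PlasmaOneNorm and
// PlasmaInfNorm cases share the same code path.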
nested.c
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt #include "callback.h" #include <omp.h> #include <unistd.h> int main() { omp_set_nested(1); print_frame(0); #pragma omp parallel num_threads(4) { print_frame(1); print_ids(0); print_ids(1); print_frame(0); //get all implicit task events before starting nested: #pragma omp barrier #pragma omp parallel num_threads(4) { print_frame(1); print_ids(0); print_ids(1); print_ids(2); print_frame(0); sleep(1); #pragma omp barrier print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_end' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end! 
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}0: NULL_POINTER=[[NULL:.*$]] // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // nested parallel masters // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION:0x[0-f]+]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[NESTED_EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]] // THREADS: {{^}}[[MASTER_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[NESTED_REENTER:0x[0-f]+]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // explicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=[[NESTED_REENTER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], 
task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: 
parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // 
THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // nested parallel worker threads // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... 
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... 
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... 
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] return 0; }
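The CHECK lines above trace doubly nested parallel regions through OMPT callbacks: every implicit task reports its ancestry with "task level N" lines (level 0 is its own region, the last level is the initial task with parallel_id=0), and each inner region of requested_team_size=4 is bracketed by parallel_begin/implicit_task_begin, an implicit barrier, and implicit_task_end/parallel_end. The following is a minimal stand-in for the kind of program body being checked, not the test's actual source (the event printing is done by the OMPT test tool, which is not shown in this excerpt):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  omp_set_nested(1);                  /* allow the inner regions to fork */
#pragma omp parallel num_threads(4)   /* outer region: PARALLEL_ID */
  {
#pragma omp parallel num_threads(4)   /* nested region: NESTED_PARALLEL_ID */
    {
      /* task level 0 = nested region, task level 1 = outer region,
         task level 2 = initial task, matching the CHECK lines above */
      printf("outer thread %d, inner thread %d\n",
             omp_get_ancestor_thread_num(1), omp_get_thread_num());
    }                                 /* implicit barrier -> barrier_begin/end */
  }
  return 0;
}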
pbkdf2-hmac-md5_fmt_plug.c
/*
 * This software is Copyright (c) 2015 Dhiru and magnum
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_md5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_md5);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>

#include "arch.h"
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "stdint.h"
#include "pbkdf2_hmac_md5.h"
#include "pbkdf2_hmac_common.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               256
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-MD5"

#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-MD5 " MD5_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-MD5 32/" ARCH_BITS_STR
#endif

#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(ARCH_WORD_32)

#if SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MAX_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#define PLAINTEXT_LENGTH        125

static struct custom_salt {
	unsigned int length;
	unsigned int rounds;
	char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[PBKDF2_MDx_BINARY_SIZE / sizeof(ARCH_WORD_32)];

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, PBKDF2_MD5_FORMAT_TAG, PBKDF2_MD5_TAG_LEN))
		ciphertext += PBKDF2_MD5_TAG_LEN;
	cs.rounds = atoi(ciphertext);
	ciphertext = strchr(ciphertext, '$') + 1;
	p = strchr(ciphertext, '$');
	saltlen = 0;
	memset(cs.salt, 0, sizeof(cs.salt));
	while (ciphertext < p) {        /** extract salt **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;

	return (void*)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#if SIMD_COEF_32
		int lens[SSE_GROUP_SZ_MD5], i;
		unsigned char *pin[SSE_GROUP_SZ_MD5];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_MD5];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_md5_sse((const unsigned char **)pin, lens,
		               (unsigned char*)cur_salt->salt, cur_salt->length,
		               cur_salt->rounds, &(x.poutc),
		               PBKDF2_MDx_BINARY_SIZE, 0);
#else
		pbkdf2_md5((unsigned char*)(saved_key[index]),
		           strlen(saved_key[index]),
		           (unsigned char*)cur_salt->salt, cur_salt->length,
		           cur_salt->rounds, (unsigned char*)crypt_out[index],
		           PBKDF2_MDx_BINARY_SIZE, 0);
#endif
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	//dump_stuff_msg("\nbinary", crypt_out[count - 1], 16);
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_MDx_BINARY_SIZE);
}

static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_md5_cmp_exact(get_key(index), source,
	                                 (unsigned char*)cur_salt->salt,
	                                 cur_salt->length, cur_salt->rounds);
}

static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_md5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_MDx_BINARY_SIZE,
		PBKDF2_32_BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		pbkdf2_hmac_md5_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		pbkdf2_hmac_md5_valid,
		pbkdf2_hmac_md5_split,
		pbkdf2_hmac_md5_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
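For reference, get_salt() above consumes hashes shaped like $pbkdf2-hmac-md5$<rounds>$<hex-salt>$<hash>; the field order is inferred from the parsing code, and the exact tag string is PBKDF2_MD5_FORMAT_TAG from pbkdf2_hmac_common.h. Below is a self-contained sketch of the same rounds/salt extraction, using a hypothetical test string and a hexval() helper standing in for John's atoi16[] table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int hexval(char c)              /* stand-in for the atoi16[] table */
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	return 0;
}

int main(void)
{
	/* hypothetical input with the format tag already stripped:
	   rounds=1000, salt "salt" hex-encoded as 73616c74 */
	const char *ct = "1000$73616c74$0123456789abcdef";
	char salt[64];
	int saltlen = 0;
	int rounds = atoi(ct);                /* leading decimal field */
	const char *cp = strchr(ct, '$') + 1; /* start of the hex salt */
	const char *end = strchr(cp, '$');    /* '$' before the hash */

	while (cp < end) {                    /* two hex digits -> one byte */
		salt[saltlen++] = (char)(hexval(cp[0]) * 16 + hexval(cp[1]));
		cp += 2;
	}
	printf("rounds=%d saltlen=%d salt=\"%.*s\"\n",
	       rounds, saltlen, saltlen, salt); /* rounds=1000, salt "salt" */
	return 0;
}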
archive_blake2sp_ref.c
/*
   BLAKE2 reference source code package - reference C implementations

   Copyright 2012, Samuel Neves <[email protected]>.  You may use this under
   the terms of the CC0, the OpenSSL Licence, or the Apache Public License
   2.0, at your option.  The terms of these licenses can be found at:

   - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
   - OpenSSL license   : https://www.openssl.org/source/license.html
   - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0

   More information about the BLAKE2 hash function can be found at
   https://blake2.net.
*/

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "archive_blake2.h"
#include "archive_blake2_impl.h"

#define PARALLELISM_DEGREE 8

/*
  blake2sp_init_param defaults to setting the expected output length from
  the digest_length parameter block field. In some cases, however, we do
  not want this, as the output length of these instances is given by
  inner_length instead.
*/
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
  int err = blake2s_init_param(S, P);
  S->outlen = P->inner_length;
  return err;
}

static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset )
{
  blake2s_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, offset );
  store16( &P->xof_length, 0 );
  P->node_depth = 0;
  P->inner_length = BLAKE2S_OUTBYTES;
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2sp_init_leaf_param( S, P );
}

static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
  blake2s_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, 0 );
  store16( &P->xof_length, 0 );
  P->node_depth = 1;
  P->inner_length = BLAKE2S_OUTBYTES;
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2s_init_param( S, P );
}

int blake2sp_init( blake2sp_state *S, size_t outlen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, 0, (uint32_t)i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;

  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
  #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
  }

  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  size_t i;

  if(out == NULL || outlen < S->outlen) {
    return -1;
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2S_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;

      if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;

      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
    }

    blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );

  return blake2s_final( S->R, out, S->outlen );
}

int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  blake2s_state S[PARALLELISM_DEGREE][1];
  blake2s_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  if( keylen > BLAKE2S_KEYBYTES ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
  #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }

    if( inlen__ > i * BLAKE2S_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
      const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[i], in__, len );
    }

    blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );

  return blake2s_final( FS, out, outlen );
}

#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( void )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );

    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      goto fail;
    }
  }

  /* Test streaming API */
  for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2S_OUTBYTES];
      blake2sp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;

      if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
        goto fail;
      }

      while (mlen >= step) {
        if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
        goto fail;
      }

      if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
        goto fail;
      }
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
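Note how blake2sp_update() and blake2sp() stripe the message over the eight leaves: lane i starts at byte offset i*BLAKE2S_BLOCKBYTES and then advances by PARALLELISM_DEGREE blocks at a time, so block j of the stream always belongs to leaf j % 8. Below is a small sketch of that schedule, with blake2s's 64-byte block size hardcoded (illustrative only, not part of the library):

#include <stdio.h>

#define BLOCKBYTES 64   /* BLAKE2S_BLOCKBYTES */
#define LANES 8         /* PARALLELISM_DEGREE */

int main(void)
{
	size_t inlen = 1000; /* hypothetical message length */
	size_t nblocks = (inlen + BLOCKBYTES - 1) / BLOCKBYTES;
	size_t j;

	for (j = 0; j < nblocks; j++) {
		size_t first = j * BLOCKBYTES;
		size_t last = (j + 1) * BLOCKBYTES - 1;
		if (last > inlen - 1)
			last = inlen - 1; /* final partial block */
		printf("block %zu (bytes %zu..%zu) -> leaf %zu\n",
		       j, first, last, j % LANES);
	}
	return 0;
}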
ConvolutionRules.h
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#ifndef CONVOLUTIONRULES_H
#define CONVOLUTIONRULES_H
#include "RectangularRegions.h"

template <Int dimension>
void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid,
                                           SparseGrid<dimension> &outputGrid,
                                           RuleBook &rules, long *size,
                                           long *stride,
                                           long *inputSpatialSize,
                                           long *outputSpatialSize) {
  rules.resize(volume<dimension>(size));
  for (auto const &inIter : inputGrid.mp) {
    auto outRegion = OutputRegionCalculator<dimension>(
        inIter.first, size, stride, outputSpatialSize);
    for (auto j : outRegion) {
      auto inRegion = InputRegionCalculator<dimension>(j, size, stride);
      Int rulesOffset = inRegion.offset(inIter.first);
      auto mapVal = outputGrid.mp.insert(std::make_pair(j, 0));
      if (mapVal.second) {
        mapVal.first->second = outputGrid.ctr++;
      }
      rules[rulesOffset].push_back(inIter.second + inputGrid.ctr);
      rules[rulesOffset].push_back(mapVal.first->second);
    }
  }
}

template <Int dimension>
Int Convolution_InputSgsToRulesAndOutputSgs(SparseGrids<dimension> &input_SGs,
                                            SparseGrids<dimension> &output_SGs,
                                            RuleBook &rules, long *filterSize,
                                            long *filterStride,
                                            long *input_spatialSize,
                                            long *output_spatialSize) {
  rules.clear();
  output_SGs.clear();
  Int batchSize = input_SGs.size();
  output_SGs.resize(batchSize);
  Int output_nActive = 0;
  for (Int i = 0; i < batchSize; i++) {
    auto &iSG = input_SGs[i];
    auto &oSG = output_SGs[i];
    oSG.ctr = output_nActive;
    Convolution_InputSgToRulesAndOutputSg<dimension>(
        iSG, oSG, rules, filterSize, filterStride, input_spatialSize,
        output_spatialSize);
    output_nActive = oSG.ctr;
    oSG.ctr = 0;
  }
  return output_nActive;
}

template <Int dimension>
Int Convolution_InputSgsToRulesAndOutputSgs_OMP(
    SparseGrids<dimension> &input_SGs, SparseGrids<dimension> &output_SGs,
    RuleBook &rules, long *filterSize, long *filterStride,
    long *input_spatialSize, long *output_spatialSize) {
  rules.clear();
  rules.resize(volume<dimension>(filterSize));
  output_SGs.clear();
  Int batchSize = input_SGs.size();
  output_SGs.resize(batchSize);
  std::vector<RuleBook> rbs(batchSize);
  {
    Int i;
#pragma omp parallel for private(i)
    for (i = 0; i < batchSize; i++)
      Convolution_InputSgToRulesAndOutputSg<dimension>(
          input_SGs[i], output_SGs[i], rbs[i], filterSize, filterStride,
          input_spatialSize, output_spatialSize);
  }
  Int output_nActive = 0;
  for (Int i = 0; i < batchSize; i++) {
    // Parallel assignment:
    // output_nActive <- output_nActive+output_SGs[i].ctr
    // output_SGs[i].ctr <- output_nActive
    Int tmp = output_nActive;
    output_nActive += output_SGs[i].ctr;
    output_SGs[i].ctr = tmp;
  }
  {
    Int i;
#pragma omp parallel for private(i)
    for (i = 0; i < (Int)rules.size(); i++) {
      auto &R = rules[i];
      for (Int j = 0; j < batchSize; j++) {
        auto &r = rbs[j][i];
        auto offset = output_SGs[j].ctr;
        for (Int k = 0; k < (Int)r.size();) {
          R.push_back(r[k++]);
          R.push_back(r[k++] + offset);
        }
      }
    }
  }
  return output_nActive;
}

// for each active site, list of (inputFeatureNumber,batchIdx, spatialOffset)
// triples
template <Int dimension>
void SparseToDense_InputSgsToRulesAndOutputSgs(
    SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) {
  Int batchSize = input_SGs.size();
  rules.clear();
  rules.resize(batchSize);
  Point<dimension> lb, ub;
  for (Int i = 0; i < dimension; ++i) {
    lb[i] = 0;
    ub[i] = spatialSize[i] - 1;
  }
  auto region = RectangularRegion<dimension>(lb, ub);
  for (Int batchIdx = 0; batchIdx < batchSize; batchIdx++) {
    auto &iSG = input_SGs[batchIdx];
    for (auto const &inIter : iSG.mp) {
      rules[batchIdx].push_back(inIter.second + iSG.ctr);
      rules[batchIdx].push_back(region.offset(inIter.first));
    }
  }
}

template <Int dimension>
void SparseToDense_InputSgsToRulesAndOutputSgs_OMP(
    SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) {
  Int batchSize = input_SGs.size();
  rules.clear();
  rules.resize(batchSize);
  Point<dimension> lb, ub;
  for (Int i = 0; i < dimension; ++i) {
    lb[i] = 0;
    ub[i] = spatialSize[i] - 1;
  }
  auto region = RectangularRegion<dimension>(lb, ub);
  Int batchIdx;
#pragma omp parallel for private(batchIdx)
  for (batchIdx = 0; batchIdx < batchSize; batchIdx++) {
    auto &iSG = input_SGs[batchIdx];
    for (auto const &inIter : iSG.mp) {
      rules[batchIdx].push_back(inIter.second + iSG.ctr);
      rules[batchIdx].push_back(region.offset(inIter.first));
    }
  }
}
#endif /* CONVOLUTIONRULES_H */
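The serial and OMP rulebook builders above differ in how output sites receive global indices: the OMP variant lets each sample number its own sites from zero in parallel, then converts the per-sample counts into starting offsets with the exclusive prefix sum flagged by the "Parallel assignment" comment, and finally adds each sample's offset while merging the per-sample rulebooks ("r[k++] + offset"). A tiny standalone sketch of that renumbering step, with hypothetical counts:

#include <stdio.h>

int main(void)
{
	int ctr[4] = {5, 3, 0, 7}; /* hypothetical per-sample active-site counts */
	int nActive = 0;
	int i;

	for (i = 0; i < 4; i++) {
		int tmp = nActive;  /* global index of sample i's first output site */
		nActive += ctr[i];
		ctr[i] = tmp;
	}
	/* ctr is now {0, 5, 8, 8} and nActive == 15: sample i's local site s
	   becomes global site ctr[i] + s, exactly the offset used in the merge */
	for (i = 0; i < 4; i++)
		printf("sample %d starts at global index %d\n", i, ctr[i]);
	return 0;
}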
mg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - MG This benchmark is an OpenMP C version of the NPB MG code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: E. Barszcz P. Frederickson A. Woo M. Yarrow OpenMP C version: S. Satoh 3.0 structure translation: F. Conti --------------------------------------------------------------------*/ #include "../common/npb-C.h" #include "globals.h" /* parameters */ #define T_BENCH 1 #define T_INIT 2 /* global variables */ /* common /grid/ */ static int is1, is2, is3, ie1, ie2, ie3; /* functions prototypes */ static void setup(int *n1, int *n2, int *n3, int lt); static void mg3P(double ****u, double ***v, double ****r, double a[4], double c[4], int n1, int n2, int n3, int k); static void psinv( double ***r, double ***u, int n1, int n2, int n3, double c[4], int k); static void resid( double ***u, double ***v, double ***r, int n1, int n2, int n3, double a[4], int k ); static void rprj3( double ***r, int m1k, int m2k, int m3k, double ***s, int m1j, int m2j, int m3j, int k ); static void interp( double ***z, int mm1, int mm2, int mm3, double ***u, int n1, int n2, int n3, int k ); static void norm2u3(double ***r, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz); static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk); static void comm3(double ***u, int n1, int n2, int n3, int kk); static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k); static void showall(double ***z, int n1, int n2, int n3); static double power( double a, int n ); static void bubble( double ten[M][2], int j1[M][2], int j2[M][2], int j3[M][2], int m, int ind ); static void zero3(double ***z, int n1, int n2, int n3); static void nonzero(double ***z, int n1, int n2, int n3); /*-------------------------------------------------------------------- program mg c-------------------------------------------------------------------*/ int main(int argc, char *argv[]) { /*------------------------------------------------------------------------- c k is the current level. It is passed down through subroutine args c and is NOT global. it is the current iteration c------------------------------------------------------------------------*/ int k, it; double t, tinit, mflops; int nthreads = 1; /*------------------------------------------------------------------------- c These arrays are in common because they are quite large c and probably shouldn't be allocated on the stack. They c are always passed as subroutine args. 
c------------------------------------------------------------------------*/ double ****u, ***v, ****r; double a[4], c[4]; double rnm2, rnmu; double epsilon = 1.0e-8; int n1, n2, n3, nit; double verify_value; boolean verified; int i, j, l; FILE *fp; timer_clear(T_BENCH); timer_clear(T_INIT); timer_start(T_INIT); /*---------------------------------------------------------------------- c Read in and broadcast input data c---------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - MG Benchmark\n\n"); fp = fopen("mg.input", "r"); if (fp != NULL) { printf(" Reading from input file mg.input\n"); fscanf(fp, "%d", &lt); while(fgetc(fp) != '\n'); fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]); while(fgetc(fp) != '\n'); fscanf(fp, "%d", &nit); while(fgetc(fp) != '\n'); for (i = 0; i <= 7; i++) { fscanf(fp, "%d", &debug_vec[i]); } fclose(fp); } else { printf(" No input file. Using compiled defaults\n"); lt = LT_DEFAULT; nit = NIT_DEFAULT; nx[lt] = NX_DEFAULT; ny[lt] = NY_DEFAULT; nz[lt] = NZ_DEFAULT; #pragma omp parallel for for (i = 0; i <= 7; i++) { debug_vec[i] = DEBUG_DEFAULT; } } if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) { Class = 'U'; } else if( nx[lt] == 32 && nit == 4 ) { Class = 'S'; } else if( nx[lt] == 64 && nit == 40 ) { Class = 'W'; } else if( nx[lt] == 256 && nit == 20 ) { Class = 'B'; } else if( nx[lt] == 512 && nit == 20 ) { Class = 'C'; } else if( nx[lt] == 256 && nit == 4 ) { Class = 'A'; } else { Class = 'U'; } /*-------------------------------------------------------------------- c Use these for debug info: c--------------------------------------------------------------------- c debug_vec(0) = 1 !=> report all norms c debug_vec(1) = 1 !=> some setup information c debug_vec(1) = 2 !=> more setup information c debug_vec(2) = k => at level k or below, show result of resid c debug_vec(3) = k => at level k or below, show result of psinv c debug_vec(4) = k => at level k or below, show result of rprj c debug_vec(5) = k => at level k or below, show result of interp c debug_vec(6) = 1 => (unused) c debug_vec(7) = 1 => (unused) c-------------------------------------------------------------------*/ a[0] = -8.0/3.0; a[1] = 0.0; a[2] = 1.0/6.0; a[3] = 1.0/12.0; if (Class == 'A' || Class == 'S' || Class =='W') { /*-------------------------------------------------------------------- c Coefficients for the S(a) smoother c-------------------------------------------------------------------*/ c[0] = -3.0/8.0; c[1] = 1.0/32.0; c[2] = -1.0/64.0; c[3] = 0.0; } else { /*-------------------------------------------------------------------- c Coefficients for the S(b) smoother c-------------------------------------------------------------------*/ c[0] = -3.0/17.0; c[1] = 1.0/33.0; c[2] = -1.0/61.0; c[3] = 0.0; } lb = 1; setup(&n1,&n2,&n3,lt); u = (double ****)malloc((lt+1)*sizeof(double ***)); for (l = lt; l >=1; l--) { u[l] = (double ***)malloc(m3[l]*sizeof(double **)); for (k = 0; k < m3[l]; k++) { u[l][k] = (double **)malloc(m2[l]*sizeof(double *)); for (j = 0; j < m2[l]; j++) { u[l][k][j] = (double *)malloc(m1[l]*sizeof(double)); } } } v = (double ***)malloc(m3[lt]*sizeof(double **)); for (k = 0; k < m3[lt]; k++) { v[k] = (double **)malloc(m2[lt]*sizeof(double *)); for (j = 0; j < m2[lt]; j++) { v[k][j] = (double *)malloc(m1[lt]*sizeof(double)); } } r = (double ****)malloc((lt+1)*sizeof(double ***)); for (l = lt; l >=1; l--) { r[l] = (double ***)malloc(m3[l]*sizeof(double **)); for (k = 0; k < m3[l]; k++) { 
r[l][k] = (double **)malloc(m2[l]*sizeof(double *)); for (j = 0; j < m2[l]; j++) { r[l][k][j] = (double *)malloc(m1[l]*sizeof(double)); } } } zero3(u[lt],n1,n2,n3); zran3(v,n1,n2,n3,nx[lt],ny[lt],lt); norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); /* printf("\n norms of random v are\n"); printf(" %4d%19.12e%19.12e\n", 0, rnm2, rnmu); printf(" about to evaluate resid, k= %d\n", lt);*/ printf(" Size: %3dx%3dx%3d (class %1c)\n", nx[lt], ny[lt], nz[lt], Class); printf(" Iterations: %3d\n", nit); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); /*c--------------------------------------------------------------------- c One iteration for startup c---------------------------------------------------------------------*/ mg3P(u,v,r,a,c,n1,n2,n3,lt); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); setup(&n1,&n2,&n3,lt); zero3(u[lt],n1,n2,n3); zran3(v,n1,n2,n3,nx[lt],ny[lt],lt); timer_stop(T_INIT); timer_start(T_BENCH); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); for ( it = 1; it <= nit; it++) { mg3P(u,v,r,a,c,n1,n2,n3,lt); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); } norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); { #if defined(_OPENMP) nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_BENCH); t = timer_read(T_BENCH); tinit = timer_read(T_INIT); verified = FALSE; verify_value = 0.0; printf(" Initialization time: %15.3f seconds\n", tinit); printf(" Benchmark completed\n"); if (Class != 'U') { if (Class == 'S') { verify_value = 0.530770700573e-04; } else if (Class == 'W') { verify_value = 0.250391406439e-17; /* 40 iterations*/ /* 0.183103168997d-044 iterations*/ } else if (Class == 'A') { verify_value = 0.2433365309e-5; } else if (Class == 'B') { verify_value = 0.180056440132e-5; } else if (Class == 'C') { verify_value = 0.570674826298e-06; } if ( fabs( rnm2 - verify_value ) <= epsilon ) { verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" L2 Norm is %20.12e\n", rnm2); printf(" Error is %20.12e\n", rnm2 - verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" L2 Norm is %20.12e\n", rnm2); printf(" The correct L2 Norm is %20.12e\n", verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { int nn = nx[lt]*ny[lt]*nz[lt]; mflops = 58.*nit*nn*1.0e-6 / t; } else { mflops = 0.0; } c_print_results("MG", Class, nx[lt], ny[lt], nz[lt], nit, nthreads, t, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(int *n1, int *n2, int *n3, int lt) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int k; for ( k = lt-1; k >= 1; k--) { nx[k] = nx[k+1]/2; ny[k] = ny[k+1]/2; nz[k] = nz[k+1]/2; } #pragma omp parallel for firstprivate(lt ) for (k = 1; k <= lt; k++) { m1[k] = nx[k]+2; m2[k] = nz[k]+2; m3[k] = ny[k]+2; } is1 = 1; ie1 = nx[lt]; *n1 = nx[lt]+2; is2 = 1; ie2 = ny[lt]; *n2 = ny[lt]+2; is3 = 1; ie3 = nz[lt]; *n3 = nz[lt]+2; if (debug_vec[1] >= 1 ) { printf(" in setup, \n"); printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n"); printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n", lt,nx[lt],ny[lt],nz[lt],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3); } } 
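/*--------------------------------------------------------------------
c Worked example (annotation, not part of the benchmark): assuming the
c class-S problem with nx[lt] = ny[lt] = nz[lt] = 32 and lt = 5, the
c halving loop in setup() above yields the per-level grid sizes
c
c        k  :   5    4    3    2    1
c     nx[k] :  32   16    8    4    2
c
c and m1[k] = nx[k] + 2 (likewise m2, m3) adds one ghost plane on each
c side, which comm3() later fills by periodic wrap-around. Standalone
c sketch of the same computation:
c
c     int lt = 5, nx[6];
c     nx[lt] = 32;
c     for (int k = lt - 1; k >= 1; k--) nx[k] = nx[k + 1] / 2;
c     // nx[1..5] == {2, 4, 8, 16, 32}; allocate nx[k] + 2 per axis
c-------------------------------------------------------------------*/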
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void mg3P(double ****u, double ***v, double ****r, double a[4], double c[4], int n1, int n2, int n3, int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c multigrid V-cycle routine c-------------------------------------------------------------------*/ int j; /*-------------------------------------------------------------------- c down cycle. c restrict the residual from the find grid to the coarse c-------------------------------------------------------------------*/ for (k = lt; k >= lb+1; k--) { j = k-1; rprj3(r[k], m1[k], m2[k], m3[k], r[j], m1[j], m2[j], m3[j], k); } k = lb; /*-------------------------------------------------------------------- c compute an approximate solution on the coarsest grid c-------------------------------------------------------------------*/ zero3(u[k], m1[k], m2[k], m3[k]); psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k); for (k = lb+1; k <= lt-1; k++) { j = k-1; /*-------------------------------------------------------------------- c prolongate from level k-1 to k c-------------------------------------------------------------------*/ zero3(u[k], m1[k], m2[k], m3[k]); interp(u[j], m1[j], m2[j], m3[j], u[k], m1[k], m2[k], m3[k], k); /*-------------------------------------------------------------------- c compute residual for level k c-------------------------------------------------------------------*/ resid(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k); /*-------------------------------------------------------------------- c apply smoother c-------------------------------------------------------------------*/ psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k); } j = lt - 1; k = lt; interp(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k); resid(u[lt], v, r[lt], n1, n2, n3, a, k); psinv(r[lt], u[lt], n1, n2, n3, c, k); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void psinv( double ***r, double ***u, int n1, int n2, int n3, double c[4], int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c psinv applies an approximate inverse as smoother: u = u + Cr c c This implementation costs 15A + 4M per result, where c A and M denote the costs of Addition and Multiplication. c Presuming coefficient c(3) is zero (the NPB assumes this, c but it is thus not a general case), 2A + 1M may be eliminated, c resulting in 13A + 3M. c Note that this vectorizes, and is also fine for cache c based machines. 
c-------------------------------------------------------------------*/ int i3, i2, i1; double r1[M], r2[M]; #pragma omp parallel for private(i1 ,i2 ,i3 ,r1 ,r2 ) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { #pragma omp parallel for firstprivate(n1 ,r ,i2 ,i3 ) for (i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1] + r[i3-1][i2][i1] + r[i3+1][i2][i1]; r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1]; } #pragma omp parallel for firstprivate(n1 ,c ,r ,u ,i2 ,i3 ) for (i1 = 1; i1 < n1-1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] ); /*-------------------------------------------------------------------- c Assume c(3) = 0 (Enable line below if c(3) not= 0) c--------------------------------------------------------------------- c > + c(3) * ( r2(i1-1) + r2(i1+1) ) c-------------------------------------------------------------------*/ } } } /*-------------------------------------------------------------------- c exchange boundary points c-------------------------------------------------------------------*/ comm3(u,n1,n2,n3,k); if (debug_vec[0] >= 1 ) { rep_nrm(u,n1,n2,n3," psinv",k); } if ( debug_vec[3] >= k ) { showall(u,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void resid( double ***u, double ***v, double ***r, int n1, int n2, int n3, double a[4], int k ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c resid computes the residual: r = v - Au c c This implementation costs 15A + 4M per result, where c A and M denote the costs of Addition (or Subtraction) and c Multiplication, respectively. c Presuming coefficient a(1) is zero (the NPB assumes this, c but it is thus not a general case), 3A + 1M may be eliminated, c resulting in 12A + 3M. c Note that this vectorizes, and is also fine for cache c based machines. 
c-------------------------------------------------------------------*/ int i3, i2, i1; double u1[M], u2[M]; #pragma omp parallel for private(i1 ,i2 ,i3 ,u1 ,u2 ) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { #pragma omp parallel for firstprivate(n1 ,u ,i2 ,i3 ) for (i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1] + u[i3-1][i2][i1] + u[i3+1][i2][i1]; u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1] + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1]; } for (i1 = 1; i1 < n1-1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] /*-------------------------------------------------------------------- c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0) c--------------------------------------------------------------------- c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3) c > + u1(i1) ) c-------------------------------------------------------------------*/ - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] ); } } } /*-------------------------------------------------------------------- c exchange boundary data c--------------------------------------------------------------------*/ comm3(r,n1,n2,n3,k); if (debug_vec[0] >= 1 ) { rep_nrm(r,n1,n2,n3," resid",k); } if ( debug_vec[2] >= k ) { showall(r,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void rprj3( double ***r, int m1k, int m2k, int m3k, double ***s, int m1j, int m2j, int m3j, int k ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c rprj3 projects onto the next coarser grid, c using a trilinear Finite Element projection: s = r' = P r c c This implementation costs 20A + 4M per result, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. 
c-------------------------------------------------------------------*/ int j3, j2, j1, i3, i2, i1, d1, d2, d3; double x1[M], y1[M], x2, y2; if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel for private(j1 ,j2 ,i1 ,i2 ,i3 ,x1 ,y1 ,x2 ,y2 ) for (j3 = 1; j3 < m3j-1; j3++) { i3 = 2*j3-d3; /*C i3 = 2*j3-1*/ for (j2 = 1; j2 < m2j-1; j2++) { i2 = 2*j2-d2; /*C i2 = 2*j2-1*/ #pragma omp parallel for firstprivate(m1j ,d2 ,d3 ,i1 ,d1 ,i2 ,i3 ,r ,j2 ,j3 ) for (j1 = 1; j1 < m1j; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1] + r[i3][i2+1][i1] + r[i3+2][i2+1][i1]; y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1] + r[i3][i2+2][i1] + r[i3+2][i2+2][i1]; } for (j1 = 1; j1 < m1j-1; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1] + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1]; x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1] + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1]; s[j3][j2][j1] = 0.5 * r[i3+1][i2+1][i1+1] + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] ); } } } comm3(s,m1j,m2j,m3j,k-1); if (debug_vec[0] >= 1 ) { rep_nrm(s,m1j,m2j,m3j," rprj3",k-1); } if (debug_vec[4] >= k ) { showall(s,m1j,m2j,m3j); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void interp( double ***z, int mm1, int mm2, int mm3, double ***u, int n1, int n2, int n3, int k ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c interp adds the trilinear interpolation of the correction c from the coarser grid to the current approximation: u = u + Qu' c c Observe that this implementation costs 16A + 4M, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. Vector machines may get slightly better c performance however, with 8 separate "do i1" loops, rather than 4. 
c-------------------------------------------------------------------*/ int i3, i2, i1, d1, d2, d3, t1, t2, t3; /* c note that m = 1037 in globals.h but for this only need to be c 535 to handle up to 1024^3 c integer m c parameter( m=535 ) */ double z1[M], z2[M], z3[M]; if ( n1 != 3 && n2 != 3 && n3 != 3 ) { #pragma omp parallel for private(i1 ,i2 ,z1 ,z2 ,z3 ) for (i3 = 0; i3 < mm3-1; i3++) { for (i2 = 0; i2 < mm2-1; i2++) { #pragma omp parallel for firstprivate(mm1 ,z ,i2 ,i3 ) for (i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1]; z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1]; } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1] +z[i3][i2][i1]; u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1] +0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]); } #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1] +0.5 * z1[i1]; u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1] +0.25*( z1[i1] + z1[i1+1] ); } #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1] +0.5 * z2[i1]; u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1] +0.25*( z2[i1] + z2[i1+1] ); } #pragma omp parallel for firstprivate(mm1 ,u ,i2 ,i3 ) for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1] +0.25* z3[i1]; u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1] +0.125*( z3[i1] + z3[i1+1] ); } } } } else { if (n1 == 3) { d1 = 2; t1 = 1; } else { d1 = 1; t1 = 0; } if (n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if (n3 == 3) { d3 = 2; t3 = 1; } else { d3 = 1; t3 = 0; } { #pragma omp parallel for private(i1 ,i2) for ( i3 = d3; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] +z[i3-1][i2-1][i1-1]; } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] +0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] +0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] +0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } #pragma omp parallel for private(i1 ,i2) for ( i3 = 1; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] +0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] +0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1] +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] +0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] +0.125*(z[i3][i2][i1]+z[i3][i2-1][i1] +z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } } }//end #pragma omp parallel if (debug_vec[0] >= 1 ) { rep_nrm(z,mm1,mm2,mm3,"z: inter",k-1); 
rep_nrm(u,n1,n2,n3,"u: inter",k); } if ( debug_vec[5] >= k ) { showall(z,mm1,mm2,mm3); showall(u,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void norm2u3(double ***r, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c norm2u3 evaluates approximations to the L2 norm and the c uniform (or L-infinity or Chebyshev) norm, under the c assumption that the boundaries are periodic or zero. Add the c boundaries in with half weight (quarter weight on the edges c and eighth weight at the corners) for inhomogeneous boundaries. c-------------------------------------------------------------------*/ double s = 0.0; int i3, i2, i1, n; double a = 0.0, tmp = 0.0; n = nx*ny*nz; #pragma omp parallel for private(i1 ,i2 ,a ) reduction(+:tmp) reduction(+:s) for (i3 = 1; i3 < n3-1; i3++) { #pragma omp parallel for firstprivate(n3 ,i1 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) for (i2 = 1; i2 < n2-1; i2++) { #pragma omp parallel for firstprivate(n3 ,i2 ,r ,n1 ,n2 ,i3 ) reduction(+:tmp) reduction(+:s) for (i1 = 1; i1 < n1-1; i1++) { s = s + r[i3][i2][i1] * r[i3][i2][i1]; a = fabs(r[i3][i2][i1]); if (a > tmp) tmp = a; } } } *rnmu = tmp; *rnm2 = sqrt(s/(double)n); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c report on norm c-------------------------------------------------------------------*/ double rnm2, rnmu; norm2u3(u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]); printf(" Level%2d in %8s: norms =%21.14e%21.14e\n", kk, title, rnm2, rnmu); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void comm3(double ***u, int n1, int n2, int n3, int kk) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c comm3 organizes the communication on all borders c-------------------------------------------------------------------*/ int i1, i2, i3; /* axis = 1 */ { #pragma omp parallel for private(i1 ,i2 ,i3 ) for ( i3 = 1; i3 < n3-1; i3++) { #pragma omp parallel for firstprivate(n3 ,i1 ,u ,n1 ,n2 ,i3 ) for ( i2 = 1; i2 < n2-1; i2++) { u[i3][i2][n1-1] = u[i3][i2][1]; u[i3][i2][0] = u[i3][i2][n1-2]; } // } /* axis = 2 */ //#pragma omp for // for ( i3 = 1; i3 < n3-1; i3++) { #pragma omp parallel for firstprivate(n3 ,i2 ,u ,n1 ,n2 ,i3 ) for ( i1 = 0; i1 < n1; i1++) { u[i3][n2-1][i1] = u[i3][1][i1]; u[i3][0][i1] = u[i3][n2-2][i1]; } } /* axis = 3 */ #pragma omp parallel for private(i1 ,i3 ) for ( i2 = 0; i2 < n2; i2++) { for ( i1 = 0; i1 < n1; i1++) { u[n3-1][i2][i1] = u[1][i2][i1]; u[0][i2][i1] = u[n3-2][i2][i1]; } } }//end #pragma omp parallel } /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c zran3 loads +1 at ten randomly chosen points, c loads -1 at a different ten random points, c and zero elsewhere. c-------------------------------------------------------------------*/ #define MM 10 #define A pow(5.0,13) #define X 314159265.e0 int i0, m0, m1; int i1, i2, i3, d1, e1, e2, e3; double xx, x0, x1, a1, a2, ai; double ten[MM][2], best; int i, j1[MM][2], j2[MM][2], j3[MM][2]; int jg[4][MM][2]; double rdummy; a1 = power( A, nx ); a2 = power( A, nx*ny ); zero3(z,n1,n2,n3); i = is1-1+nx*(is2-1+ny*(is3-1)); ai = power( A, i ); d1 = ie1 - is1 + 1; e1 = ie1 - is1 + 2; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = X; rdummy = randlc( &x0, ai ); for (i3 = 1; i3 < e3; i3++) { x1 = x0; for (i2 = 1; i2 < e2; i2++) { xx = x1; vranlc( d1, &xx, A, &(z[i3][i2][0])); rdummy = randlc( &x1, a1 ); } rdummy = randlc( &x0, a2 ); } /*-------------------------------------------------------------------- c call comm3(z,n1,n2,n3) c call showall(z,n1,n2,n3) c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c each processor looks for twenty candidates c-------------------------------------------------------------------*/ #pragma omp parallel for for (i = 0; i < MM; i++) { ten[i][1] = 0.0; j1[i][1] = 0; j2[i][1] = 0; j3[i][1] = 0; ten[i][0] = 1.0; j1[i][0] = 0; j2[i][0] = 0; j3[i][0] = 0; } for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 1; i1 < n1-1; i1++) { if ( z[i3][i2][i1] > ten[0][1] ) { ten[0][1] = z[i3][i2][i1]; j1[0][1] = i1; j2[0][1] = i2; j3[0][1] = i3; bubble( ten, j1, j2, j3, MM, 1 ); } if ( z[i3][i2][i1] < ten[0][0] ) { ten[0][0] = z[i3][i2][i1]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble( ten, j1, j2, j3, MM, 0 ); } } } } /*-------------------------------------------------------------------- c Now which of these are globally best? 
c-------------------------------------------------------------------*/ i1 = MM - 1; i0 = MM - 1; for (i = MM - 1 ; i >= 0; i--) { best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]]; if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) { jg[0][i][1] = 0; jg[1][i][1] = is1 - 1 + j1[i1][1]; jg[2][i][1] = is2 - 1 + j2[i1][1]; jg[3][i][1] = is3 - 1 + j3[i1][1]; i1 = i1-1; } else { jg[0][i][1] = 0; jg[1][i][1] = 0; jg[2][i][1] = 0; jg[3][i][1] = 0; } ten[i][1] = best; best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]]; if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) { jg[0][i][0] = 0; jg[1][i][0] = is1 - 1 + j1[i0][0]; jg[2][i][0] = is2 - 1 + j2[i0][0]; jg[3][i][0] = is3 - 1 + j3[i0][0]; i0 = i0-1; } else { jg[0][i][0] = 0; jg[1][i][0] = 0; jg[2][i][0] = 0; jg[3][i][0] = 0; } ten[i][0] = best; } m1 = i1+1; m0 = i0+1; /* printf(" negative charges at"); for (i = 0; i < MM; i++) { if (i%5 == 0) printf("\n"); printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]); } printf("\n positive charges at"); for (i = 0; i < MM; i++) { if (i%5 == 0) printf("\n"); printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]); } printf("\n small random numbers were\n"); for (i = MM-1; i >= 0; i--) { printf(" %15.8e", ten[i][0]); } printf("\n and they were found on processor number\n"); for (i = MM-1; i >= 0; i--) { printf(" %4d", jg[0][i][0]); } printf("\n large random numbers were\n"); for (i = MM-1; i >= 0; i--) { printf(" %15.8e", ten[i][1]); } printf("\n and they were found on processor number\n"); for (i = MM-1; i >= 0; i--) { printf(" %4d", jg[0][i][1]); } printf("\n");*/ #pragma omp parallel for private(i2 ,i1 ) for (i3 = 0; i3 < n3; i3++) { #pragma omp parallel for firstprivate(n3 ,i1 ,z ,n1 ,n2 ,i3 ) for (i2 = 0; i2 < n2; i2++) { #pragma omp parallel for firstprivate(n3 ,i2 ,z ,n1 ,n2 ,i3 ) for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } for (i = MM-1; i >= m0; i--) { z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0; } for (i = MM-1; i >= m1; i--) { z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0; } comm3(z,n1,n2,n3,k); /*-------------------------------------------------------------------- c call showall(z,n1,n2,n3) c-------------------------------------------------------------------*/ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void showall(double ***z, int n1, int n2, int n3) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i1,i2,i3; int m1, m2, m3; m1 = min(n1,18); m2 = min(n2,14); m3 = min(n3,18); printf("\n"); for (i3 = 0; i3 < m3; i3++) { for (i1 = 0; i1 < m1; i1++) { for (i2 = 0; i2 < m2; i2++) { printf("%6.3f", z[i3][i2][i1]); } printf("\n"); } printf(" - - - - - - - \n"); } printf("\n"); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static double power( double a, int n ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c power raises an integer, disguised as a double c precision real, to an integer power c-------------------------------------------------------------------*/ double aj; int nj; double rdummy; double power; power = 1.0; nj = n; aj = a; while (nj != 0) { if( (nj%2) == 1 ) rdummy = randlc( &power, aj ); rdummy = randlc( &aj, aj ); nj = nj/2; } 
return (power); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void bubble( double ten[M][2], int j1[M][2], int j2[M][2], int j3[M][2], int m, int ind ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c bubble does a bubble sort in direction dir c-------------------------------------------------------------------*/ double temp; int i, j_temp; if ( ind == 1 ) { for (i = 0; i < m-1; i++) { if ( ten[i][ind] > ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } else { for (i = 0; i < m-1; i++) { if ( ten[i][ind] < ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void zero3(double ***z, int n1, int n2, int n3) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i1, i2, i3; #pragma omp parallel for private(i1 ,i2 ,i3 ) for (i3 = 0;i3 < n3; i3++) { #pragma omp parallel for firstprivate(i3 ) for (i2 = 0; i2 < n2; i2++) { #pragma omp parallel for firstprivate(i2 ,i3 ) for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } } /*---- end of program ------------------------------------------------*/
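The power() routine above is square-and-multiply in disguise: each randlc(&x, a) call advances x = a*x mod 2^46 (the NPB linear congruential step), so the surrounding loop is ordinary binary exponentiation over 46-bit seeds. The same scheme in plain integer arithmetic, as a hypothetical stand-alone helper that is not part of the benchmark:

#include <stdint.h>

/* computes (base^exp) mod 2^46 by square-and-multiply,
   mirroring the randlc-based loop in power() */
static uint64_t power_mod46(uint64_t base, uint64_t exp)
{
    const uint64_t mask = ((uint64_t)1 << 46) - 1;
    uint64_t result = 1, a = base & mask;
    while (exp != 0) {
        /* 64-bit wraparound is harmless here: 2^46 divides 2^64,
           so (x*y mod 2^64) mod 2^46 == (x*y) mod 2^46 */
        if (exp & 1) result = (result * a) & mask; /* multiply step */
        a = (a * a) & mask;                        /* square step */
        exp >>= 1;
    }
    return result;
}

The nj = nj/2 halving in power() plays the role of exp >>= 1 here, and the conditional randlc call is the multiply step.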
conv_dw_kernel_int8_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: [email protected]
 */

#include "conv_dw_kernel_int8_arm.h"

#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>

#include "utility/sys_port.h"

#ifdef __aarch64__
void depthwise_k3s1p1_int8_a72(int8_t* input, int8_t* kernel, int8_t* out, int* bias, long out_h, long out_w,
                               long multi, long shift, long input_w, long act_min, long act_max);
void depthwise_k3s2p1_int8_a72(int8_t* input, int8_t* kernel, int8_t* out, int* bias, long out_h, long out_w,
                               long multi, long shift, long input_w, long act_min, long act_max);
#else
void depthwise_k3s1_int8(int8_t* input, int8_t* kernel, int8_t* out, int* bias, int out_h, int out_w,
                         int multi, int shift, int input_w, int act_min, int act_max);
void depthwise_k3s2_int8(int8_t* input, int8_t* kernel, int8_t* out, int* bias, int out_h, int out_w,
                         int multi, int shift, int input_w, int act_min, int act_max);
#endif

int conv_dw_int8_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                        struct conv_priv_info* priv_info, struct conv_param* param)
{
    int out_c = output_tensor->dims[1];

    priv_info->multi = (int*)sys_malloc(out_c * sizeof(int));
    priv_info->q_shift = (int*)sys_malloc(out_c * sizeof(int));

    float input_scale = input_tensor->scale;
    float* kernel_scales = filter_tensor->scale_list;
    float output_scale = output_tensor->scale;

    priv_info->activation_min = -127;
    priv_info->activation_max = 127;

    /* set activation */
    if (param->activation >= 0)
    {
        priv_info->activation_min = 0;
        if (param->activation == 1)
            priv_info->activation_max = round(1.0 / output_scale);
        if (param->activation == 6)
            priv_info->activation_max = round(6.0 / output_scale);
        if (priv_info->activation_max > 127)
            priv_info->activation_max = 127;
    }

    /* requantization: scale = q * 2^shift with 0.5 <= q < 1 (frexp), and
       multi = round(q * 2^31) is the Q31 fixed-point multiplier */
    for (int i = 0; i < out_c; i++)
    {
        float kernel_scale = kernel_scales[i];
        float scale = input_scale * kernel_scale / output_scale;
        int shift;
        double q = frexp(scale, &shift);
        /* round in 64 bits so the q ~= 1.0 corner case is caught before it
           overflows int (the old int fix_q == (1l << 31) check could never fire) */
        int64_t fix_q = (int64_t)llround(q * (double)(1ll << 31));
        if (fix_q == (1ll << 31))
        {
            fix_q /= 2;
            shift++;
        }
        priv_info->multi[i] = (int)fix_q;
        priv_info->q_shift[i] = shift;
    }

    return 0;
}

int conv_dw_int8_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->multi)
    {
        sys_free(priv_info->multi);
        priv_info->multi = NULL;
    }

    if (priv_info->q_shift)
    {
        sys_free(priv_info->q_shift);
        priv_info->q_shift = NULL;
    }

    return 0;
}

void conv_dw_int8_direct(int8_t* input_buf, int8_t* weight_buf, int8_t* output_buf, int* bias, int
input_h, int input_w, int output_h, int output_w, int channel_num, int stride, int* pads, int* p_multi, int* p_shift, int activation_min, int activation_max, int num_thread, int cpu_affinity) { int channel_size = input_h * input_w; #ifndef __aarch64__ int8_t* input_pad = NULL; int input_h_pad = input_h + pads[0] + pads[2]; int input_w_pad = input_w + pads[1] + pads[3]; int is_pad0 = (pads[0] == 0 && pads[1] == 0 && pads[2] == 0 && pads[3] == 0); if (!is_pad0) { input_pad = (int8_t*)malloc(sizeof(int8_t) * channel_num * input_h_pad * input_w_pad + 128); memset(input_pad, 0, sizeof(int8_t) * channel_num * input_h_pad * input_w_pad + 128); } #endif #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel_num; i++) { int8_t* input_tmp = NULL; int* bias_tmp = bias ? (bias + i) : NULL; #ifndef __aarch64__ if (!is_pad0) { int8_t* tmp = input_pad + i * input_h_pad * input_w_pad; input_tmp = tmp; tmp += pads[0] * input_w_pad + pads[1]; for (int j = 0; j < input_h; j++) { memcpy(tmp, input_buf + i * channel_size + j * input_w, input_w); tmp += input_w_pad; } } else #endif { input_tmp = input_buf + i * channel_size; } if (1 == stride) { #ifdef __aarch64__ depthwise_k3s1p1_int8_a72(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w, p_multi[i], p_shift[i], input_w, activation_min, activation_max); #else depthwise_k3s1_int8(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w, p_multi[i], p_shift[i], input_w_pad, activation_min, activation_max); #endif } else if (2 == stride) { #ifdef __aarch64__ depthwise_k3s2p1_int8_a72(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w, p_multi[i], p_shift[i], input_w, activation_min, activation_max); #else depthwise_k3s2_int8(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w, p_multi[i], p_shift[i], input_w_pad, activation_min, activation_max); #endif } } #ifndef __aarch64__ if (!is_pad0) { free(input_pad); input_pad = NULL; } #endif } int conv_dw_int8_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int pads[4] = {0}; int group = param->group; int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int act_type = param->activation; pads[0] = param->pad_h0; pads[1] = param->pad_w0; pads[2] = param->pad_h1; pads[3] = param->pad_w1; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3]; int out_c = output_tensor->dims[1] / group; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3]; int activation_min = priv_info->activation_min; int activation_max = priv_info->activation_max; /* buffer addr */ int8_t* input_buf = (int8_t*)input_tensor->data; int8_t* kernel_buf = 
(int8_t*)filter_tensor->data; int8_t* output_buf = (int8_t*)output_tensor->data; int32_t* biases_buf = NULL; if (bias_tensor != NULL) { biases_buf = (int32_t*)bias_tensor->data; } int* multi = priv_info->multi; int* q_shift = priv_info->q_shift; for (int n = 0; n < batch; n++) // batch size { int8_t* input = input_buf + n * input_size * group; int8_t* kernel = kernel_buf + n * kernel_size * group; int8_t* output = output_buf + n * output_size * group; conv_dw_int8_direct(input, kernel, output, biases_buf, in_h, in_w, out_h, out_w, in_c * group, stride_h, pads, multi, q_shift, activation_min, activation_max, num_thread, cpu_affinity); } return 0; }
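The (multi, q_shift) pairs computed in conv_dw_int8_prerun are consumed inside the assembly kernels, which are not shown in this file. As a hedged sketch of how such a Q31 pair is typically applied to an int32 accumulator (the helper name is illustrative and the real kernels may round rather than truncate):

#include <stdint.h>

/* acc * scale ~= (acc * multi) / 2^31 * 2^shift; for realistic scales
   31 - shift stays below 64, so one widened multiply and shift suffices */
static int8_t requantize_q31(int32_t acc, int32_t multi, int shift,
                             int32_t act_min, int32_t act_max)
{
    int64_t v = ((int64_t)acc * multi) >> (31 - shift); /* truncating shift */
    if (v < act_min) v = act_min; /* clamp to the activation range */
    if (v > act_max) v = act_max;
    return (int8_t)v;
}

The activation_min/activation_max fields computed in prerun plug directly into the clamp arguments, which is how ReLU and ReLU6 are folded into the requantization step.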
hst_compiler_explorer.c
// TODO Mark regions

#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <omp.h>

#include "../../support/common.h"
#include "../../support/timer.h"

// Pointer declaration
static T *A;
static unsigned int *histo_host;

typedef struct Params {
    unsigned int input_size;
    unsigned int bins;
    int n_warmup;
    int n_reps;
    const char *file_name;
    int exp;
    int n_threads;
} Params;

/**
 * @brief reads the input image into A
 * @param A destination array with p.input_size elements
 * @param p benchmark parameters (input size and file name)
 */
static void read_input(T *A, const Params p) {
    char dctFileName[100];
    FILE *File = NULL;

    // Open input file
    unsigned short temp;
    snprintf(dctFileName, sizeof(dctFileName), "%s", p.file_name);
    if ((File = fopen(dctFileName, "rb")) != NULL) {
        for (unsigned int y = 0; y < p.input_size; y++) {
            if (fread(&temp, sizeof(unsigned short), 1, File) != 1)
                break;
            A[y] = (unsigned int)ByteSwap16(temp);
            if (A[y] >= 4096)
                A[y] = 4095;
        }
        fclose(File);
    } else {
        printf("%s does not exist\n", dctFileName);
        exit(1);
    }
}

/**
 * @brief compute output in the host
 */
static void histogram_host(unsigned int *histo, T *A, unsigned int bins,
                           unsigned int nr_elements, int exp,
                           unsigned int nr_of_dpus, int t) {
    omp_set_num_threads(t);

    if (!exp) {
        // weak-scaling experiment: each of the nr_of_dpus copies builds a
        // full histogram over the whole input
#pragma omp parallel for
        for (unsigned int i = 0; i < nr_of_dpus; i++) {
            for (unsigned int j = 0; j < nr_elements; j++) {
                T d = A[j];
                histo[i * bins + ((d * bins) >> DEPTH)] += 1;
            }
        }
    } else {
        // strong-scaling experiment: one shared histogram, atomic updates
#pragma omp parallel for
        for (unsigned int j = 0; j < nr_elements; j++) {
            T d = A[j];
#pragma omp atomic update
            histo[(d * bins) >> DEPTH] += 1;
        }
    }
}

// Params ---------------------------------------------------------------------
void usage() {
    fprintf(stderr,
            "\nUsage:  ./program [options]"
            "\n"
            "\nGeneral options:"
            "\n    -h        help"
            "\n    -w <W>    # of untimed warmup iterations (default=1)"
            "\n    -e <E>    # of timed repetition iterations (default=3)"
            "\n    -t <T>    # of threads (default=8)"
            "\n    -x <X>    Weak (0) or strong (1) scaling (default=1)"
            "\n"
            "\nBenchmark-specific options:"
            "\n    -i <I>    input size (default=1536*1024 elements)"
            "\n    -b <B>    histogram size (default=256 bins)"
            "\n    -f <F>    input image file (default=../../input/image_VanHateren.iml)"
            "\n");
}

struct Params input_params(int argc, char **argv) {
    struct Params p;
    p.input_size = 1536 * 1024;
    p.bins = 256;
    p.n_warmup = 1;
    p.n_reps = 3;
    p.n_threads = 8;
    p.exp = 1;
    p.file_name = "../../input/image_VanHateren.iml";

    int opt;
    while ((opt = getopt(argc, argv, "hi:b:w:e:f:x:t:")) >= 0) {
        switch (opt) {
        case 'h':
            usage();
            exit(0);
            break;
        case 'i':
            p.input_size = atoi(optarg);
            break;
        case 'b':
            p.bins = atoi(optarg);
            break;
        case 'w':
            p.n_warmup = atoi(optarg);
            break;
        case 'e':
            p.n_reps = atoi(optarg);
            break;
        case 'f':
            p.file_name = optarg;
            break;
        case 'x':
            p.exp = atoi(optarg);
            break;
        case 't':
            p.n_threads = atoi(optarg);
            break;
        default:
            fprintf(stderr, "\nUnrecognized option!\n");
            usage();
            exit(0);
        }
    }
    assert(p.n_threads > 0 && "Invalid # of threads!");

    return p;
}

/**
 * @brief Main of the Host Application.
 */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);

    // In the original PrIM host code this is the number of DPUs; in this
    // host-only version use one private histogram copy per thread.
    uint32_t nr_of_dpus = p.n_threads;

    const unsigned int input_size = p.input_size; // Size of input image
    assert(input_size % p.n_threads == 0 && "Input size!");

    // Input/output allocation
    A = malloc(input_size * sizeof(T));
    T *bufferA = A;
    if (!p.exp)
        histo_host = malloc(nr_of_dpus * p.bins * sizeof(unsigned int));
    else
        histo_host = malloc(p.bins * sizeof(unsigned int));

    // Read the input image.
read_input(A, p);

    Timer timer;
    start(&timer, 0, 0);

    if (!p.exp)
        memset(histo_host, 0, nr_of_dpus * p.bins * sizeof(unsigned int));
    else
        memset(histo_host, 0, p.bins * sizeof(unsigned int));

    histogram_host(histo_host, A, p.bins, input_size, p.exp, nr_of_dpus,
                   p.n_threads);

    stop(&timer, 0);
    printf("Kernel ");
    print(&timer, 0, 1);
    printf("\n");

    free(A);
    free(histo_host);
    return 0;
}
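Between the two paths benchmarked above (replicated full histograms, and one shared histogram updated atomically) sits a third standard variant: one private histogram per thread, merged once at the end. A minimal sketch reusing the T/DEPTH binning convention from common.h (the helper name and structure are illustrative, not part of PrIM):

#include <stdlib.h>
#include <omp.h>

static void histogram_private(unsigned int *histo, const T *A, unsigned int bins,
                              unsigned int nr_elements, int t)
{
    // one zero-initialized histogram per thread
    unsigned int *priv = calloc((size_t)t * bins, sizeof(unsigned int));
#pragma omp parallel num_threads(t)
    {
        unsigned int *h = priv + (size_t)omp_get_thread_num() * bins;
#pragma omp for
        for (unsigned int j = 0; j < nr_elements; j++) {
            T d = A[j];
            h[(d * bins) >> DEPTH] += 1; /* no atomics on the hot path */
        }
    }
    for (int i = 0; i < t; i++)          /* single cheap merge */
        for (unsigned int b = 0; b < bins; b++)
            histo[b] += priv[(size_t)i * bins + b];
    free(priv);
}

For small bin counts the merge cost is negligible, so this usually beats the atomic version while using far less memory than full replication.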
GB_unop__carg_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__carg_fp32_fc32) // op(A') function: GB (_unop_tran__carg_fp32_fc32) // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cargf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cargf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cargf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CARG || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__carg_fp32_fc32) ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cargf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cargf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__carg_fp32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
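Outside of GraphBLAS, the scalar operator this generated kernel hard-codes is just C99's cargf: GxB_FC32_t is in effect a float complex, and the apply loop maps each stored value to its phase angle. A tiny standalone demo of the operator itself (plain C, not a GraphBLAS call):

#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    /* cij = cargf(aij): the phase angle of each complex entry, in radians */
    float complex a[3] = {1.0f, I, -1.0f};
    for (int i = 0; i < 3; i++)
    {
        printf("carg(%g%+gi) = %g\n", crealf(a[i]), cimagf(a[i]), cargf(a[i]));
    }
    return 0; /* prints 0, then pi/2, then pi */
}

In user code these kernels are reached indirectly, through the library's generic apply machinery, which dispatches to this specialization when the operator and types match.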
GB_unaryop__lnot_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int16 // op(A') function: GB_tran__lnot_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
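One subtlety worth noting in the macro pipeline above: GB_CASTING runs before GB_OP, so this kernel computes cij = !((bool) aij), which is simply (aij == 0) for any int16 input. A tiny standalone check of that identity (independent of GraphBLAS):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
    int16_t vals[4] = {0, 1, -5, 32767};
    for (int i = 0; i < 4; i++)
    {
        bool z = (bool)vals[i];   /* GB_CASTING: nonzero -> true */
        bool cij = !z;            /* GB_OP: logical not */
        assert(cij == (vals[i] == 0));
    }
    return 0;
}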
convolution_sgemm_pack4to16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack4to16_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + size % 16, 16u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); { int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; float* tmpptr = tmp.channel(i / 16); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x16 __m128 _r0 = _mm_load_ps(img0); __m128 _r1 = _mm_load_ps(img0 + 4); __m128 _r2 = _mm_load_ps(img0 + 4 * 2); __m128 _r3 = _mm_load_ps(img0 + 4 * 3); __m128 _r4 = _mm_load_ps(img0 + 4 * 4); __m128 _r5 = _mm_load_ps(img0 + 4 * 5); __m128 _r6 = _mm_load_ps(img0 + 4 * 6); __m128 _r7 = _mm_load_ps(img0 + 4 * 7); __m128 _r8 = _mm_load_ps(img0 + 4 * 8); __m128 _r9 = _mm_load_ps(img0 + 4 * 9); __m128 _ra = _mm_load_ps(img0 + 4 * 10); __m128 _rb = _mm_load_ps(img0 + 4 * 11); __m128 _rc = _mm_load_ps(img0 + 4 * 12); __m128 _rd = _mm_load_ps(img0 + 4 * 13); __m128 _re = _mm_load_ps(img0 + 4 * 14); __m128 _rf = _mm_load_ps(img0 + 4 * 15); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _MM_TRANSPOSE4_PS(_rc, _rd, _re, _rf); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _rc); _mm_store_ps(tmpptr + 4 * 4, _r1); _mm_store_ps(tmpptr + 4 * 5, _r5); _mm_store_ps(tmpptr + 4 * 6, _r9); _mm_store_ps(tmpptr + 4 * 7, _rd); _mm_store_ps(tmpptr + 4 * 8, _r2); _mm_store_ps(tmpptr + 4 * 9, _r6); _mm_store_ps(tmpptr + 4 * 10, _ra); _mm_store_ps(tmpptr + 4 * 11, _re); _mm_store_ps(tmpptr + 4 * 12, _r3); _mm_store_ps(tmpptr + 4 * 13, _r7); _mm_store_ps(tmpptr + 4 * 14, _rb); _mm_store_ps(tmpptr + 4 * 15, _rf); img0 += size * 4; tmpptr += 64; } } } remain_size_start += nn_size << 4; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { float* tmpptr = tmp.channel(i / 16 + i % 16); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { __m128 _val = _mm_load_ps(img0); _mm_store_ps(tmpptr, _val); img0 += size * 4; tmpptr += 4; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr0 = 
top_blob.channel(p); const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 16 : zeros; int i = 0; for (; i + 15 < size; i += 16) { float* tmpptr = tmp.channel(i / 16); const float* kptr = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 __m512 _sum0 = _mm512_loadu_ps(biasptr); __m512 _sum1 = _sum0; __m512 _sum2 = _sum0; __m512 _sum3 = _sum0; __m512 _sum4 = _sum0; __m512 _sum5 = _sum0; __m512 _sum6 = _sum0; __m512 _sum7 = _sum0; __m512 _sum8 = _sum0; __m512 _sum9 = _sum0; __m512 _suma = _sum0; __m512 _sumb = _sum0; __m512 _sumc = _sum0; __m512 _sumd = _sum0; __m512 _sume = _sum0; __m512 _sumf = _sum0; for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(kptr); __m512 _val0 = _mm512_set1_ps(tmpptr[0]); __m512 _val1 = _mm512_set1_ps(tmpptr[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(tmpptr[2]); __m512 _val3 = _mm512_set1_ps(tmpptr[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); __m512 _val4 = _mm512_set1_ps(tmpptr[4]); __m512 _val5 = _mm512_set1_ps(tmpptr[5]); _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5); __m512 _val6 = _mm512_set1_ps(tmpptr[6]); __m512 _val7 = _mm512_set1_ps(tmpptr[7]); _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); __m512 _val8 = _mm512_set1_ps(tmpptr[8]); __m512 _val9 = _mm512_set1_ps(tmpptr[9]); _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9); __m512 _vala = _mm512_set1_ps(tmpptr[10]); __m512 _valb = _mm512_set1_ps(tmpptr[11]); _suma = _mm512_fmadd_ps(_vala, _w0, _suma); _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb); __m512 _valc = _mm512_set1_ps(tmpptr[12]); __m512 _vald = _mm512_set1_ps(tmpptr[13]); _sumc = _mm512_fmadd_ps(_valc, _w0, _sumc); _sumd = _mm512_fmadd_ps(_vald, _w0, _sumd); __m512 _vale = _mm512_set1_ps(tmpptr[14]); __m512 _valf = _mm512_set1_ps(tmpptr[15]); _sume = _mm512_fmadd_ps(_vale, _w0, _sume); _sumf = _mm512_fmadd_ps(_valf, _w0, _sumf); kptr += 16; tmpptr += 16; } _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr0 + 16, _sum1); _mm512_store_ps(outptr0 + 16 * 2, _sum2); _mm512_store_ps(outptr0 + 16 * 3, _sum3); _mm512_store_ps(outptr0 + 16 * 4, _sum4); _mm512_store_ps(outptr0 + 16 * 5, _sum5); _mm512_store_ps(outptr0 + 16 * 6, _sum6); _mm512_store_ps(outptr0 + 16 * 7, _sum7); _mm512_store_ps(outptr0 + 16 * 8, _sum8); _mm512_store_ps(outptr0 + 16 * 9, _sum9); _mm512_store_ps(outptr0 + 16 * 10, _suma); _mm512_store_ps(outptr0 + 16 * 11, _sumb); _mm512_store_ps(outptr0 + 16 * 12, _sumc); _mm512_store_ps(outptr0 + 16 * 13, _sumd); _mm512_store_ps(outptr0 + 16 * 14, _sume); _mm512_store_ps(outptr0 + 16 * 15, _sumf); outptr0 += 16 * 16; } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 16 + i % 16); const float* kptr = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 __m512 _sum0 = _mm512_loadu_ps(biasptr); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(kptr); __m512 _val0 = _mm512_set1_ps(tmpptr[0]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); kptr += 16; tmpptr += 1; } _mm512_store_ps(outptr0, _sum0); outptr0 += 16; } } } static void convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 
16b-4a-maxk-inch/4a-outch/16b Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(16 * 4 * maxk, inch / 4, outch / 16, (size_t)4u); for (int q = 0; q + 15 < outch; q += 16) { float* g00 = kernel_tm.channel(q / 16); for (int p = 0; p + 3 < inch; p += 4) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 16; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = k00[k]; g00++; } } } } } } static void convolution_im2col_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); float* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4; for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { __m128 _val = _mm_load_ps(sptr); _mm_store_ps(ptr, _val); sptr += stride_w * 4; ptr += 4; } sptr += gap; } } } } } im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt); }
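The SIMD code above is easier to follow next to a scalar reference. Below is a minimal sketch of plain, unpacked im2col for a single input channel (a hypothetical helper, not part of ncnn), showing the layout that the pack4to16 variant reproduces four floats at a time:

#include <stddef.h>

/* col must hold (kernel_h * kernel_w) rows of (outh * outw) values;
   img is one channel of the input, w floats per row */
static void im2col_channel(const float *img, int w, float *col,
                           int outw, int outh,
                           int kernel_w, int kernel_h,
                           int dilation_w, int dilation_h,
                           int stride_w, int stride_h)
{
    for (int u = 0; u < kernel_h; u++) {
        for (int v = 0; v < kernel_w; v++) {
            for (int i = 0; i < outh; i++) {
                const float *sptr = img + (i * stride_h + dilation_h * u) * w
                                        + dilation_w * v;
                for (int j = 0; j < outw; j++) {
                    *col++ = *sptr;     /* one kernel tap per output position */
                    sptr += stride_w;
                }
            }
        }
    }
}

The gap variable in convolution_im2col_sgemm_pack4to16_avx512 plays the role of the (i * stride_h) row advance here, just expressed as a pointer increment between output rows, scaled by 4 for the packed layout.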
3d7pt_var.c
/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* fallback problem size (an assumed default; the original left these
     uninitialized when fewer arguments were given) */
  int Nx = 128+2, Ny = 128+2, Nz = 128+2, Nt = 100;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize the full grids (both time buffers, halo included) so the
  // stencil never reads undefined values
  // srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - 6 additions and 7 multiplications per point
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);  /* the macro is MIN, not min */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);

  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
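The hand-rolled timeval_subtract helper above predates clock_gettime; on any POSIX system a monotonic clock is both simpler and immune to wall-clock adjustments. A minimal equivalent sketch (the only assumptions are POSIX clock_gettime and, on older glibc, linking with -lrt):

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    /* ... timed region ... */
    clock_gettime(CLOCK_MONOTONIC, &t1);
    /* nanosecond fields may "borrow" from the seconds; the arithmetic
       below handles that automatically in double precision */
    double tdiff = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) * 1.0e-9;
    printf("time: %f s\n", tdiff);
    return 0;
}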