parallel_unit.c
|
#include <stdio.h>
#include <errno.h> // for errno
#include <math.h>
#include <limits.h> // for INT_MAX
#include <stdlib.h> // for strtol
#include <time.h>
#include <omp.h>
long number_of_threads = 2;
long max_number_of_char = 10;
long number_of_types_char = 10;
long number_of_results = 5;
long number_of_queues = 2;
typedef struct Client
{
long* initial_characteristics;
long** derived_characteristics;
long number_of_init_chars;
long identifier;
long result;
} Client;
long rand_between(long l, long r, unsigned int seed) {
int value;
#pragma omp critical (rand)
{
srand(seed);
value = rand();
}
return (long) (l + (value % (r - l + 1)));
}
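/* Hedged sketch (not used by this program): with POSIX rand_r() the per-call
 * srand()/rand() pair and the critical section above could be avoided, since
 * rand_r() keeps its state in the caller-supplied seed. Illustration only. */
#if 0
long rand_between_r(long l, long r, unsigned int *seed) {
    int value = rand_r(seed); /* thread-safe: no shared PRNG state */
    return (long) (l + (value % (r - l + 1)));
}
#endif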
Client* createClient(long id, unsigned int seed) {
Client* client = malloc(sizeof(Client));
client->number_of_init_chars = rand_between(0, max_number_of_char, seed);
seed += client->number_of_init_chars;
if(client->number_of_init_chars > 0) {
client->initial_characteristics = malloc(client->number_of_init_chars*sizeof(long));
//printf("id: %ld - init size: %ld\n", id, client->number_of_init_chars);
for (long i = 0; i < client->number_of_init_chars; ++i) {
client->initial_characteristics[i] = rand_between(1, number_of_types_char, seed);
seed += client->initial_characteristics[i];
}
if(number_of_queues - 1 > 0){
client->derived_characteristics = malloc((number_of_queues-1)*sizeof(long*));
for (long i = 0; i < number_of_queues-1; ++i) {
client->derived_characteristics[i] = malloc((client->number_of_init_chars+i+1)*sizeof(long));
//printf("id: %ld - level: %ld - size: %ld\n", id, i, client->number_of_init_chars+i+1);
}
}
} else {
client->initial_characteristics = NULL;
client->derived_characteristics = NULL;
}
client->identifier = id;
client->result = -1;
return client;
}
void destroyClient(Client* client){
if(client->number_of_init_chars > 0) {
free(client->initial_characteristics);
if(number_of_queues-1 > 0){
for (long i = 0; i < number_of_queues-1; ++i) {
free(client->derived_characteristics[i]);
}
free(client->derived_characteristics);
}
}
free(client);
}
void printClient(Client* client, short int detail_level) {
#pragma omp critical (print)
{
if(detail_level > 1) {
printf("\nId: %ld\n", client->identifier);
printf("Result: %ld\n", client->result);
if(detail_level > 1) {
printf("Nº of characteristics: %ld\n", client->number_of_init_chars);
if(client->number_of_init_chars > 0) {
printf("Characteristics: %ld", client->initial_characteristics[0]);
for (long i = 1; i < client->number_of_init_chars; ++i) {
printf(", %ld", client->initial_characteristics[i]);
}
}
for (long i = 0; i < number_of_queues - 1; ++i) {
if(detail_level > i+2) {
if(client->number_of_init_chars > 0) {
printf("\nCharacteristics on queue %ld: %ld", i+1, client->derived_characteristics[i][0]);
for (long j = 1; j < client->number_of_init_chars+i+1; ++j) {
printf(", %ld", client->derived_characteristics[i][j]);
}
}
}
}
}
printf("\n");
}
}
}
void printClientCSV(Client* client) {
printf("\n%ld, %ld, %ld\n", client->identifier, client->result, client->number_of_init_chars);
}
void printCSV(long* ids, long* results, long* n_of_chars, long number_of_clients) {
printf("ID,Result,Nº Initial Characteristics\n");
for (long i = 0; i < number_of_clients; ++i) {
printf("%ld, %ld, %ld\n", ids[i], results[i], n_of_chars[i]);
}
}
long initialCharProcess(Client* client) {
long value = 0;
for (long i = 0; i < client->number_of_init_chars; ++i) {
value += client->initial_characteristics[i];
}
return (value / (client->number_of_init_chars + 1)) + (value % (client->number_of_init_chars + 1));
}
long levelCharProcess(Client* client, long* values, long level) {
//printf("client: %ld - chars: %ld\n", client->identifier, client->number_of_init_chars);
long* origin = NULL;
if (level == 0) {
origin = client->initial_characteristics;
} else {
origin = client->derived_characteristics[level-1];
}
long value = 0;
//printf("client: %ld - level: %ld - level size: %ld\n", client->identifier, level, client->number_of_init_chars+level);
for (long i = 0; i < client->number_of_init_chars+level-1; ++i) {
client->derived_characteristics[level][i] = 0;
for (long j = 0; j < client->number_of_init_chars+level; ++j) {
client->derived_characteristics[level][i] += labs(origin[i] - origin[j]); // labs: the operands are long
}
//printf("client: %ld - %ld: %ld\n", client->identifier,i, client->derived_characteristics[level][i]);
value += client->derived_characteristics[level][i];
}
client->derived_characteristics[level][client->number_of_init_chars+level-1] = labs(origin[client->number_of_init_chars+level-1] - origin[0]);
//printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level-1, client->derived_characteristics[level][client->number_of_init_chars+level-1]);
client->derived_characteristics[level][client->number_of_init_chars+level] = values[level];
//printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level, client->derived_characteristics[level][client->number_of_init_chars+level]);
value += client->derived_characteristics[level][client->number_of_init_chars+level-1];
value += client->derived_characteristics[level][client->number_of_init_chars+level];
return (value * client->number_of_init_chars) % number_of_types_char;
}
long categoryFromValue(long* values) {
long result = 0;
if(values[0] == 0){
return 0;
}
for (long i = 0; i < number_of_queues; ++i) {
result += values[i] * (i+1);
}
result = result / number_of_queues;
return 1 + ( result % number_of_results);
}
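/* Summary of the categorization pipeline implemented below: a client with no
 * characteristics gets result 0; otherwise values[0] is the reduction of the
 * initial characteristics (initialCharProcess), values[1..number_of_queues-1]
 * are produced stage by stage from the previous level's characteristics
 * (levelCharProcess), and the final result is categoryFromValue(values),
 * which lies in 1..number_of_results. */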
void categorizeClient(Client* client) {
if(client->number_of_init_chars == 0) {
client->result = 0;
return;
}
long* values = calloc(number_of_queues, sizeof(long));
values[0] = initialCharProcess(client);
//printf("initial value: %ld\n", values[0]);
for (long i = 0; i < number_of_queues-1; ++i) {
values[i+1] += levelCharProcess(client, values, i);
//printf("value %ld: %ld\n", i, values[i]);
}
client->result = categoryFromValue(values);
free(values);
}
long convert_str_long(char *str){
char *p;
errno = 0;
long conv = strtol(str, &p, 10);
if (errno != 0 || *p != '\0')
{
printf("%s não é um número!\n", str);
exit(-1);
}
return (long)conv;
}
int main(int argc, char **argv){
if (argc != 9) {
printf("É necessário informar os seguintes argumentos:\n");
printf("Quantidade de threads a serem usadas\n");
printf("Seed usada para gerar os dados\n");
printf("Número de clientes a serem criados\n");
printf("Quantidade máxima de caracteristicas por cliente\n");
printf("Quantidade de tipos de caracteristicas\n");
printf("Quantidade de resultados diferentes do 0\n");
printf("Número de etapas a serem utilizadas para processar o resultado\n");
printf("Nível de detalhe da exibição dos resultados:\n");
printf(" - Caso seja 0: Imprime apenas o tempo gasto\n");
printf(" - Caso seja 1: Imprime o Id, o Resultado e o Nª de categorias de cada cliente em um .csv\n");
printf(" - Caso seja n: Imprime os dados de cada cliente detalhando as caracteristicas de até n-1 etapas e ao final os dados de detalhe 1\n");
return -1;
}
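/* Illustrative invocation (argument values are only an example):
 *   ./parallel_unit 4 1234 1000 10 10 5 3 0
 * i.e. 4 threads, seed 1234, 1000 clients, at most 10 characteristics per
 * client, 10 characteristic types, 5 non-zero results, 3 stages, detail 0. */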
number_of_threads = convert_str_long(argv[1]);
unsigned int seed = convert_str_long(argv[2]);
long number_of_clients = convert_str_long(argv[3]);
max_number_of_char = convert_str_long(argv[4]);
number_of_types_char = convert_str_long(argv[5]);
number_of_results = convert_str_long(argv[6]);
number_of_queues = convert_str_long(argv[7]);
long detail_level = convert_str_long(argv[8]);
long* ids = malloc(number_of_clients*sizeof(long));
long* results = malloc(number_of_clients*sizeof(long));
long* n_of_chars = malloc(number_of_clients*sizeof(long));
Client** clients = malloc(number_of_clients*sizeof(Client*));
for (long i = 0; i < number_of_clients; ++i) {
clients[i] = createClient(i, seed+i);
}
double t = omp_get_wtime();
#pragma omp parallel num_threads(number_of_threads) default(none) \
shared(ids, results, n_of_chars, number_of_clients, clients, detail_level)
{
#pragma omp for schedule(guided)
for (long i = 0; i < number_of_clients; ++i) {
categorizeClient(clients[i]);
ids[i] = clients[i] ->identifier;
results[i] = clients[i] ->result;
n_of_chars[i] = clients[i] ->number_of_init_chars;
printClient(clients[i] , detail_level);
}
}
t = omp_get_wtime() - t;
for (long i = 0; i < number_of_clients; ++i) {
destroyClient(clients[i]);
}
if(detail_level > 0) {
printCSV(ids, results, n_of_chars, number_of_clients);
} else {
printf("%.10lf\n", t);
}
free(clients);
free(ids);
free(results);
free(n_of_chars);
return 0;
} /* main */
|
GB_unop__identity_int64_int8.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_int8
// op(A') function: GB_unop_tran__identity_int64_int8
// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
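// Illustration (not part of the generated code): GB_CAST_OP (pC,pA) above is
// the composition of the three per-entry macros, roughly:
//     GB_GETA (aij, Ax, pA) ;   // int8_t aij = Ax [pA]
//     GB_CAST (z, aij) ;        // int64_t z = (int64_t) aij
//     GB_CX (pC) = z ;          // Cx [pC] = z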
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int64_int8
(
int64_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__iseq_fc64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc64)
// C=scalar+B GB (_bind1st__iseq_fc64)
// C=scalar+B' GB (_bind1st_tran__iseq_fc64)
// C=A+scalar GB (_bind2nd__iseq_fc64)
// C=A'+scalar GB (_bind2nd_tran__iseq_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_iseq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC64_iseq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__iseq_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = Bx [p] ;
Cx [p] = GB_FC64_iseq (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = Ax [p] ;
Cx [p] = GB_FC64_iseq (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_iseq (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_iseq (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fox_floats_timer_caching_omp_fileIO_benchmark.c
|
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compiler command:
* mpiicc -O3 -xCORE-AVX2 -qopenmp -qopt-report-phase=vec -qopt-report=3 -g -debug all -trace fox_floats_timer_caching_omp_fileIO_benchmark.c
* -o fox_floats_timer_caching_omp_fileIO_benchmark
*
* export VT_PCTRACE=on
*
* Run command:
* mpirun -n 4 -trace ./fox_floats_timer_caching_omp_fileIO_benchmark
*/
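/* Sketch of the algorithm (summary of the Fox() function below, for orientation):
 * on a q x q process grid, for stage = 0 .. q-1:
 *   bcast_root = (my_row + stage) mod q
 *   the process in column bcast_root broadcasts its block of A along its row
 *   every process does local_C += (received A block) * local_B
 *   every process circularly shifts its local_B block one step along its column
 */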
/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 2048
// define whether or not to print matrices on the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define the number of OpenMP threads used to accelerate the computation
#define NUM_THREADS 2
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Type define structure of process grid */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid */
int my_row; /* My row number */
int my_col; /* My column number */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Type define structure of local matrix */
#define MAX 2097152 // Maximum number of elements in the array that stores the local matrix (2^21)
typedef struct {
int n_bar;
#define Order(A) ((A)->n_bar) // macro definition with parameters
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // macro definition with parameters, array dereference
} LOCAL_MATRIX_T;
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for contiguous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special write function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
int main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD mode starts from here (processes fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++;
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read matrix A on process 0 from A.dat and send the blocks to each process
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Gather matrix A on process 0 and print it to stdout
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read matrix B on process 0 from B.dat, store each block as its local transpose, and send the blocks to each process
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Gather matrix B (stored locally as its transpose) on process 0 and print it to stdout
Build_matrix_type(local_A); // Build local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate a temporary local matrix of order n_bar
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
} /* main */
/*********************************************************/
void Setup_grid(
GRID_INFO_T* grid /* out */) {
int old_rank;
int dimensions[2];
int wrap_around[2];
int coordinates[2];
int free_coords[2];
/* Set up Global Grid Information */
MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);
/* We assume p is a perfect square */ // but what if it's not a perfect square
grid->q = (int) sqrt((double) grid->p);
dimensions[0] = dimensions[1] = grid->q;
/* We want a circular shift in second dimension. */
/* Don't care about first */
wrap_around[0] = wrap_around[1] = 1;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
wrap_around, 1, &(grid->comm));
MPI_Comm_rank(grid->comm, &(grid->my_rank));
MPI_Cart_coords(grid->comm, grid->my_rank, 2,
coordinates);
grid->my_row = coordinates[0];
grid->my_col = coordinates[1];
/* Set up row communicators */
free_coords[0] = 0;
free_coords[1] = 1;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->row_comm));
/* Set up column communicators */
free_coords[0] = 1;
free_coords[1] = 0;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
void Fox(
int n /* in */,
GRID_INFO_T* grid /* in */,
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */
/* matrix of A used during */
/* the current stage */
int stage;
int bcast_root;
int n_bar; /* n/sqrt(p) */
int source;
int dest;
MPI_Status status;
n_bar = n/grid->q;
Set_to_zero(local_C);
/* Calculate addresses for row circular shift of B */
source = (grid->my_row + 1) % grid->q;
dest = (grid->my_row + grid->q - 1) % grid->q;
/* Set aside storage for the broadcast block of A */
temp_A = Local_matrix_allocate(n_bar);
for (stage = 0; stage < grid->q; stage++) {
bcast_root = (grid->my_row + stage) % grid->q;
if (bcast_root == grid->my_col) { // Process P_{ii} broadcasts A_{ii} in the process grid's row communicator
MPI_Bcast(local_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(local_A, local_B,
local_C);
} else { // temp_A is a buffer for process P_{ij} to store A_{ij}
MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(temp_A, local_B,
local_C);
}
MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer
dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local multiplication operation
} /* for */
} /* Fox */
/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
LOCAL_MATRIX_T* temp;
temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
return temp;
} /* Local_matrix_allocate */
/*********************************************************/
void Free_local_matrix(
LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
free(*local_A_ptr);
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_A(
char* prompt /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 reads the matrix from A.dat and sends blocks to the other processes
fp = fopen("A.dat","r");
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) {
for (mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp, "%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
*/
} else {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp,"%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processes receive their blocks from process 0
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status);
}
} /* Read_matrix_A */
/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_B(
char* prompt /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT *temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 reads the matrix from B.dat and sends blocks to the other processes
fp = fopen("B.dat","r");
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) { // process 0 (local)
for (mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and columns in local_B, for column-major storage
/* scanf("%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and columns in local_B, for column-major storage
*/
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col); */
} else {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processes receive their blocks from process 0
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and columns in local_B, for column-major storage
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
MPI_Recv(temp, Order(local_B),
FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and columns in local_B, for column-major storage
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and columns in local_B, for column-major storage
/* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status); */
}
free(temp);
}
} /* Read_matrix_B */
/*********************************************************/
/* Receive and print matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_A */
/*********************************************************/
/* Receive and print matrix B (stored locally as its transpose):
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
*(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and columns in local_B, for column-major storage
MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
}
free(temp);
}
} /* Print_matrix_B */
/*********************************************************/
/* Receive and print matrix C:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", Entry(local_C, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_C */
/*********************************************************/
/* Receive and write matrix C into a file:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Write_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
fp = fopen("C.dat", "w+");
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", temp[mat_col]);
// printf("%20.15E ", temp[mat_col]);
}
}
fprintf(fp,"\n");
}
free(temp);
fclose(fp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Write_matrix_C */
/*********************************************************/
/*
* Set the local matrix's elements to zero
*/
void Set_to_zero(
LOCAL_MATRIX_T* local_A /* out */) {
int i, j;
for (i = 0; i < Order(local_A); i++)
for (j = 0; j < Order(local_A); j++)
Entry(local_A,i,j) = 0.0E0;
} /* Set_to_zero */
/*********************************************************/
void Build_matrix_type(
LOCAL_MATRIX_T* local_A /* in */) {
MPI_Datatype temp_mpi_t;
int block_lengths[2];
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address;
MPI_Aint address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype
/*
Synopsis
int MPI_Type_contiguous(int count,
MPI_Datatype oldtype,
MPI_Datatype *newtype)
Input Parameters
count
replication count (nonnegative integer)
oldtype
old datatype (handle)
*/
block_lengths[0] = block_lengths[1] = 1;
typelist[0] = MPI_INT;
typelist[1] = temp_mpi_t;
MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory
MPI_Address(&(local_A->n_bar), &address);
/*
Synopsis
int MPI_Address(const void *location, MPI_Aint *address)
Input Parameters
location
location in caller memory (choice)
Output Parameters
address
address of location (address integer)
*/
displacements[0] = address - start_address;
MPI_Address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_struct(2, block_lengths, displacements,
typelist, &local_matrix_mpi_t); // Creates a struct datatype
/*
Synopsis
int MPI_Type_struct(int count,
const int *array_of_blocklengths,
const MPI_Aint *array_of_displacements,
const MPI_Datatype *array_of_types,
MPI_Datatype *newtype)
Input Parameters
count
number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths
array_of_blocklengths
number of elements in each block (array)
array_of_displacements
byte displacement of each block (array)
array_of_types
type of elements in each block (array of handles to datatype objects)
Output Parameters
newtype
new datatype (handle)
*/
MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype
/*
Synopsis
int MPI_Type_commit(MPI_Datatype *datatype)
Input Parameters
datatype
datatype (handle)
*/
} /* Build_matrix_type */
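/*********************************************************/
/* Hedged sketch (not part of the original program): MPI_Address and
 * MPI_Type_struct used above are deprecated and removed in MPI-3. An
 * equivalent construction with the MPI-3 calls MPI_Get_address and
 * MPI_Type_create_struct would look roughly as follows; the function
 * name and the out-parameter are illustrative only.
 */
#if 0
void Build_matrix_type_mpi3(
         LOCAL_MATRIX_T* local_A  /* in  */,
         MPI_Datatype*   new_type /* out (hypothetical) */) {
    MPI_Datatype temp_mpi_t;
    int          block_lengths[2];
    MPI_Aint     displacements[2];
    MPI_Datatype typelist[2];
    MPI_Aint     start_address;
    MPI_Aint     address;
    MPI_Type_contiguous(Order(local_A)*Order(local_A),
        FLOAT_MPI, &temp_mpi_t);                       /* contiguous block of entries */
    block_lengths[0] = block_lengths[1] = 1;
    typelist[0] = MPI_INT;
    typelist[1] = temp_mpi_t;
    MPI_Get_address(local_A, &start_address);          /* replaces MPI_Address */
    MPI_Get_address(&(local_A->n_bar), &address);
    displacements[0] = address - start_address;
    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;
    MPI_Type_create_struct(2, block_lengths, displacements,
        typelist, new_type);                           /* replaces MPI_Type_struct */
    MPI_Type_commit(new_type);
}
#endif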
/*********************************************************/
/* Local matrix multiplication function
* with OpenMP thread acceleration
*/
void Local_matrix_multiply(
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
int i, j, k;
// int my_rank;
// MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split
for (i = 0; i < Order(local_A); i++) {
// printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num());
for (j = 0; j < Order(local_A); j++)
for (k = 0; k < Order(local_B); k++)
Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and columns in local_B, for column-major storage
+ Entry(local_A,i,k)*Entry(local_B,j,k); // contiguous memory access, local matrix multiplication A(i,k)*B^T(j,k)
/* Entry(local_C,i,j) = Entry(local_C,i,j)
+ Entry(local_A,i,k)*Entry(local_B,k,j); // non-contiguous memory access, A(i,k)*B^T(j,k) is preferable
*/
}
} /* Local_matrix_multiply */
/*********************************************************/
/* Receive and print local matrix A:
* Process 0 prints its own block of local_A
* The other processes send their local_A blocks to process 0
* and process 0 receives and prints them
*/
void Print_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
printf("%20.15E ", Entry(local_A,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_A */
/*********************************************************/
/* Receive and print local matrix B (stored locally as its transpose):
* Process 0 prints its own block of local_B
* The other processes send their local_B blocks to process 0
* and process 0 receives and prints them
*/
void Print_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_B */
/*********************************************************/
/* Receive and print local matrix C:
* Process 0 prints its own block of local_C
* The other processes send their local_C blocks to process 0
* and process 0 receives and prints them
*/
void Print_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
printf("%20.15E ", Entry(local_C,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_C */
/*********************************************************/
/* Receive and write local matrix A to a file:
* Process 0 writes its own block of local_A
* The other processes send their local_A blocks to process 0
* and process 0 receives and writes them
*/
void Write_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_A.dat","w+");
printf("%s\n", title);
fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
fprintf(fp,"%20.15E ", Entry(local_A,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_A */
/*********************************************************/
/* Receive and write local matrix B (stored locally as its transpose) to a file:
* Process 0 writes its own block of local_B
* The other processes send their local_B blocks to process 0
* and process 0 receives and writes them
*/
void Write_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_B.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_B */
/*********************************************************/
/* Receive and write local matrix C:
* process 0 writes its own local_C,
* every other process sends its local_C to process 0,
* and process 0 receives and writes each of them in turn.
*/
void Write_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// gathered and written to file by process 0 of the process mesh
if (grid->my_rank == 0) {
fp = fopen("local_C.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
fprintf(fp, "%20.15E ", Entry(local_C,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_C */
|
Grid.h
|
/*
* Grid.h
* Cubism
*
* Copyright 2018 ETH Zurich. All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <vector>
#include <iostream>
#include <fstream>
#include <cassert>
#include <algorithm>
#include "BlockInfo.h"
template <typename Block, template<typename X> class allocator=std::allocator>
class Grid
{
Block * m_blocks;
std::vector<BlockInfo> m_vInfo;
protected:
const unsigned int NX, NY, NZ, N;
const double maxextent;
void _dealloc()
{
allocator<Block> alloc;
alloc.deallocate(m_blocks, N);
}
void _alloc()
{
allocator<Block> alloc;
m_blocks = alloc.allocate(N);
assert(m_blocks!=NULL);
//numa touch
#pragma omp parallel
{
#pragma omp for schedule(static)
for(int i=0; i<(int)N; ++i)
m_blocks[i].clear();
}
}
Block* _linaccess(const unsigned int idx) const
{
assert(idx >= 0);
assert(idx < N);
return m_blocks + idx;
}
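// Worked example (added comment, not part of the original Cubism source):
// _encode below lays blocks out x-fastest, then y, then z; with NX = 4 and
// NY = 4, the block at (ix, iy, iz) = (1, 2, 3) gets the linear index
// 1 + 4*(2 + 4*3) = 57.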
unsigned int _encode(const unsigned int ix, const unsigned int iy, const unsigned int iz) const
{
assert(ix>=0 && ix<NX);
assert(iy>=0 && iy<NY);
assert(iz>=0 && iz<NZ);
return ix + NX*(iy + NY*iz);
}
public:
typedef Block BlockType;
Grid(const unsigned int NX, const unsigned int NY = 1, const unsigned int NZ = 1, const double maxextent = 1) :
m_blocks(NULL), NX(NX), NY(NY), NZ(NZ), N(NX*NY*NZ), maxextent(maxextent)
{
_alloc();
const double h = (maxextent / std::max(NX, std::max(NY, NZ)));
for(unsigned int iz=0; iz<NZ; iz++)
for(unsigned int iy=0; iy<NY; iy++)
for(unsigned int ix=0; ix<NX; ix++)
{
const long long blockID = _encode(ix, iy, iz);
const int idx[3] = {(int)ix, (int)iy, (int)iz};
const double origin[3] = {ix*h, iy*h, iz*h};
m_vInfo.push_back(BlockInfo(blockID, idx, origin, h, h/Block::sizeX, _linaccess(blockID)));
}
}
virtual ~Grid() { _dealloc(); }
void setup(const unsigned int nX, const unsigned int nY, const unsigned int nZ)
{
std::cout << "Setting up the grid with " << nX << "x" << nY << "x" << nZ << " blocks ...";
_dealloc();
_alloc();
std::cout << "done. " << std::endl;
}
virtual int getBlocksPerDimension(int idim) const
{
assert(idim>=0 && idim<3);
switch (idim)
{
case 0: return NX;
case 1: return NY;
case 2: return NZ;
default: abort();
return 0;
}
}
virtual bool avail(unsigned int ix, unsigned int iy=0, unsigned int iz=0) const { return true; }
virtual Block& operator()(unsigned int ix, unsigned int iy=0, unsigned int iz=0) const
{
return *_linaccess( _encode((ix+NX) % NX, (iy+NY) % NY, (iz+NZ) % NZ) );
}
virtual std::vector<BlockInfo>& getBlocksInfo()
{
return m_vInfo;
}
virtual const std::vector<BlockInfo>& getBlocksInfo() const
{
return m_vInfo;
}
double getH() const
{
const std::vector<BlockInfo>& vInfo = this->getBlocksInfo();
const BlockInfo& info = vInfo[0];
return info.h_gridpoint;
}
};
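// A minimal usage sketch (added comment, not part of the original source).
// It assumes a hypothetical block type satisfying the two requirements
// visible above: a static sizeX (used when the BlockInfo entries are built)
// and a clear() method (called from _alloc()); blocks are then accessed
// through operator() as defined above.
//
// struct MyBlock
// {
// static const int sizeX = 8;
// double data[8];
// void clear() { for (int i = 0; i < 8; ++i) data[i] = 0; }
// };
//
// void example()
// {
// Grid<MyBlock> grid(4, 4, 4, 1.0); // 4x4x4 blocks, unit extent
// for (unsigned int iz = 0; iz < 4; ++iz)
// for (unsigned int iy = 0; iy < 4; ++iy)
// for (unsigned int ix = 0; ix < 4; ++ix)
// grid(ix, iy, iz).clear();
// }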
template <typename Block, template<typename X> class allocator>
std::ostream& operator<< (std::ostream& out, const Grid<Block, allocator>& grid)
{
//save metadata
out << grid.getBlocksPerDimension(0) << " "
<< grid.getBlocksPerDimension(1) << " "
<< grid.getBlocksPerDimension(2) << std::endl;
return out;
}
template <typename Block, template<typename X> class allocator>
std::ifstream& operator>> (std::ifstream& in, Grid<Block, allocator>& grid)
{
//read metadata
unsigned int nx, ny, nz;
in >> nx;
in.ignore(1,' ');
in >> ny;
in.ignore(1,' ');
in >> nz;
in.ignore(1,'\n');
grid.setup(nx, ny, nz);
return in;
}
|
GB_unop__identity_uint8_int16.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_int16
// op(A') function: GB_unop_tran__identity_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
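// Note (added, not part of the generated file): the uint8_t cast above follows
// the usual C conversion rule for unsigned targets, i.e. the int16_t value is
// reduced modulo 256. For example, aij = 300 yields cij = 44, and aij = -1
// yields cij = 255.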
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_int16
(
uint8_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
TagList.h
|
//
// Created by lopea on 5/29/20.
//
#ifndef GIGAENGINE_TAGLIST_H
#define GIGAENGINE_TAGLIST_H
#include <cstdint>
#include <vector>
#include <deque>
#include <set>
#include <algorithm> // std::find, std::sort, std::set_intersection
#include <iterator>  // std::back_inserter
typedef uint64_t Entity;
class GenericTagList
{
public:
virtual ~GenericTagList() = default;
};
template<typename T>
class TagList : public GenericTagList
{
public:
void AddTag(Entity entity);
bool HasTag(Entity entity) const;
void RemoveTag(Entity entity);
void RemoveTags(std::vector<Entity>& entities);
void clear();
bool empty() const;
std::vector<Entity> GetOverlappingEntities(const std::vector<Entity>& reference);
~TagList();
private:
std::deque<Entity> entities_;
bool sorted_ = false;
};
template<typename T>
void TagList<T>::AddTag(Entity entity)
{
#pragma omp critical
{
entities_.push_back(entity);
sorted_ = false;
}
}
template<typename T>
TagList<T>::~TagList()
{
clear();
}
template<typename T>
bool TagList<T>::HasTag(Entity entity) const
{
return std::find(entities_.begin(),entities_.end(),entity) != entities_.end();
}
template<typename T>
void TagList<T>::RemoveTag(Entity entity)
{
auto it = std::find(entities_.begin(),entities_.end(), entity);
if (it != entities_.end())
{
entities_.erase(it);
}
}
template<typename T>
void TagList<T>::clear()
{
entities_.clear();
}
template<typename T>
bool TagList<T>::empty() const
{
return entities_.empty();
}
template<typename T>
std::vector<Entity> TagList<T>::GetOverlappingEntities(const std::vector<Entity> &reference)
{
std::vector<Entity> result;
if(!sorted_)
{
std::sort(entities_.begin(),entities_.end());
sorted_ = true;
}
std::set_intersection(entities_.begin(), entities_.end(),reference.begin(),reference.end(), std::back_inserter(result));
return result;
}
template<typename T>
void TagList<T>::RemoveTags(std::vector<Entity> &entities)
{
for(Entity entity : entities)
{
RemoveTag(entity);
}
}
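// A minimal usage sketch (added comment, not part of the original header).
// Note that GetOverlappingEntities() relies on std::set_intersection, so the
// 'reference' vector passed in must already be sorted; the internal deque is
// sorted lazily before the intersection.
//
// struct PlayerTag {};
// TagList<PlayerTag> players;
// players.AddTag(3);
// players.AddTag(1);
// std::vector<Entity> reference = {1, 2, 3}; // must already be sorted
// std::vector<Entity> overlap = players.GetOverlappingEntities(reference); // {1, 3}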
#endif //GIGAENGINE_TAGLIST_H
|
kernel_matern2.c
|
/*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @generate NDIM -> n 1 2 3 4
* Generate different functions for different dimensions. This hack improves
* performance in certain cases. Value 'n' stands for the general case, whereas
* all other values correspond to static dimensionalities.
* During the code generation step, each appearance of @NDIM (including this
* one) will be replaced by the proposed values. If you want to use this file
* outside STARS-H, simply do the substitutions yourself.
*
* @file src/applications/spatial/kernel_matern2.c
* @version 0.1.1
* @author Aleksandr Mikhalev
* @date 2018-11-06
*/
#include "common.h"
#include "starsh.h"
#include "starsh-spatial.h"
// If dimensionality is static
#if (@NDIM != n)
//! Replace variable ndim with static integer value
#define ndim @NDIM
#endif
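// Illustration (added comment, not part of the original source): for @NDIM
// substituted by 2, the generation step presumably produces
// #define ndim 2
// void starsh_ssdata_block_matern2_kernel_2d(...)
// whereas @NDIM substituted by 'n' leaves ndim as a runtime variable read
// from data1->particles.ndim inside the kernels below.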
#ifdef GSL
void starsh_ssdata_block_matern2_kernel_@NDIMd(int nrows, int ncols,
STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
void *result, int ld)
//! Matérn kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
* \f[
* A_{ij} = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)} \left( \frac{r_{ij}}
* {\beta} \right)^{\nu} K_{\nu} \left( \frac{r_{ij}}{\beta} \right) +
* \mu \delta(r_{ij}),
* \f]
* where \f$ \Gamma \f$ is the Gamma function, \f$ K_{\nu} \f$ is the modified
* Bessel function of the second kind, \f$ \delta \f$ is the delta function
* \f[
* \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
* \end{array} \right.,
* \f]
* \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
* points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$,
* smoothing parameter \f$ \nu \f$ and noise \f$ \mu \f$ come from \p
* row_data (\ref STARSH_ssdata object). No memory is allocated in this
* function!
*
* @param[in] nrows: Number of rows of \f$ A \f$.
* @param[in] ncols: Number of columns of \f$ A \f$.
* @param[in] irow: Array of row indexes.
* @param[in] icol: Array of column indexes.
* @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
* @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
* @param[out] result: Pointer to memory of \f$ A \f$.
* @param[in] ld: Leading dimension of `result`.
* @sa starsh_ssdata_block_matern2_kernel_1d(),
* starsh_ssdata_block_matern2_kernel_2d(),
* starsh_ssdata_block_matern2_kernel_3d(),
* starsh_ssdata_block_matern2_kernel_4d(),
* starsh_ssdata_block_matern2_kernel_nd().
* @ingroup app-spatial-kernels
* */
{
int i, j, k;
STARSH_ssdata *data1 = row_data;
STARSH_ssdata *data2 = col_data;
double tmp, dist;
// Read parameters
// If dimensionality is not static
#if (@NDIM == n)
int ndim = data1->particles.ndim;
#endif
double beta = data1->beta;
double nu = data1->nu;
double noise = data1->noise;
double sigma = data1->sigma;
// Get coordinates
STARSH_int count1 = data1->particles.count;
STARSH_int count2 = data2->particles.count;
double *x1[ndim], *x2[ndim];
x1[0] = data1->particles.point;
x2[0] = data2->particles.point;
//#pragma omp simd
for(i = 1; i < ndim; i++)
{
x1[i] = x1[0]+i*count1;
x2[i] = x2[0]+i*count2;
}
double *x1_cur, *x2_cur;
double *buffer = result;
// Fill column-major matrix
//#pragma omp simd
for(j = 0; j < ncols; j++)
{
for(i = 0; i < nrows; i++)
{
dist = 0.0;
for(k = 0; k < ndim; k++)
{
tmp = x1[k][irow[i]]-x2[k][icol[j]];
dist += tmp*tmp;
}
dist = sqrt(dist)/beta;
if(dist == 0)
buffer[j*(size_t)ld+i] = sigma+noise;
else
buffer[j*(size_t)ld+i] = sigma*pow(2.0, 1.0-nu)/
gsl_sf_gamma(nu)*pow(dist, nu)*
gsl_sf_bessel_Knu(nu, dist);
}
}
}
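// Side note (added comment, not part of the original source): for the special
// case nu = 1/2 the Matern expression above reduces to the exponential kernel,
// since 2^{1/2}/Gamma(1/2) * x^{1/2} * K_{1/2}(x) = exp(-x), so every
// off-diagonal entry computed above becomes sigma * exp(-r_ij / beta).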
void starsh_ssdata_block_matern2_kernel_@NDIMd_simd(int nrows, int ncols,
STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
void *result, int ld)
//! Matérn kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
* \f[
* A_{ij} = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)} \left( \frac{r_{ij}}
* {\beta} \right)^{\nu} K_{\nu} \left( \frac{r_{ij}}{\beta} \right) +
* \mu \delta(r_{ij}),
* \f]
* where \f$ \Gamma \f$ is the Gamma function, \f$ K_{\nu} \f$ is the modified
* Bessel function of the second kind, \f$ \delta \f$ is the delta function
* \f[
* \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
* \end{array} \right.,
* \f]
* \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
* points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$,
* smoothing parameter \f$ \nu \f$ and noise \f$ \mu \f$ come from \p
* row_data (\ref STARSH_ssdata object). No memory is allocated in this
* function!
*
* Uses SIMD instructions.
*
* @param[in] nrows: Number of rows of \f$ A \f$.
* @param[in] ncols: Number of columns of \f$ A \f$.
* @param[in] irow: Array of row indexes.
* @param[in] icol: Array of column indexes.
* @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
* @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
* @param[out] result: Pointer to memory of \f$ A \f$.
* @param[in] ld: Leading dimension of `result`.
* @sa starsh_ssdata_block_matern2_kernel_1d_simd(),
* starsh_ssdata_block_matern2_kernel_2d_simd(),
* starsh_ssdata_block_matern2_kernel_3d_simd(),
* starsh_ssdata_block_matern2_kernel_4d_simd(),
* starsh_ssdata_block_matern2_kernel_nd_simd().
* @ingroup app-spatial-kernels
* */
{
int i, j, k;
STARSH_ssdata *data1 = row_data;
STARSH_ssdata *data2 = col_data;
double tmp, dist;
// Read parameters
// If dimensionality is not static
#if (@NDIM == n)
int ndim = data1->particles.ndim;
#endif
double beta = data1->beta;
double nu = data1->nu;
double noise = data1->noise;
double sigma = data1->sigma;
// Get coordinates
STARSH_int count1 = data1->particles.count;
STARSH_int count2 = data2->particles.count;
double *x1[ndim], *x2[ndim];
x1[0] = data1->particles.point;
x2[0] = data2->particles.point;
#pragma omp simd
for(i = 1; i < ndim; i++)
{
x1[i] = x1[0]+i*count1;
x2[i] = x2[0]+i*count2;
}
double *x1_cur, *x2_cur;
double *buffer = result;
// Fill column-major matrix
#pragma omp simd
for(j = 0; j < ncols; j++)
{
for(i = 0; i < nrows; i++)
{
dist = 0.0;
for(k = 0; k < ndim; k++)
{
tmp = x1[k][irow[i]]-x2[k][icol[j]];
dist += tmp*tmp;
}
dist = sqrt(dist)/beta;
if(dist == 0)
buffer[j*(size_t)ld+i] = sigma+noise;
else
buffer[j*(size_t)ld+i] = sigma*pow(2.0, 1.0-nu)/
gsl_sf_gamma(nu)*pow(dist, nu)*
gsl_sf_bessel_Knu(nu, dist);
}
}
}
#endif // GSL
|
impliedBarrier.c
|
// OpenMP Implied Barrier Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Main
int main( int argc, char** argv ) {
int i = 0; // Loop Iterator
int n = ( argc > 1 ) ? atoi( argv[ 1 ] ) : 8; // Number of Iterations (from argv, default 8)
double start = 0.0; // Start Time
double middle = 0.0; // Middle Time
double end = 0.0; // End Time
double for1 = 0.0; // For Loop 1 Time
double for2 = 0.0; // For Loop 2 Time
double total = 0.0; // Total Time
// Parallel Region
#pragma omp parallel \
shared( n ) \
private( i )
{
start = omp_get_wtime( ); // Get Start Time
#pragma omp for // Parallelize For Loop
for( i = 0; i < n; i++ ) { // Iterate Through
printf( "Thread %d of %d - Iteration %d\n",
omp_get_thread_num( ),
omp_get_num_threads( ), i );
}
middle = omp_get_wtime( ); // Get Middle Time
#pragma omp for // Parallelize For Loop
for( i = 0; i < n; i++ ) { // Iterate Through
printf( "Thread %d of %d - Iteration %d\n",
omp_get_thread_num( ),
omp_get_num_threads( ), i );
}
end = omp_get_wtime( ); // Get End Time
}
// Calculate Time
for1 = middle - start;
for2 = end - middle;
total = end - start;
// Display Time
printf( "For Loop 1: %0.9lf\n", for1 );
printf( "For Loop 2: %0.9lf\n", for2 );
printf( "Total Time: %0.9lf\n", total );
return 0;
}
// End impliedBarrier.c - EWG SDG
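// Side note (added comment, not part of the original example): each
// "#pragma omp for" above ends with an implied barrier, which is why the
// 'middle' and 'end' timestamps are taken only after every thread has
// finished the corresponding loop. A sketch of how the barrier could be
// removed (standard OpenMP nowait clause, shown here only for contrast):
//
// #pragma omp for nowait // no implied barrier at the end of this loop
// for( i = 0; i < n; i++ ) {
// /* threads may continue past the loop without waiting */
// }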
|
facedetectcnn.h
|
/*
The MIT License (MIT)
Copyright (c) 2018-2019 Shiqi Yu
[email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#pragma once
#define _ENABLE_AVX2 //Please enable it on x64 CPUs
//#define _ENABLE_NEON //Please enable it on ARM CPUs
int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be RGB (three-channel) image!
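/* A minimal calling sketch (added, not part of the original header). Only the
two requirements stated above are assumed: a result buffer of 0x20000 bytes and
a three-channel RGB input image; 'rgb', 'width', 'height' and 'step' below are
hypothetical caller-side variables, and the layout of the returned result array
is not documented here, so it is left uninterpreted.

unsigned char result_buffer[0x20000];
int * results = facedetect_cnn(result_buffer, rgb, width, height, step);
*/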
//DO NOT EDIT the following code if you don't really understand it.
#if defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
#define _ENABLE_INT8_CONV
#endif
#if defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable AVX2 and NEON at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
typedef struct FaceRect_
{
float score;
int x;
int y;
int w;
int h;
}FaceRect;
class CDataBlob
{
public:
float * data_float;
signed char * data_int8;
int width;
int height;
int channels;
int floatChannelStepInByte;
int int8ChannelStepInByte;
float int8float_scale;
bool int8_data_valid;
public:
CDataBlob() {
data_float = 0;
data_int8 = 0;
width = 0;
height = 0;
channels = 0;
floatChannelStepInByte = 0;
int8ChannelStepInByte = 0;
int8float_scale = 1.0f;
int8_data_valid = false;
}
CDataBlob(int w, int h, int c)
{
data_float = 0;
data_int8 = 0;
create(w, h, c);
}
~CDataBlob()
{
setNULL();
}
void setNULL()
{
if (data_float)
myFree(&data_float);
if (data_int8)
myFree(&data_int8);
width = height = channels = floatChannelStepInByte = int8ChannelStepInByte = 0;
int8float_scale = 1.0f;
int8_data_valid = false;
}
bool create(int w, int h, int c)
{
setNULL();
width = w;
height = h;
channels = c;
//alloc space for float array
int remBytes = (sizeof(float)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
floatChannelStepInByte = channels * sizeof(float);
else
floatChannelStepInByte = (channels * sizeof(float)) + (_MALLOC_ALIGN / 8) - remBytes;
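// Worked example (added comment, not in the original source): with channels = 3
// and the default _MALLOC_ALIGN of 128 bits (16 bytes), 3*sizeof(float) = 12,
// remBytes = 12, so floatChannelStepInByte = 12 + 16 - 12 = 16; with _ENABLE_AVX2
// (256 bits = 32 bytes) it becomes 32. Each pixel's channel data is thus padded
// up to the next alignment boundary.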
data_float = (float*)myAlloc(width * height * floatChannelStepInByte);
//alloc space for int8 array
remBytes = (sizeof(char)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
int8ChannelStepInByte = channels * sizeof(char);
else
int8ChannelStepInByte = (channels * sizeof(char)) + (_MALLOC_ALIGN / 8) - remBytes;
data_int8 = (signed char*)myAlloc(width * height * int8ChannelStepInByte);
if (data_float == NULL)
{
cerr << "Cannot alloc memeory for float data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
if (data_int8 == NULL)
{
cerr << "Cannot alloc memeory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data_float, 0, width * height * floatChannelStepInByte);
//memset(data_int8, 0, width * height * int8ChannelStepInByte);
//the following code is faster than memset,
//but it only zeroes the padding bytes (channels beyond 'channels'),
//not the whole buffer. BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
int pixel_end = this->floatChannelStepInByte / sizeof(float);
float * pF = (float*)(this->data_float + (r * this->width + c) * this->floatChannelStepInByte/sizeof(float));
for (int ch = this->channels; ch < pixel_end; ch++)
pF[ch] = 0;
pixel_end = this->int8ChannelStepInByte / sizeof(char);
char * pI = (char*)(this->data_int8 + (r * this->width + c) * this->int8ChannelStepInByte/sizeof(char));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
bool setInt8DataFromCaffeFormat(signed char * pData, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dim of the data can not match that of the Blob." << endl;
return false;
}
//create(dataWidth, dataHeight, dataChannels);
for(int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
signed char * p = (this->data_int8 + (width * row + col) * int8ChannelStepInByte /sizeof(char));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
return true;
}
bool setFloatDataFromCaffeFormat(float * pData, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dim of the data can not match that of the Blob." << endl;
return false;
}
//create(dataWidth, dataHeight, dataChannels);
for (int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
float * p = (this->data_float + (width * row + col) * floatChannelStepInByte / sizeof(float));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
return true;
}
bool setDataFromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
int * pChannelMean)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (pChannelMean == NULL)
{
cerr << "The mean values is null." << endl;
return false;
}
create(imgWidth, imgHeight, imgChannels);
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < imgHeight; r++)
{
for (int c = 0; c < imgWidth; c++)
{
const unsigned char * pImgData = imgData + imgWidthStep * r + imgChannels * c;
float * pBlobData = this->data_float + (this->width * r + c) * this->floatChannelStepInByte /sizeof(float);
for (int ch = 0; ch < imgChannels; ch++)
pBlobData[ch] = (float)(pImgData[ch] - pChannelMean[ch]);
}
}
return true;
}
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
int * pChannelMean)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (pChannelMean == NULL)
{
cerr << "The mean values is null." << endl;
return false;
}
if (imgChannels != 3)
{
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth+1)/2, (imgHeight+1)/2, 27);
//since the pixel assignment below cannot fill all the elements in the blob,
//the remaining elements must be initialized to 0 first
memset(data_float, 0, width * height * floatChannelStepInByte);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
float * pData = this->data_float + (r * this->width + c) * this->floatChannelStepInByte / sizeof(float);
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + imgWidthStep * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
pData[output_channel_offset] = (float)(pImgData[0] - pChannelMean[0]);
pData[output_channel_offset+1] = (float)(pImgData[1] - pChannelMean[1]);
pData[output_channel_offset+2] = (float)(pImgData[2] - pChannelMean[2]);
}
}
}
}
return true;
}
float getElementFloat(int x, int y, int channel)
{
if (this->data_float)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
float * p = (float*)(this->data_float + (y*this->width + x)*this->floatChannelStepInByte / sizeof(float));
return p[channel];
}
}
return 0.f;
}
int getElementint8(int x, int y, int channel)
{
if (this->data_int8 && this->int8_data_valid)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
signed char * p = this->data_int8 + (y*this->width + x)*this->int8ChannelStepInByte/sizeof(char);
return p[channel];
}
}
return 0;
}
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
output << "DataBlob Size (Width, Height, Channel) = ("
<< dataBlob.width
<< ", " << dataBlob.height
<< ", " << dataBlob.channels
<< ")" << endl;
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int row = 0; row < dataBlob.height; row++)
{
output << "(";
for (int col = 0; col < dataBlob.width; col++)
{
float * p = (dataBlob.data_float + (dataBlob.width * row + col) * dataBlob.floatChannelStepInByte/sizeof(float));
output << p[ch];
if (col != dataBlob.width - 1)
output << ", ";
}
output << ")" << endl;
}
}
return output;
}
};
class Filters {
public:
vector<CDataBlob *> filters;
int pad;
int stride;
float scale; //element * scale = original value
};
bool convolution(CDataBlob *inputData, const Filters* filters, CDataBlob *outputData);
bool maxpooling2x2S2(const CDataBlob *inputData, CDataBlob *outputData);
bool concat4(const CDataBlob *inputData1, const CDataBlob *inputData2, const CDataBlob *inputData3, const CDataBlob *inputData4, CDataBlob *outputData);
bool scale(CDataBlob * dataBlob, float scale);
bool relu(const CDataBlob *inputOutputData);
bool priorbox(const CDataBlob * featureData, const CDataBlob * imageData, int num_sizes, float * pWinSizes, CDataBlob * outputData);
bool normalize(CDataBlob * inputOutputData, float * pScale);
bool blob2vector(const CDataBlob * inputData, CDataBlob * outputData, bool isFloat);
bool detection_output(const CDataBlob * priorbox, const CDataBlob * loc, const CDataBlob * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob * outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
bool softmax1vector2class(const CDataBlob *inputOutputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int width, int height, int step);
|
H2ERI_build_Coulomb.c
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#include "H2Pack_matvec.h"
#include "H2Pack_utils.h"
#include "H2ERI_typedef.h"
#include "H2ERI_build_Coulomb.h"
#include "H2ERI_matvec.h"
#include "utils.h" // In H2Pack
// "Uncontract" the density matrix according to SSP and unroll
// the result to a column for H2 matvec.
// Input parameters:
// den_mat : Symmetric density matrix, size h2eri->num_bf^2
// h2eri->num_bf : Number of basis functions in the system
// h2eri->num_sp : Number of screened shell pairs (SSP)
// h2eri->shell_bf_sidx : Array, size nshell, indices of each shell's
// first basis function
// h2eri->sp_bfp_sidx : Array, size num_sp+1, indices of each
// SSP's first basis function pair
// h2eri->sp_shell_idx : Array, size 2 * num_sp, each row is
// the contracted shell indices of a SSP
// Output parameter:
// h2eri->unc_denmat_x : Array, size num_sp_bfp, uncontracted density matrix
void H2ERI_uncontract_den_mat(H2ERI_p h2eri, const double *den_mat)
{
int num_bf = h2eri->num_bf;
int num_sp = h2eri->num_sp;
int *shell_bf_sidx = h2eri->shell_bf_sidx;
int *sp_bfp_sidx = h2eri->sp_bfp_sidx;
int *sp_shell_idx = h2eri->sp_shell_idx;
double *x = h2eri->unc_denmat_x;
#pragma omp parallel for schedule(dynamic, 16)
for (int i = 0; i < num_sp; i++)
{
int x_spos = sp_bfp_sidx[i];
int shell_idx0 = sp_shell_idx[i];
int shell_idx1 = sp_shell_idx[i + num_sp];
int srow = shell_bf_sidx[shell_idx0];
int erow = shell_bf_sidx[shell_idx0 + 1];
int scol = shell_bf_sidx[shell_idx1];
int ecol = shell_bf_sidx[shell_idx1 + 1];
int nrow = erow - srow;
int ncol = ecol - scol;
double sym_coef = (shell_idx0 == shell_idx1) ? 1.0 : 2.0;
// Originally we need to store den_mat[srow:erow-1, scol:ecol-1]
// column by column to x(x_spos:x_epos-1). Since den_mat is
// symmetric, we store den_mat[scol:ecol-1, srow:erow-1] row by
// row to x(x_spos:x_epos-1).
for (int j = 0; j < ncol; j++)
{
const double *den_mat_ptr = den_mat + (scol + j) * num_bf + srow;
double *x_ptr = x + x_spos + j * nrow;
#pragma omp simd
for (int k = 0; k < nrow; k++)
x_ptr[k] = sym_coef * den_mat_ptr[k];
}
}
}
// "Contract" the H2 matvec result according to SSP and reshape
// the result to form a symmetric Coulomb matrix
// Input parameters:
// h2eri->num_bf : Number of basis functions in the system
// h2eri->num_sp : Number of SSP
// h2eri->shell_bf_sidx : Array, size nshell, indices of each shell's
// first basis function
// h2eri->sp_bfp_sidx : Array, size num_sp+1, indices of each
// SSP's first basis function pair
// h2eri->sp_shell_idx : Array, size 2 * num_sp, each row is
// the contracted shell indices of a SSP
// h2eri->H2_matvec_y : Array, size num_sp_bfp, H2 matvec result
// Output parameter:
// J_mat : Symmetric Coulomb matrix, size h2eri->num_bf^2
void H2ERI_contract_H2_matvec(H2ERI_p h2eri, double *J_mat)
{
int num_bf = h2eri->num_bf;
int num_sp = h2eri->num_sp;
int *shell_bf_sidx = h2eri->shell_bf_sidx;
int *sp_bfp_sidx = h2eri->sp_bfp_sidx;
int *sp_shell_idx = h2eri->sp_shell_idx;
double *y = h2eri->H2_matvec_y;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_bf * num_bf; i++) J_mat[i] = 0.0;
#pragma omp parallel for schedule(dynamic, 16)
for (int i = 0; i < num_sp; i++)
{
int y_spos = sp_bfp_sidx[i];
int shell_idx0 = sp_shell_idx[i];
int shell_idx1 = sp_shell_idx[i + num_sp];
int srow = shell_bf_sidx[shell_idx0];
int erow = shell_bf_sidx[shell_idx0 + 1];
int scol = shell_bf_sidx[shell_idx1];
int ecol = shell_bf_sidx[shell_idx1 + 1];
int nrow = erow - srow;
int ncol = ecol - scol;
double sym_coef = (shell_idx0 == shell_idx1) ? 0.5 : 1.0;
// Originally we need to reshape y(y_spos:y_epos-1) as a
// nrow-by-ncol column-major matrix and add it to column-major
// matrix J_mat[srow:erow-1, scol:ecol-1]. Since J_mat is
// symmetric, we reshape y(y_spos:y_epos-1) as a ncol-by-nrow
// row-major matrix and add it to J_mat[scol:ecol-1, srow:erow-1].
for (int j = 0; j < ncol; j++)
{
double *J_mat_ptr = J_mat + (scol + j) * num_bf + srow;
double *y_ptr = y + y_spos + j * nrow;
#pragma omp simd
for (int k = 0; k < nrow; k++) J_mat_ptr[k] += sym_coef * y_ptr[k];
}
}
// Symmetrize the Coulomb matrix: J_mat = J_mat + J_mat^T
#pragma omp parallel for schedule(dynamic, 16)
for (int i = 0; i < num_bf; i++)
{
for (int j = 0; j < i; j++)
{
int idx0 = i * num_bf + j;
int idx1 = j * num_bf + i;
double val = J_mat[idx0] + J_mat[idx1];
J_mat[idx0] = val;
J_mat[idx1] = val;
}
int idx_ii = i * num_bf + i;
J_mat[idx_ii] += J_mat[idx_ii];
}
}
// Build the Coulomb matrix using the density matrix, H2 representation
// of the ERI tensor, and H2 matvec
void H2ERI_build_Coulomb(H2ERI_p h2eri, const double *den_mat, double *J_mat)
{
if (h2eri->unc_denmat_x == NULL)
{
size_t vec_msize = sizeof(double) * h2eri->num_sp_bfp;
h2eri->unc_denmat_x = (double *) malloc(vec_msize);
h2eri->H2_matvec_y = (double *) malloc(vec_msize);
assert(h2eri->unc_denmat_x != NULL && h2eri->H2_matvec_y != NULL);
}
H2ERI_uncontract_den_mat(h2eri, den_mat);
H2ERI_matvec(h2eri, h2eri->unc_denmat_x, h2eri->H2_matvec_y);
H2ERI_contract_H2_matvec(h2eri, J_mat);
}
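// A minimal calling sketch (added, not part of the original source). It relies
// only on what the comments above state: den_mat and J_mat are dense symmetric
// num_bf x num_bf matrices, and the matvec work vectors are allocated lazily on
// the first call. The H2ERI_p handle 'h2eri' is assumed to be fully built by
// the rest of the library before this point.
//
// int nbf = h2eri->num_bf;
// double *den_mat = (double *) malloc(sizeof(double) * nbf * nbf);
// double *J_mat = (double *) malloc(sizeof(double) * nbf * nbf);
// /* ... fill den_mat with the current density matrix ... */
// H2ERI_build_Coulomb(h2eri, den_mat, J_mat);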
|
THTensorMath.c
|
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#define TH_OMP_OVERHEAD_THRESHOLD 100000
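/* Added note (not in the original source): this threshold feeds the if()
clauses of the "#pragma omp parallel for" directives below, so a loop is only
executed by the thread team when the tensor has enough elements to amortize
the parallelization overhead; smaller tensors run serially. */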
void THTensor_(fill)(THTensor *r_, real value)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, value, r__size); break;);
}
void THTensor_(zero)(THTensor *r_)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, 0, r__size); break;);
}
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1) THError("Mask tensor can take 0 and 1 values only");
else if (*mask_data == 1) *tensor_data = value;);
}
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
THTensor *srct = THTensor_(newContiguous)(src);
real *src_data = THTensor_(data)(srct);
long cntr = 0;
long nelem = THTensor_(nElement)(srct);
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = *src_data;
src_data++;
cntr++;
if (cntr > nelem)
THError("Number of elements of src != mask");
});
if (cntr != nelem)
THError("Number of elements of src != mask");
THTensor_(free)(srct);
}
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
long numel = THByteTensor_sumall(mask);
real *tensor_data;
THTensor_(resize1d)(tensor,numel);
tensor_data = THTensor_(data)(tensor);
TH_TENSOR_APPLY2(real, src, unsigned char, mask,
if (*mask_data > 1)
{
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = *src_data;
tensor_data++;
});
}
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
long i, numel;
THLongStorage *newSize;
THTensor *tSlice, *sSlice;
long *index_data;
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");
THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
numel = THLongTensor_nElement(index);
newSize = THLongStorage_newWithSize(src->nDimension);
THLongStorage_rawCopy(newSize,src->size);
newSize->data[dim] = numel;
THTensor_(resize)(tensor,newSize,NULL);
THLongStorage_free(newSize);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (src->nDimension > 1)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, i);
THTensor_(select)(sSlice, src, dim, index_data[i]-1);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i]-1));
}
}
THLongTensor_free(index);
}
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long i, numel;
THTensor *tSlice, *sSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, index_data[i]-1);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i));
}
}
THLongTensor_free(index);
}
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
long i, numel;
THTensor *tSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < tensor->nDimension,4,"Indexing dim is out of bounds");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor,dim,index_data[i]-1);
THTensor_(fill)(tSlice, val);
THTensor_(free)(tSlice);
}
else
{
THTensor_(set1d)(tensor,index_data[i]-1,val);
}
}
THLongTensor_free(index);
}
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* we use a trick here. careful with that. */
TH_TENSOR_APPLY2(real, tensor, real, src,
long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor, if(*tensor_data < theMin) theMin = *tensor_data;);
return theMin;
}
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor, if(*tensor_data > theMax) theMax = *tensor_data;);
return theMax;
}
accreal THTensor_(sumall)(THTensor *tensor)
{
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
return sum;
}
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] + value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] * value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] / value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] + value * sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
}
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] * sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
}
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long i;
#pragma omp parallel for if(THTensor_(nElement)(t) > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<THTensor_(nElement)(t); i++)
{
rp[i] = tp[i] / sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
}
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected");
if( mat->size[1] != vec->size[0] )
THError("size mismatch");
if(t->nDimension != 1)
THError("size mismatch");
if(t->size[0] != mat->size[0])
THError("size mismatch");
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(mat->stride[0] == 1)
{
THBlas_(gemv)('n', mat->size[0], mat->size[1],
alpha, THTensor_(data)(mat), mat->stride[1],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(mat), mat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cmat = THTensor_(newContiguous)(mat);
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(cmat), cmat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
THTensor_(free)(cmat);
}
}
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
long N1 = m1->size[0];
long N2 = m2->size[0];
long dim;
real *m1_p;
real *m2_p;
real *r_p;
long i;
THTensor_(resize2d)(r_, N1, N2);
m1 = THTensor_(newContiguous)(m1);
m2 = THTensor_(newContiguous)(m2);
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
dim = m1->size[1];
THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
m1_p = THTensor_(data)(m1);
m2_p = THTensor_(data)(m2);
r_p = THTensor_(data)(r_);
#pragma omp parallel for private(i)
for (i=0; i<N1; i++) {
long j,k;
for (j=0; j<N2; j++) {
real sum = 0;
for (k=0; k<dim; k++) {
real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
sum += term*term;
}
r_p[ i*N2 + j ] = gain * sum;
}
}
THTensor_(free)(m1);
THTensor_(free)(m2);
}
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2) )
THError("matrix and matrix expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
THError("size mismatch");
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
/* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */
/* r_ */
if(r_->stride[0] == 1)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1)
{
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
r__ = THTensor_(newWithSize2d)(r_->size[1], r_->size[0]);
THTensor_(copy)(r__, r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
}
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THTensor_(data)(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THTensor_(data)(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THTensor_(data)(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THTensor_(free)(m1_);
if(m2_ != m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
THError("size mismatch");
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(beta != 1)
THTensor_(mul)(r_, r_, beta);
if(r_->stride[0] == 1)
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
}
long THTensor_(numel)(THTensor *t)
{
return THTensor_(nElement)(t);
}
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long theIndex = 0;
real theMax = t_data[0];
for(i = 1; i < t_size; i++)
{
if(t_data[i*t_stride] > theMax)
{
theIndex = i;
theMax = t_data[i*t_stride];
}
}
*indices__data = theIndex;
*values__data = theMax;);
}
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long theIndex = 0;
real theMin = t_data[0];
for(i = 1; i < t_size; i++)
{
if(t_data[i*t_stride] < theMin)
{
theIndex = i;
theMin = t_data[i*t_stride];
}
}
*indices__data = theIndex;
*values__data = theMin;);
}
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum;);
}
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal prod = 1;
long i;
for(i = 0; i < t_size; i++)
prod *= t_data[i*t_stride];
*r__data = (real)prod;);
}
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumsum = 0;
long i;
for(i = 0; i < t_size; i++)
{
cumsum += t_data[i*t_stride];
r__data[i*r__stride] = (real)cumsum;
});
}
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumprod = 1;
long i;
for(i = 0; i < t_size; i++)
{
cumprod *= t_data[i*t_stride];
r__data[i*r__stride] = (real)cumprod;
});
}
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
#if defined (TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else *r__data = 0;);
#else
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else if (*t_data < 0) *r__data = -1;
else *r__data = 0;);
#endif
}
accreal THTensor_(trace)(THTensor *t)
{
real *t_data = THTensor_(data)(t);
accreal sum = 0;
long i = 0;
long t_stride_0, t_stride_1, t_diag_size;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
while(i < t_diag_size)
{
sum += t_data[i*(t_stride_0+t_stride_1)];
i++;
}
return sum;
}
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
int i;
if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
THError("inconsitent tensor sizes");
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) != THTensor_(size)(b, i))
THError("inconsistent tensor sizes");
}
if(dimension < 0)
{
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) == 3)
{
dimension = i;
break;
}
}
if(dimension < 0)
THError("no dimension of size 3");
}
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension out of range");
THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension size is not 3");
THTensor_(resizeAs)(r_, a);
TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(zero)(r_);
}
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(fill)(r_, 1);
}
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
if(THTensor_(nDimension)(t) == 1)
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_size = THTensor_(size)(t, 0);
long sz = t_size + (k >= 0 ? k : -k);
real *r__data;
long r__stride_0;
long r__stride_1;
long i;
THTensor_(resize2d)(r_, sz, sz);
THTensor_(zero)(r_);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
for(i = 0; i < t_size; i++)
r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
}
else
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_stride_1 = THTensor_(stride)(t, 1);
long sz;
real *r__data;
long r__stride_0;
long i;
if(k >= 0)
sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
else
sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
THTensor_(resize1d)(r_, sz);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
for(i = 0; i < sz; i++)
r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
}
}
void THTensor_(eye)(THTensor *r_, long n, long m)
{
real *r__data;
long i, sz;
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THTensor_(resize2d)(r_, n, m);
THTensor_(zero)(r_);
i = 0;
r__data = THTensor_(data)(r_);
sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
for(i = 0; i < sz; i++)
r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
void THTensor_(range)(THTensor *r_, real xmin, real xmax, real step)
{
long size;
real i = 0;
THArgCheck(step > 0 || step < 0, 3, "step must be a non-zero number");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper and lower bounds incoherent with step sign");
size = (long)((xmax-xmin)/step+1);
THTensor_(resize1d)(r_, size);
TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
void THTensor_(randperm)(THTensor *r_, long n)
{
real *r__data;
long r__stride_0;
long i;
THArgCheck(n > 0, 1, "must be strictly positive");
THTensor_(resize1d)(r_, n);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_,0);
for(i = 0; i < n; i++)
r__data[i*r__stride_0] = (real)(i);
for(i = 0; i < n-1; i++)
{
long z = THRandom_random() % (n-i);
real sav = r__data[i*r__stride_0];
r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
r__data[(z+i)*r__stride_0] = sav;
}
}
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
http://www.alienryderflex.com/quicksort/
This public-domain C implementation by Darel Rex Finley.
Thanks man :)
Updated Oct 16 2013: change choice of pivot to avoid worst-case being a pre-sorted input - Daniel and Julien
Updated Oct 24 2013: change pivot comparison to strict inequality to avoid worst-case on constant input, see Sedgewick, Algorithms in C, Addison Wesley, 1990, p. 120 - Julien
*/
#define MAX_LEVELS 300
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
real rswap, piv;
beg[0]=0; end[0]=elements;
while (i>=0) {
L=beg[i]; R=end[i]-1;
if (L<R) {
P=(L+R)>>1; /* Choose pivot as middle element of the current block */
piv=arr[P*stride];
pid=idx[P*stride];
rswap=arr[L*stride];
swap=idx[L*stride];
arr[L*stride]=piv;
idx[L*stride]=pid;
arr[P*stride]=rswap;
idx[P*stride]=swap;
while (L<R) {
while (arr[R*stride]>piv && L<R)
R--;
if (L<R) {
idx[L*stride]=idx[R*stride];
arr[L*stride]=arr[R*stride];
L++;
}
while (arr[L*stride]<piv && L<R)
L++;
if (L<R) {
idx[R*stride]=idx[L*stride];
arr[R*stride]=arr[L*stride];
R--;
}
}
idx[L*stride]=pid;
arr[L*stride]=piv;
beg[i+1]=L+1;
end[i+1]=end[i];
end[i++]=L;
if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
}
}
else {
i--;
}
}
}
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
real rswap, piv;
beg[0]=0; end[0]=elements;
while (i>=0) {
L=beg[i]; R=end[i]-1;
if (L<R) {
P=(L+R)>>1; /* Choose pivot as middle element of the current block */
piv=arr[P*stride];
pid=idx[P*stride];
rswap=arr[L*stride];
swap=idx[L*stride];
arr[L*stride]=piv;
idx[L*stride]=pid;
arr[P*stride]=rswap;
idx[P*stride]=swap;
while (L<R) {
while (arr[R*stride]<piv && L<R)
R--;
if (L<R) {
idx[L*stride]=idx[R*stride];
arr[L*stride]=arr[R*stride];
L++;
}
while (arr[L*stride]>piv && L<R)
L++;
if (L<R) {
idx[R*stride]=idx[L*stride];
arr[R*stride]=arr[L*stride];
R--;
}
}
idx[L*stride]=pid;
arr[L*stride]=piv;
beg[i+1]=L+1;
end[i+1]=end[i];
end[i++]=L;
if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
}
}
else {
i--;
}
}
}
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension");
THTensor_(resizeAs)(rt_, t);
THTensor_(copy)(rt_, t);
{
THLongStorage *size = THTensor_(newSizeOf)(t);
THLongTensor_resize(ri_, size, NULL);
THLongStorage_free(size);
}
if(descendingOrder)
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
}
else
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
}
}
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k+1, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
}
}
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
}
}
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
THLongStorage *size;
int i;
int ndim = THMax(ta->nDimension, tb->nDimension);
ndim = THMax(ndim, dimension+1);
THArgCheck(dimension >= 0, 4, "invalid dimension");
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
int tadi = (i < ta->nDimension ? ta->size[i] : 1);
int tbdi = (i < tb->nDimension ? tb->size[i] : 1);
if(i == dimension)
size->data[i] = tadi+tbdi;
else
{
if(tadi != tbdi)
{
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
size->data[i] = tadi;
}
}
THTensor_(resize)(r_, size, NULL);
THLongStorage_free(size);
{
THTensor *nta = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(nta, NULL, dimension, 0, (dimension < ta->nDimension ? ta->size[dimension] : 1));
THTensor_(copy)(nta, ta);
THTensor_(free)(nta);
}
{
THTensor *ntb = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(ntb, NULL, dimension, (dimension < ta->nDimension ? ta->size[dimension] : 1), (dimension < tb->nDimension ? tb->size[dimension] : 1));
THTensor_(copy)(ntb, tb);
THTensor_(free)(ntb);
}
}
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
{ \
THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
{ \
THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY2(real, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
} \
void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
}
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
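/* For reference (illustrative expansion): TENSOR_IMPLEMENT_LOGICAL(lt,<) generates
   THTensor_(ltValue), THTensor_(ltValueT), THTensor_(ltTensor) and
   THTensor_(ltTensorT); each zeroes its result tensor and writes 1 wherever the
   elementwise comparison against the scalar value (or the second tensor) holds. */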
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
}
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
}
LAB_IMPLEMENT_BASIC_FUNCTION(log,log)
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin)
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data););
}
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum/t_size;);
}
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
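/* Worked identity (for clarity): writing m = sum/t_size and n = t_size, the two
   lines above give sum2 = (1/(n-1)) * (sum_i x_i^2 - n*m^2)
   = (1/(n-1)) * sum_i (x_i - m)^2, i.e. the unbiased sample variance, whose
   square root is taken below. */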
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
});
}
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = sum2;
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sum2;
});
}
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
if(value == 0) {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride] != 0.0;
*r__data = sum;)
} else {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += pow(fabs(t_data[i*t_stride]), value);
*r__data = pow(sum, 1.0/value);)
}
}
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else {
TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
return pow(sum, 1.0/value);
}
}
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
real sum = 0;
TH_TENSOR_APPLY2(real, tensor, real, src,
sum += pow(fabs(*tensor_data - *src_data), value);)
return pow(sum, 1.0/value);
}
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
accreal THTensor_(varall)(THTensor *tensor)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= (THTensor_(nElement)(tensor)-1);
return sum;
}
accreal THTensor_(stdall)(THTensor *tensor)
{
return sqrt(THTensor_(varall)(tensor));
}
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than or equal to start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = a;
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + i*(b-a)/((real)(n-1));
i++;
);
}
}
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than or equal to start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a);
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
void THTensor_(rand)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, 0, 1);
}
void THTensor_(randn)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, 0, 1);
}
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
THTensor *clone;
real minval;
real maxval;
real bins;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
bins = (real)(nbins)-1e-6;
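/* Binning note (illustrative): the transform below maps a value x to the 1-based
   bin index floor((x - minval) / (maxval - minval) * bins) + 1; using
   bins = nbins - 1e-6 keeps x == maxval inside the last bin, and indices outside
   [1, nbins] are simply skipped by the guard in the accumulation loop. */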
clone = THTensor_(newWithSize1d)(THTensor_(nElement)(tensor));
THTensor_(copy)(clone,tensor);
THTensor_(add)(clone, clone, -minval);
THTensor_(div)(clone, clone, (maxval-minval));
THTensor_(mul)(clone, clone, bins);
THTensor_(floor)(clone, clone);
THTensor_(add)(clone, clone, 1);
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, clone, \
if ((*clone_data <= nbins) && (*clone_data >= 1)) { \
*(h_data + (int)(*clone_data) - 1) += 1; \
});
THTensor_(free)(clone);
}
#endif /* floating point only part */
#endif
|
phono3py.c
|
/* Copyright (C) 2021 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "phono3py.h"
#include <stdio.h>
#include <stdlib.h>
#include "bzgrid.h"
#include "collision_matrix.h"
#include "fc3.h"
#include "grgrid.h"
#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "isotope.h"
#include "lagrid.h"
#include "lapack_wrapper.h"
#include "phonoc_array.h"
#include "pp_collision.h"
#include "real_self_energy.h"
#include "tetrahedron_method.h"
#include "triplet.h"
#include "triplet_iw.h"
long ph3py_get_interaction(
Darray *fc3_normal_squared, const char *g_zero, const Darray *frequencies,
const lapack_complex_double *eigenvectors, const long (*triplets)[3],
const long num_triplets, const long (*bz_grid_addresses)[3],
const long D_diag[3], const long Q[3][3], const double *fc3,
const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multiplicity)[2],
const double *masses, const long *p2s_map, const long *s2p_map,
const long *band_indices, const long symmetrize_fc3_q,
const double cutoff_frequency) {
ConstBZGrid *bzgrid;
long i, j;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = 0;
for (j = 0; j < 3; j++) {
bzgrid->Q[i][j] = Q[i][j];
}
}
itr_get_interaction(fc3_normal_squared, g_zero, frequencies, eigenvectors,
triplets, num_triplets, bzgrid, fc3, is_compact_fc3,
svecs, multi_dims, multiplicity, masses, p2s_map,
s2p_map, band_indices, symmetrize_fc3_q,
cutoff_frequency);
free(bzgrid);
bzgrid = NULL;
return 1;
}
long ph3py_get_pp_collision(
double *imag_self_energy,
const long relative_grid_address[24][4][3], /* thm */
const double *frequencies, const lapack_complex_double *eigenvectors,
const long (*triplets)[3], const long num_triplets,
const long *triplet_weights, const long (*bz_grid_addresses)[3], /* thm */
const long *bz_map, /* thm */
const long bz_grid_type, const long D_diag[3], const long Q[3][3],
const double *fc3, const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multiplicity)[2],
const double *masses, const long *p2s_map, const long *s2p_map,
const Larray *band_indices, const Darray *temperatures, const long is_NU,
const long symmetrize_fc3_q, const double cutoff_frequency) {
ConstBZGrid *bzgrid;
long i, j;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = 0;
for (j = 0; j < 3; j++) {
bzgrid->Q[i][j] = Q[i][j];
}
}
ppc_get_pp_collision(
imag_self_energy, relative_grid_address, frequencies, eigenvectors,
triplets, num_triplets, triplet_weights, bzgrid, fc3, is_compact_fc3,
svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map, band_indices,
temperatures, is_NU, symmetrize_fc3_q, cutoff_frequency);
free(bzgrid);
bzgrid = NULL;
return 1;
}
long ph3py_get_pp_collision_with_sigma(
double *imag_self_energy, const double sigma, const double sigma_cutoff,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long (*triplets)[3], const long num_triplets,
const long *triplet_weights, const long (*bz_grid_addresses)[3],
const long D_diag[3], const long Q[3][3], const double *fc3,
const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multiplicity)[2],
const double *masses, const long *p2s_map, const long *s2p_map,
const Larray *band_indices, const Darray *temperatures, const long is_NU,
const long symmetrize_fc3_q, const double cutoff_frequency) {
ConstBZGrid *bzgrid;
long i, j;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = 0;
for (j = 0; j < 3; j++) {
bzgrid->Q[i][j] = Q[i][j];
}
}
ppc_get_pp_collision_with_sigma(
imag_self_energy, sigma, sigma_cutoff, frequencies, eigenvectors,
triplets, num_triplets, triplet_weights, bzgrid, fc3, is_compact_fc3,
svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map, band_indices,
temperatures, is_NU, symmetrize_fc3_q, cutoff_frequency);
free(bzgrid);
bzgrid = NULL;
return 1;
}
void ph3py_get_imag_self_energy_at_bands_with_g(
double *imag_self_energy, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double *g, const char *g_zero,
const double temperature, const double cutoff_frequency,
const long num_frequency_points, const long frequency_point_index) {
ise_get_imag_self_energy_with_g(
imag_self_energy, fc3_normal_squared, frequencies, triplets,
triplet_weights, g, g_zero, temperature, cutoff_frequency,
num_frequency_points, frequency_point_index);
}
void ph3py_get_detailed_imag_self_energy_at_bands_with_g(
double *detailed_imag_self_energy, double *imag_self_energy_N,
double *imag_self_energy_U, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const long (*bz_grid_addresses)[3],
const double *g, const char *g_zero, const double temperature,
const double cutoff_frequency) {
ise_get_detailed_imag_self_energy_with_g(
detailed_imag_self_energy, imag_self_energy_N, imag_self_energy_U,
fc3_normal_squared, frequencies, triplets, triplet_weights,
bz_grid_addresses, g, g_zero, temperature, cutoff_frequency);
}
void ph3py_get_real_self_energy_at_bands(
double *real_self_energy, const Darray *fc3_normal_squared,
const long *band_indices, const double *frequencies,
const long (*triplets)[3], const long *triplet_weights,
const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
rse_get_real_self_energy_at_bands(real_self_energy, fc3_normal_squared,
band_indices, frequencies, triplets,
triplet_weights, epsilon, temperature,
unit_conversion_factor, cutoff_frequency);
}
void ph3py_get_real_self_energy_at_frequency_point(
double *real_self_energy, const double frequency_point,
const Darray *fc3_normal_squared, const long *band_indices,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
rse_get_real_self_energy_at_frequency_point(
real_self_energy, frequency_point, fc3_normal_squared, band_indices,
frequencies, triplets, triplet_weights, epsilon, temperature,
unit_conversion_factor, cutoff_frequency);
}
void ph3py_get_collision_matrix(
double *collision_matrix, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplets_map, const long *map_q,
const long *rotated_grid_points, const double *rotations_cartesian,
const double *g, const long num_ir_gp, const long num_gp,
const long num_rot, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
col_get_collision_matrix(collision_matrix, fc3_normal_squared, frequencies,
triplets, triplets_map, map_q, rotated_grid_points,
rotations_cartesian, g, num_ir_gp, num_gp, num_rot,
temperature, unit_conversion_factor,
cutoff_frequency);
}
void ph3py_get_reducible_collision_matrix(
double *collision_matrix, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplets_map, const long *map_q, const double *g,
const long num_gp, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
col_get_reducible_collision_matrix(
collision_matrix, fc3_normal_squared, frequencies, triplets,
triplets_map, map_q, g, num_gp, temperature, unit_conversion_factor,
cutoff_frequency);
}
void ph3py_get_isotope_scattering_strength(
double *gamma, const long grid_point, const double *mass_variances,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long num_grid_points, const long *band_indices, const long num_band,
const long num_band0, const double sigma, const double cutoff_frequency) {
iso_get_isotope_scattering_strength(gamma, grid_point, mass_variances,
frequencies, eigenvectors,
num_grid_points, band_indices, num_band,
num_band0, sigma, cutoff_frequency);
}
void ph3py_get_thm_isotope_scattering_strength(
double *gamma, const long grid_point, const long *ir_grid_points,
const long *weights, const double *mass_variances,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long num_ir_grid_points, const long *band_indices,
const long num_band, const long num_band0,
const double *integration_weights, const double cutoff_frequency) {
iso_get_thm_isotope_scattering_strength(
gamma, grid_point, ir_grid_points, weights, mass_variances, frequencies,
eigenvectors, num_ir_grid_points, band_indices, num_band, num_band0,
integration_weights, cutoff_frequency);
}
void ph3py_distribute_fc3(double *fc3, const long target, const long source,
const long *atom_mapping, const long num_atom,
const double *rot_cart) {
fc3_distribute_fc3(fc3, target, source, atom_mapping, num_atom, rot_cart);
}
void ph3py_rotate_delta_fc2(double (*fc3)[3][3][3],
const double (*delta_fc2s)[3][3],
const double *inv_U,
const double (*site_sym_cart)[3][3],
const long *rot_map_syms, const long num_atom,
const long num_site_sym, const long num_disp) {
fc3_rotate_delta_fc2(fc3, delta_fc2s, inv_U, site_sym_cart, rot_map_syms,
num_atom, num_site_sym, num_disp);
}
void ph3py_get_permutation_symmetry_fc3(double *fc3, const long num_atom) {
fc3_set_permutation_symmetry_fc3(fc3, num_atom);
}
void ph3py_get_permutation_symmetry_compact_fc3(
double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
const long perms[], const long n_satom, const long n_patom) {
fc3_set_permutation_symmetry_compact_fc3(fc3, p2s, s2pp, nsym_list, perms,
n_satom, n_patom);
}
void ph3py_transpose_compact_fc3(double *fc3, const long p2s[],
const long s2pp[], const long nsym_list[],
const long perms[], const long n_satom,
const long n_patom, const long t_type) {
fc3_transpose_compact_fc3(fc3, p2s, s2pp, nsym_list, perms, n_satom,
n_patom, t_type);
}
long ph3py_get_triplets_reciprocal_mesh_at_q(
long *map_triplets, long *map_q, const long grid_point,
const long D_diag[3], const long is_time_reversal, const long num_rot,
const long (*rec_rotations)[3][3], const long swappable) {
return tpl_get_triplets_reciprocal_mesh_at_q(
map_triplets, map_q, grid_point, D_diag, is_time_reversal, num_rot,
rec_rotations, swappable);
}
long ph3py_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
const long (*bz_grid_addresses)[3],
const long *bz_map, const long *map_triplets,
const long num_map_triplets,
const long D_diag[3], const long Q[3][3],
const long bz_grid_type) {
ConstBZGrid *bzgrid;
long i, j, num_ir;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = 0;
for (j = 0; j < 3; j++) {
bzgrid->Q[i][j] = Q[i][j];
}
}
bzgrid->size = num_map_triplets;
num_ir =
tpl_get_BZ_triplets_at_q(triplets, grid_point, bzgrid, map_triplets);
free(bzgrid);
bzgrid = NULL;
return num_ir;
}
/* relative_grid_addresses are given as P multiplied with those from the dataset,
* i.e.,
* np.dot(relative_grid_addresses, P.T) */
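/* Layout note (illustrative): relative_grid_address[24][4][3] holds the 24
 * tetrahedra used by the tetrahedron method, each described by 4 vertices given
 * as 3-component grid offsets around the grid point of interest. */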
long ph3py_get_integration_weight(
double *iw, char *iw_zero, const double *frequency_points,
const long num_band0, const long relative_grid_address[24][4][3],
const long D_diag[3], const long (*triplets)[3], const long num_triplets,
const long (*bz_grid_addresses)[3], const long *bz_map,
const long bz_grid_type, const double *frequencies1, const long num_band1,
const double *frequencies2, const long num_band2, const long tp_type,
const long openmp_per_triplets, const long openmp_per_bands) {
ConstBZGrid *bzgrid;
long i;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
}
tpl_get_integration_weight(
iw, iw_zero, frequency_points, num_band0, relative_grid_address,
triplets, num_triplets, bzgrid, frequencies1, num_band1, frequencies2,
num_band2, tp_type, openmp_per_triplets, openmp_per_bands);
free(bzgrid);
bzgrid = NULL;
return 1;
}
void ph3py_get_integration_weight_with_sigma(
double *iw, char *iw_zero, const double sigma, const double sigma_cutoff,
const double *frequency_points, const long num_band0,
const long (*triplets)[3], const long num_triplets,
const double *frequencies, const long num_band, const long tp_type) {
tpl_get_integration_weight_with_sigma(
iw, iw_zero, sigma, sigma_cutoff, frequency_points, num_band0, triplets,
num_triplets, frequencies, num_band, tp_type);
}
/* From single address to grid index */
long ph3py_get_grid_index_from_address(const long address[3],
const long D_diag[3]) {
return grg_get_grid_index(address, D_diag);
}
void ph3py_get_gr_grid_addresses(long gr_grid_addresses[][3],
const long D_diag[3]) {
grg_get_all_grid_addresses(gr_grid_addresses, D_diag);
}
long ph3py_get_reciprocal_rotations(long rec_rotations[48][3][3],
const long (*rotations)[3][3],
const long num_rot,
const long is_time_reversal) {
return grg_get_reciprocal_point_group(rec_rotations, rotations, num_rot,
is_time_reversal, 1);
}
/* Rotation matrices with respect to reciprocal basis vectors are
 * transformed to those for GRGrid. This set of rotations is always
 * used in GRGrid handling. */
long ph3py_transform_rotations(long (*transformed_rots)[3][3],
const long (*rotations)[3][3],
const long num_rot, const long D_diag[3],
const long Q[3][3]) {
return grg_transform_rotations(transformed_rots, rotations, num_rot, D_diag,
Q);
}
long ph3py_get_snf3x3(long D_diag[3], long P[3][3], long Q[3][3],
const long A[3][3]) {
return grg_get_snf3x3(D_diag, P, Q, A);
}
/* The rotations are those after proper transformation in GRGrid. */
long ph3py_get_ir_grid_map(long *ir_grid_map, const long D_diag[3],
const long PS[3], const long (*grg_rotations)[3][3],
const long num_rot) {
long num_ir, i;
grg_get_ir_grid_map(ir_grid_map, grg_rotations, num_rot, D_diag, PS);
num_ir = 0;
for (i = 0; i < D_diag[0] * D_diag[1] * D_diag[2]; i++) {
if (ir_grid_map[i] == i) {
num_ir++;
}
}
return num_ir;
}
long ph3py_get_bz_grid_addresses(long (*bz_grid_addresses)[3], long *bz_map,
long *bzg2grg, const long D_diag[3],
const long Q[3][3], const long PS[3],
const double rec_lattice[3][3],
const long type) {
BZGrid *bzgrid;
long i, j, size;
if ((bzgrid = (BZGrid *)malloc(sizeof(BZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->bzg2grg = bzg2grg;
bzgrid->type = type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = PS[i];
for (j = 0; j < 3; j++) {
bzgrid->Q[i][j] = Q[i][j];
bzgrid->reclat[i][j] = rec_lattice[i][j];
}
}
if (bzg_get_bz_grid_addresses(bzgrid)) {
size = bzgrid->size;
} else {
size = 0;
}
free(bzgrid);
bzgrid = NULL;
return size;
}
long ph3py_rotate_bz_grid_index(const long bz_grid_index,
const long rotation[3][3],
const long (*bz_grid_addresses)[3],
const long *bz_map, const long D_diag[3],
const long PS[3], const long bz_grid_type) {
ConstBZGrid *bzgrid;
long i, rot_bz_gp;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
bzgrid->PS[i] = 0;
}
rot_bz_gp = bzg_rotate_grid_index(bz_grid_index, rotation, bzgrid);
free(bzgrid);
bzgrid = NULL;
return rot_bz_gp;
}
void ph3py_symmetrize_collision_matrix(double *collision_matrix,
const long num_column,
const long num_temp,
const long num_sigma) {
double val;
long i, j, k, l, adrs_shift;
for (i = 0; i < num_sigma; i++) {
for (j = 0; j < num_temp; j++) {
adrs_shift = (i * num_column * num_column * num_temp +
j * num_column * num_column);
/* show_colmat_info(py_collision_matrix, i, j, adrs_shift); */
#ifdef _OPENMP
#pragma omp parallel for schedule(guided) private(l, val)
#endif
for (k = 0; k < num_column; k++) {
for (l = k + 1; l < num_column; l++) {
val = (collision_matrix[adrs_shift + k * num_column + l] +
collision_matrix[adrs_shift + l * num_column + k]) /
2;
collision_matrix[adrs_shift + k * num_column + l] = val;
collision_matrix[adrs_shift + l * num_column + k] = val;
}
}
}
}
}
void ph3py_expand_collision_matrix(double *collision_matrix,
const long *rot_grid_points,
const long *ir_grid_points,
const long num_ir_gp,
const long num_grid_points,
const long num_rot, const long num_sigma,
const long num_temp, const long num_band)
{
long i, j, k, l, m, n, p, adrs_shift, adrs_shift_plus, ir_gp, gp_r;
long num_column, num_bgb;
long *multi;
double *colmat_copy;
multi = (long *)malloc(sizeof(long) * num_ir_gp);
colmat_copy = NULL;
num_column = num_grid_points * num_band;
num_bgb = num_band * num_grid_points * num_band;
#ifdef _OPENMP
#pragma omp parallel for schedule(guided) private(j, ir_gp)
#endif
for (i = 0; i < num_ir_gp; i++) {
ir_gp = ir_grid_points[i];
multi[i] = 0;
for (j = 0; j < num_rot; j++) {
if (rot_grid_points[j * num_grid_points + ir_gp] == ir_gp) {
multi[i]++;
}
}
}
for (i = 0; i < num_sigma; i++) {
for (j = 0; j < num_temp; j++) {
adrs_shift = (i * num_column * num_column * num_temp +
j * num_column * num_column);
#ifdef _OPENMP
#pragma omp parallel for private(ir_gp, adrs_shift_plus, colmat_copy, l, gp_r, \
m, n, p)
#endif
for (k = 0; k < num_ir_gp; k++) {
ir_gp = ir_grid_points[k];
adrs_shift_plus = adrs_shift + ir_gp * num_bgb;
colmat_copy = (double *)malloc(sizeof(double) * num_bgb);
for (l = 0; l < num_bgb; l++) {
colmat_copy[l] =
collision_matrix[adrs_shift_plus + l] / multi[k];
collision_matrix[adrs_shift_plus + l] = 0;
}
for (l = 0; l < num_rot; l++) {
gp_r = rot_grid_points[l * num_grid_points + ir_gp];
for (m = 0; m < num_band; m++) {
for (n = 0; n < num_grid_points; n++) {
for (p = 0; p < num_band; p++) {
collision_matrix
[adrs_shift + gp_r * num_bgb +
m * num_grid_points * num_band +
rot_grid_points[l * num_grid_points + n] *
num_band +
p] +=
colmat_copy[m * num_grid_points * num_band +
n * num_band + p];
}
}
}
}
free(colmat_copy);
colmat_copy = NULL;
}
}
}
free(multi);
multi = NULL;
}
/* Calls tpi_get_neighboring_grid_points for multiple grid points so the loop
 * can be parallelized with OpenMP.
 *
 * relative_grid_addresses are given as P multiplied with those from the dataset,
* i.e.,
* np.dot(relative_grid_addresses, P.T) */
long ph3py_get_neighboring_gird_points(
long *relative_grid_points, const long *grid_points,
const long (*relative_grid_address)[3], const long D_diag[3],
const long (*bz_grid_addresses)[3], const long *bz_map,
const long bz_grid_type, const long num_grid_points,
const long num_relative_grid_address) {
long i;
ConstBZGrid *bzgrid;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < num_grid_points; i++) {
tpi_get_neighboring_grid_points(
relative_grid_points + i * num_relative_grid_address,
grid_points[i], relative_grid_address, num_relative_grid_address,
bzgrid);
}
free(bzgrid);
bzgrid = NULL;
return 1;
}
/* Calls thm_get_integration_weight at multiple grid points so the loop can be
 * parallelized with OpenMP.
 *
 * relative_grid_addresses are given as P multiplied with those from the dataset,
* i.e.,
* np.dot(relative_grid_addresses, P.T) */
long ph3py_get_thm_integration_weights_at_grid_points(
double *iw, const double *frequency_points, const long num_frequency_points,
const long num_band, const long num_gp,
const long (*relative_grid_address)[4][3], const long D_diag[3],
const long *grid_points, const long (*bz_grid_addresses)[3],
const long *bz_map, const long bz_grid_type, const double *frequencies,
const long *gp2irgp_map, const char function) {
long i, j, k, bi;
long vertices[24][4];
double freq_vertices[24][4];
ConstBZGrid *bzgrid;
if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
warning_print("Memory could not be allocated.");
return 0;
}
bzgrid->addresses = bz_grid_addresses;
bzgrid->gp_map = bz_map;
bzgrid->type = bz_grid_type;
for (i = 0; i < 3; i++) {
bzgrid->D_diag[i] = D_diag[i];
}
#ifdef _OPENMP
#pragma omp parallel for private(j, k, bi, vertices, freq_vertices)
#endif
for (i = 0; i < num_gp; i++) {
for (j = 0; j < 24; j++) {
tpi_get_neighboring_grid_points(vertices[j], grid_points[i],
relative_grid_address[j], 4,
bzgrid);
}
for (bi = 0; bi < num_band; bi++) {
for (j = 0; j < 24; j++) {
for (k = 0; k < 4; k++) {
freq_vertices[j][k] =
frequencies[gp2irgp_map[vertices[j][k]] * num_band +
bi];
}
}
for (j = 0; j < num_frequency_points; j++) {
iw[i * num_frequency_points * num_band + j * num_band + bi] =
thm_get_integration_weight(frequency_points[j],
freq_vertices, function);
}
}
}
free(bzgrid);
bzgrid = NULL;
return 1;
}
|
GB_unaryop__minv_bool_uint32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_uint32
// op(A') function: GB_tran__minv_bool_uint32
// C type: bool
// A type: uint32_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
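// Illustrative expansion: for this operator GB_GETA and GB_CASTING are empty, so
// GB_CAST_OP (p, p) reduces to Cx [p] = true ; regardless of the value in Ax [p].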
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_bool_uint32
(
bool *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_bool_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rs2_depth_metrics.h
|
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
//
// Plane Fit implementation follows http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points algorithm
#pragma once
namespace rs2
{
namespace depth_quality
{
struct snapshot_metrics
{
int width;
int height;
rs2::region_of_interest roi;
float distance;
float angle;
float angle_x;
float angle_y;
plane p;
std::array<float3, 4> plane_corners;
};
struct single_metric_data
{
single_metric_data(std::string name, float val) :
val(val), name(name) {}
float val;
std::string name;
};
using callback_type = std::function<void(
const std::vector<rs2::float3>& points,
const plane p,
const rs2::region_of_interest roi,
const float baseline_mm,
const float focal_length_pixels,
const int ground_thruth_mm,
const bool plane_fit,
const float plane_fit_to_ground_truth_mm,
const float distance_mm,
bool record,
std::vector<single_metric_data>& samples)>;
inline plane plane_from_point_and_normal(const rs2::float3& point, const rs2::float3& normal)
{
return{ normal.x, normal.y, normal.z, -(normal.x*point.x + normal.y*point.y + normal.z*point.z) };
}
//Based on: http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points
inline plane plane_from_points(const std::vector<rs2::float3> points)
{
if (points.size() < 3) throw std::runtime_error("Not enough points to calculate plane");
rs2::float3 sum = { 0,0,0 };
for (auto point : points) sum = sum + point;
rs2::float3 centroid = sum / float(points.size());
double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0;
for (auto point : points) {
rs2::float3 temp = point - centroid;
xx += temp.x * temp.x;
xy += temp.x * temp.y;
xz += temp.x * temp.z;
yy += temp.y * temp.y;
yz += temp.y * temp.z;
zz += temp.z * temp.z;
}
double det_x = yy*zz - yz*yz;
double det_y = xx*zz - xz*xz;
double det_z = xx*yy - xy*xy;
double det_max = std::max({ det_x, det_y, det_z });
if (det_max <= 0) return{ 0, 0, 0, 0 };
rs2::float3 dir{};
if (det_max == det_x)
{
float a = static_cast<float>((xz*yz - xy*zz) / det_x);
float b = static_cast<float>((xy*yz - xz*yy) / det_x);
dir = { 1, a, b };
}
else if (det_max == det_y)
{
float a = static_cast<float>((yz*xz - xy*zz) / det_y);
float b = static_cast<float>((xy*xz - yz*xx) / det_y);
dir = { a, 1, b };
}
else
{
float a = static_cast<float>((yz*xy - xz*yy) / det_z);
float b = static_cast<float>((xz*xy - yz*xx) / det_z);
dir = { a, b, 1 };
}
return plane_from_point_and_normal(centroid, dir.normalize());
}
inline double evaluate_pixel(const plane& p, const rs2_intrinsics* intrin, float x, float y, float distance, float3& output)
{
float pixel[2] = { x, y };
rs2_deproject_pixel_to_point(&output.x, intrin, pixel, distance);
return evaluate_plane(p, output);
}
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y, float min, float max)
{
float3 point;
auto far = evaluate_pixel(p, intrin, x, y, max, point);
if (fabs(max - min) < 1e-3) return point;
auto near = evaluate_pixel(p, intrin, x, y, min, point);
if (far*near > 0) return{ 0, 0, 0 };
auto avg = (max + min) / 2;
auto mid = evaluate_pixel(p, intrin, x, y, avg, point);
if (mid*near < 0) return approximate_intersection(p, intrin, x, y, min, avg);
return approximate_intersection(p, intrin, x, y, avg, max);
}
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y)
{
return approximate_intersection(p, intrin, x, y, 0.f, 1000.f);
}
inline snapshot_metrics analyze_depth_image(
const rs2::video_frame& frame,
float units, float baseline_mm,
const rs2_intrinsics * intrin,
rs2::region_of_interest roi,
const int ground_truth_mm,
bool plane_fit_present,
std::vector<single_metric_data>& samples,
bool record,
callback_type callback)
{
auto pixels = (const uint16_t*)frame.get_data();
const auto w = frame.get_width();
const auto h = frame.get_height();
snapshot_metrics result{ w, h, roi, {} };
std::mutex m;
std::vector<rs2::float3> roi_pixels;
//#pragma omp parallel for - TODO optimization envisaged
for (int y = roi.min_y; y < roi.max_y; ++y)
for (int x = roi.min_x; x < roi.max_x; ++x)
{
auto depth_raw = pixels[y*w + x];
if (depth_raw)
{
// units is float
float pixel[2] = { float(x), float(y) };
float point[3];
auto distance = depth_raw * units;
rs2_deproject_pixel_to_point(point, intrin, pixel, distance);
std::lock_guard<std::mutex> lock(m);
roi_pixels.push_back({ point[0], point[1], point[2] });
}
}
if (roi_pixels.size() < 3) { // Not enough pixels in RoI to fit a plane
return result;
}
plane p = plane_from_points(roi_pixels);
if (p == plane{ 0, 0, 0, 0 }) { // The points in RoI don't span a valid plane
return result;
}
// Calculate intersection of the plane fit with a ray along the center of ROI
// that by design coincides with the center of the frame
float3 plane_fit_pivot = approximate_intersection(p, intrin, intrin->width / 2.f, intrin->height / 2.f);
float plane_fit_to_gt_offset_mm = (ground_truth_mm > 0.f) ? (plane_fit_pivot.z * 1000 - ground_truth_mm) : 0;
result.p = p;
result.plane_corners[0] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.min_y));
result.plane_corners[1] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.min_y));
result.plane_corners[2] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.max_y));
result.plane_corners[3] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.max_y));
// Distance of the origin (the camera) from the plane is encoded in parameter D of the plane
// The parameter represents the Euclidean distance (along the plane normal) from the camera to the plane
result.distance = static_cast<float>(-p.d * 1000);
// Angle can be calculated from param C
result.angle = static_cast<float>(std::acos(std::abs(p.c)) / M_PI * 180.);
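// Geometry note (illustrative): plane_from_points returns a plane with a unit
// normal (a, b, c), so |d| is the origin-to-plane distance (converted to mm above)
// and |c| = |n . z| is the cosine of the tilt angle relative to the optical axis.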
callback(roi_pixels, p, roi, baseline_mm, intrin->fx, ground_truth_mm, plane_fit_present,
plane_fit_to_gt_offset_mm, result.distance, record, samples);
// Calculate normal
auto n = float3{ p.a, p.b, p.c };
auto cam = float3{ 0.f, 0.f, -1.f };
auto dot = n * cam;
auto u = cam - n * dot;
result.angle_x = u.x;
result.angle_y = u.y;
return result;
}
}
}
|
mlmstream.c
|
// Copyright 2009-2015 Sandia Corporation. Under the terms
// of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2015, Sandia Corporation
// All rights reserved.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
#include "mlm.h"
#include <uint.h>
#include <sint.h>
#include <vector>
#include <utility>
int main(int argc, char* argv[]) {
const int LENGTH = 32768;
printf("Allocating arrays of size %d elements.\n", LENGTH);
double* a = (double*) mlm_malloc(sizeof(double) * LENGTH, 0);
double* b = (double*) mlm_malloc(sizeof(double) * LENGTH, 0);
double* fast_c = (double*) mlm_malloc(sizeof(double) * LENGTH, 0);
UInt<4> io_ins_0, io_ins_1, io_ins_2, io_ins_3, io_out;
UInt<1> io_load, io_shift;
UInt<4>* inp_ptr;
UInt<1>* ctrl_ptr;
TYPEINFO inp_info, ctrl_info;
mlm_set_pool(1);
printf("Allocation for fast_c is %llu\n", (unsigned long long int) fast_c);
double* c = (double*) malloc(sizeof(double) * LENGTH);
printf("Done allocating arrays.\n");
int i;
for(i = 0; i < LENGTH; ++i) {
a[i] = i;
b[i] = LENGTH - i;
c[i] = 0;
}
// Issue a memory copy
mlm_memcpy(fast_c, c, sizeof(double) * LENGTH);
printf("Perfoming the fast_c compute loop...\n");
#pragma omp parallel for
for(i = 0; i < LENGTH; ++i) {
//printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
fast_c[i] = 2.0 * a[i] + 1.5 * b[i];
}
// Now copy results back
mlm_Tag copy_tag = mlm_memcpy(c, fast_c, sizeof(double) * LENGTH);
mlm_waitComplete(copy_tag);
double sum = 0;
for(i = 0; i < LENGTH; ++i) {
sum += c[i];
}
printf("Sum of arrays is: %f\n", sum);
printf("Freeing arrays...\n");
mlm_free(a);
mlm_free(b);
free(c);
printf("Done.\n");
io_shift = UInt<1>(1);
io_ins_0 = UInt<4>(9);
io_ins_1 = UInt<4>(4);
io_ins_2 = UInt<4>(2);
io_ins_3 = UInt<4>(7);
io_load = UInt<1>(0);
size_t inp_size = sizeof(UInt<4>) * 5;
size_t ctrl_size = sizeof(UInt<1>) * 2;
RTL_shmem_info* shmem = new RTL_shmem_info(inp_size, ctrl_size);
inp_ptr = (UInt<4>*)shmem->get_inp_ptr();
ctrl_ptr = (UInt<1>*)shmem->get_ctrl_ptr();
ctrl_info.push_back(make_pair("UInt", 1));
memcpy(ctrl_ptr, (void*)(&io_shift), sizeof(UInt<1>));
ctrl_ptr++;
inp_info.push_back(make_pair("UInt", 4));
memcpy(inp_ptr, (void*)(&io_ins_0), sizeof(UInt<4>));
inp_ptr++;
inp_info.push_back(make_pair("UInt", 4));
memcpy(inp_ptr, (void*)(&io_ins_1), sizeof(UInt<4>));
inp_ptr++;
inp_info.push_back(make_pair("UInt", 4));
memcpy(inp_ptr, (void*)(&io_ins_2), sizeof(UInt<4>));
inp_ptr++;
inp_info.push_back(make_pair("UInt", 4));
memcpy(inp_ptr, (void*)(&io_ins_3), sizeof(UInt<4>));
inp_ptr++;
ctrl_info.push_back(make_pair("UInt", 1));
memcpy(ctrl_ptr, (void*)(&io_load), sizeof(UInt<1>));
shmem->set_inp_info(inp_info);
shmem->set_ctrl_info(ctrl_info);
Update_RTL_Params* params = new Update_RTL_Params();
params->storetomem(shmem);
delete params;
start_RTL_sim(shmem);
/*io_shift = UInt<1>(1);
io_ins_0 = UInt<4>(9);
io_ins_1 = UInt<4>(7);
io_ins_2 = UInt<4>(2);
io_ins_3 = UInt<4>(4);*/
io_load = UInt<1>(1);
memcpy(ctrl_ptr, (void*)(&io_load), sizeof(UInt<1>));
params = new Update_RTL_Params(false, true, true, 1, true, true, false, false);
params->storetomem(shmem);
delete params;
update_RTL_signals();
io_load = UInt<1>(0);
memcpy(ctrl_ptr, (void*)(&io_load), sizeof(UInt<1>));
params = new Update_RTL_Params(false, true, true, 5, true, true, true, true);
params->storetomem(shmem);
delete params;
update_RTL_signals();
delete shmem;
return 0;
}
|
covariance.c
|
/**
* covariance.c: This file was adapted from PolyBench/GPU 1.0 test
* suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <[email protected]>
* Rafael Cardoso F Sousa <[email protected]>
* Luís Felipe Mattos <[email protected]>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
/* Problem size */
#define M SIZE
#define N SIZE
#define sqrt_of_array_cell(x, j) sqrt(x[j])
#define FLOAT_N 3214212.01
#define EPS 0.005
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *data) {
int i, j;
for (i = 1; i < (M + 1); i++) {
for (j = 1; j < (N + 1); j++) {
data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / M;
}
}
}
int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) {
int i, j, fail;
fail = 0;
for (i = 1; i < (M + 1); i++) {
for (j = 1; j < (N + 1); j++) {
if (percentDiff(symmat[i * (N + 1) + j],
symmat_outputFromGpu[i * (N + 1) + j]) >
ERROR_THRESHOLD) {
fail++;
}
}
}
return fail;
}
void covariance(DATA_TYPE *data, DATA_TYPE *symmat, DATA_TYPE *mean) {
int i, j, j1, j2;
/* Determine mean of column vectors of input data matrix */
for (j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
for (i = 1; i < (N + 1); i++) {
for (j = 1; j < (M + 1); j++) {
data[i * (M + 1) + j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 1; j1 < (M + 1); j1++) {
for (j2 = j1; j2 < (M + 1); j2++) {
symmat[j1 * (M + 1) + j2] = 0.0;
for (i = 1; i < N + 1; i++) {
symmat[j1 * (M + 1) + j2] +=
data[i * (M + 1) + j1] * data[i * (M + 1) + j2];
}
symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
}
}
}
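/* Reference formula (for clarity): with mean[j] = (1/FLOAT_N) * sum_i data[i][j],
   the loops above accumulate symmat[j1][j2] = sum_i (data[i][j1] - mean[j1]) *
   (data[i][j2] - mean[j2]) and mirror the value into symmat[j2][j1]; the OpenMP
   offload version below performs the same three steps on the device. */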
void covariance_OMP(DATA_TYPE *data, DATA_TYPE *symmat, DATA_TYPE *mean) {
/* Determine mean of column vectors of input data matrix */
#pragma omp target data \
map(to: data[:(M + 1) * (N + 1)]) \
map(alloc: mean[:(M + 1)]) \
map(tofrom: symmat[:(M + 1) * (N + 1)]) device(OMP_DEVICE_ID)
{
#pragma omp target teams distribute parallel for device(OMP_DEVICE_ID)
for (int j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (int i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
#pragma omp target teams distribute parallel for collapse(2) device(OMP_DEVICE_ID)
for (int i = 1; i < (N + 1); i++) {
for (int j = 1; j < (M + 1); j++) {
data[i * (M + 1) + j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
#pragma omp target teams distribute parallel for device(OMP_DEVICE_ID)
for (int j1 = 1; j1 < (M + 1); j1++) {
for (int j2 = j1; j2 < (M + 1); j2++) {
symmat[j1 * (M + 1) + j2] = 0.0;
for (int i = 1; i < N + 1; i++) {
symmat[j1 * (M + 1) + j2] +=
data[i * (M + 1) + j1] * data[i * (M + 1) + j2];
}
symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
}
}
}
}
int main() {
fprintf(stdout, "<< Covariance Computation >>\n");
// declare arrays and allocate common memory
DATA_TYPE *data = NULL;
DATA_TYPE *data_OMP = NULL;
DATA_TYPE *symmat = NULL;
DATA_TYPE *symmat_OMP = NULL;
DATA_TYPE *mean = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE));
// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
symmat_OMP = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE));
data_OMP = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE));
init_arrays(data_OMP);
BENCHMARK_OMP(covariance_OMP(data_OMP, symmat_OMP, mean));
// prevent dead-code elimination
DCE_PREVENT(symmat_OMP, (M+1)*(M+1));
#endif
// run sequential version if enabled
#ifdef RUN_CPU_SEQ
symmat = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE));
data = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE));
init_arrays(data);
BENCHMARK_CPU(covariance(data, symmat, mean));
// prevent dead-code elimination
DCE_PREVENT(symmat, (M+1)*(M+1));
#endif
int fail = 0;
// if TEST is enabled, then compare OMP results against sequential mode
#ifdef RUN_TEST
fail = compareResults(symmat, symmat_OMP);
printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif
// release memory
free(data);
free(data_OMP);
free(symmat);
free(symmat_OMP);
free(mean);
return fail;
}
|
omp.c
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
int main(void) {
int i, N, j, trouve;
printf("Enter the number N: ");
scanf("%d", &N);
i=1;
double debut,fin;
debut=omp_get_wtime();
omp_set_num_threads(4);
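/* The parallel loop below repeatedly advances the shared counter i by 2 and tests
   candidates by trial division up to sqrt(i), printing each prime it finds. Note
   that i is shared by all threads, so under OpenMP the updates can interleave and
   the printed primes (and their order) may vary between runs. */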
#pragma omp parallel for private (trouve,j) lastprivate(fin)
for (int cpt=1;cpt<N;cpt++)
{
trouve=0;
while ( trouve == 0 )
{
i=i+2;
for ( j=2; j<= sqrt((double)i) ;j++)
{
if (i%j==0) break;
}
if (j>sqrt((double)i))
{printf("%d|",i);
trouve=1;}
}
}
fin = omp_get_wtime()-debut;
printf("time = %f ",(float)(fin));
}
|
Trainer.h
|
/*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _TRAINER_
#define _TRAINER_
#include <limits.h>
#include <float.h>
DEFINE_bool(random_batch_processing, false, "Process batches in random order. Note this may disrupt catch-up.");
DEFINE_bool(random_per_batch_datapoint_processing, false, "Process datapoints in random order per batch. Note this may disrupt catch-up.");
DEFINE_int32(interval_print, 1, "Interval in which to print the loss.");
// Contains times / losses / etc
struct TrainStatistics {
std::vector<double> times;
std::vector<double> losses;
};
typedef struct TrainStatistics TrainStatistics;
class Trainer {
protected:
void TrackTimeLoss(double cur_time, double cur_loss, TrainStatistics *stats) {
stats->times.push_back(cur_time);
stats->losses.push_back(cur_loss);
}
void PrintPartitionTime(Timer &timer) {
printf("Partition Time(s): %f\n", timer.Elapsed());
}
void PrintTimeLoss(double cur_time, double cur_loss, int epoch) {
printf("Epoch: %d\tTime(s): %f\tLoss: %lf\t\n", epoch, cur_time, cur_loss);
}
void EpochBegin(int epoch, Timer &gradient_timer, Model *model, const std::vector<Datapoint *> &datapoints, TrainStatistics *stats) {
double cur_time = gradient_timer.Elapsed();
double cur_loss = model->ComputeLoss(datapoints);
this->TrackTimeLoss(cur_time, cur_loss, stats);
if (FLAGS_print_loss_per_epoch && epoch % FLAGS_interval_print == 0) {
this->PrintTimeLoss(cur_time, cur_loss, epoch);
}
}
public:
Trainer() {
// Some error checking.
if (FLAGS_n_threads > std::thread::hardware_concurrency()) {
std::cerr << "Trainer: Number of threads is greater than the number of physical cores." << std::endl;
//exit(0);
}
// Basic set up, like pinning to core, setting number of threads.
omp_set_num_threads(FLAGS_n_threads);
#pragma omp parallel
{
pin_to_core(omp_get_thread_num());
}
}
virtual ~Trainer() {}
// Main training method.
virtual TrainStatistics Train(Model *model, const std::vector<Datapoint *> & datapoints, Updater *updater) = 0;
};
#endif
|
3d7pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
// All four problem sizes are required; exit instead of reading them uninitialized.
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
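// Tile extents handed to the tiling tool (the trailing -1 terminates the list).
// The values appear to correspond to the 8/16/256 blocking factors visible in
// the generated loop bounds further below.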
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(16*t3+Nx+12,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
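// t5 is the time step; (-t5+t6), (-t5+t7), (-t5+t8) undo the time skewing and
// give the z, y, x indices of the point updated by the 7-point stencil below.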
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__max_uint16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint16)
// A*D function (colscale): GB (_AxD__max_uint16)
// D*A function (rowscale): GB (_DxB__max_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint16)
// C=scalar+B GB (_bind1st__max_uint16)
// C=scalar+B' GB (_bind1st_tran__max_uint16)
// C=A+scalar GB (_bind2nd__max_uint16)
// C=A'+scalar GB (_bind2nd_tran__max_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IMAX (aij, bij)
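// GB_IMAX (x,y) evaluates to the larger of x and y (integer max), so this
// kernel computes the element-wise maximum of its uint16 inputs.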
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT16 || GxB_NO_MAX_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint64_int8.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_int8)
// op(A') function: GB (_unop_tran__identity_uint64_int8)
// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_int8)
(
uint64_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main_spmv.c
|
#include"common.h"
#include"mmio_highlevel.h"
#include"utils.h"
#include "encode.h"
# define INDEX_DATA_TYPE unsigned char
//# define VAL_DATA_TYPE double
typedef struct
{
MAT_VAL_TYPE *value;
int *columnindex;
MAT_PTR_TYPE *rowpointer;
}SMatrix;
int main(int argc, char ** argv)
{
if (argc < 2)
{
printf("Run the code by './test matrix.mtx'.\n");
return 0;
}
printf("--------------------------------!!!!!!!!------------------------------------\n");
struct timeval t1, t2;
int rowA;
int colA;
MAT_PTR_TYPE nnzA;
int isSymmetricA;
SMatrix matrixA;
char *filename;
filename = argv[1];
printf("MAT: -------------- %s --------------\n", filename);
// load mtx A data to the csr format
gettimeofday(&t1, NULL);
mmio_allinone(&rowA, &colA, &nnzA, &isSymmetricA, &matrixA.rowpointer, &matrixA.columnindex, &matrixA.value, filename);
gettimeofday(&t2, NULL);
double time_loadmat = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
printf("input matrix A: ( %i, %i ) nnz = %i\n loadfile time = %4.5f sec\n", rowA, colA, nnzA, time_loadmat/1000.0);
for (int i = 0; i < nnzA; i++)
matrixA.value[i] = i % 10;
if (rowA != colA)
{
printf("This code only computes square matrices.\n Exit.\n");
return 0;
}
// rowA=16;
// nnzA=matrixA.rowpointer[16];
// MAT_PTR_TYPE *cscColPtrA = (MAT_PTR_TYPE *)malloc((colA+1) * sizeof(MAT_PTR_TYPE));
// int *cscRowIdxA = (int *)malloc(nnzA * sizeof(int));
// MAT_VAL_TYPE *cscValA = (MAT_VAL_TYPE *)malloc(nnzA * sizeof(MAT_VAL_TYPE));
// transpose A from csr to csc
// matrix_transposition(rowA, colA, nnzA, matrixA.rowpointer, matrixA.columnindex, matrixA.value,cscRowIdxA, cscColPtrA, cscValA);
/* SMatrix matrixB;
int rowB=colA;
int colB=rowA;
matrixB.rowpointer = cscColPtrA;
matrixB.columnindex = cscRowIdxA;
matrixB.value = cscValA;
*/
if (BLOCK_SIZE>rowA){
printf("Error!\n");
return 0;
}
int rbnum=0;
int cbnum=0;
rbnum = rowA%BLOCK_SIZE==0 ? rowA/BLOCK_SIZE : (rowA/BLOCK_SIZE)+1 ;
cbnum = colA%BLOCK_SIZE==0 ? colA/BLOCK_SIZE : (colA/BLOCK_SIZE)+1 ;
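// rbnum / cbnum are the number of BLOCK_SIZE-sized block rows / block columns
// (ceiling division, so a partial block at the border still counts).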
char *flag=(char *)malloc(cbnum*sizeof(char));
int nnzbl=0;
for (int i=0;i<rbnum;i++)
{
memset(flag,0,cbnum*sizeof(char));
int start= i*BLOCK_SIZE;
int end = i==rbnum-1 ? rowA : (i+1)*BLOCK_SIZE ;
for (int j=matrixA.rowpointer[start];j<matrixA.rowpointer[end];j++){
int jc=matrixA.columnindex[j]/BLOCK_SIZE;
if (flag[jc]==0)
{
flag[jc]=1;
nnzbl++;
}
}
}
// printf("nnzbl=%d\n",nnzbl);
MAT_PTR_TYPE *rowblock_ptr; //block rowpointer of A
int *columnid; // block columnindex of A
int *nnzb_A;
int colid=0;
rowblock_ptr=(MAT_PTR_TYPE *)malloc((rbnum+1)*sizeof(MAT_PTR_TYPE));
columnid=(int *)malloc(nnzbl*sizeof(int));
memset(rowblock_ptr,0,(rbnum+1)*sizeof(MAT_PTR_TYPE));
int ptrA_length=0;
for (int i=0;i<rbnum;i++)
{
memset(flag,0,cbnum*sizeof(char));
int start= i*BLOCK_SIZE;
int end = i==rbnum-1 ? rowA : (i+1)*BLOCK_SIZE ;
for (int j=matrixA.rowpointer[start];j<matrixA.rowpointer[end];j++)
{
int jc=matrixA.columnindex[j]/BLOCK_SIZE;
if (flag[jc]==0)
{
flag[jc]=1;
rowblock_ptr[i+1]++;
columnid[colid]=jc;
colid++;
ptrA_length+=(end-start);
}
}
}
for (int i=1;i<rbnum+1;i++)
{
rowblock_ptr[i]+=rowblock_ptr[i-1];
}
/*
for(int blki =0;blki < rbnum ;blki ++)
{
quick_sort_key(columnid + rowblock_ptr[blki],rowblock_ptr[blki+1] - rowblock_ptr[blki]);
}
*/
// exclusive_scan(nnzb_A,nnzbl+1);
/* for (int i=0;i<rbnum+1;i++)
{
printf("%d ",rowblock_ptr[i]);
}
printf("\n");
for (int i=0;i<nnzbl;i++)
{
printf("%d ",columnid[i]);
}
printf("\n");
*/
//format
char * Format =(char *)malloc(rowblock_ptr[rbnum] * sizeof(char));
memset(Format,0,rowblock_ptr[rbnum] * sizeof(char));
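// Per-block storage format chosen below, one code per non-empty block:
//   0 = CSR, 1 = COO, 2 = ELL, 3 = HYB (ELL + COO leftovers),
//   4 = dense, 5 = dense rows only, 6 = dense columns only.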
/* int *csrnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *coonum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *ellnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *hybnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *dnsnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *dnsrnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
int *dnscnum = (int *)malloc(rowblock_ptr[rbnum] * sizeof(int));
memset(csrnum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(coonum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(ellnum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(hybnum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(dnsnum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(dnsrnum,0,rowblock_ptr[rbnum] * sizeof(int));
memset(dnscnum,0,rowblock_ptr[rbnum] * sizeof(int));
*/
//nnz
int *blknnz = (int *)malloc((nnzbl+1)*sizeof(int));
nnzb_A=(int *)malloc((nnzbl+1)*sizeof(int));
int nnzid=0;
//dense
int dense_size=0;
//denserow
int * denserowptr = (int *)malloc((rowblock_ptr[rbnum] + 1) * sizeof(int));
memset(denserowptr,0,(rowblock_ptr[rbnum]+ 1) * sizeof(int));
int denserow_size =0 ;
int denrowblknum =0 ;
// int denrowsum =0;
//densecolumn
int * densecolptr = (int *)malloc((rowblock_ptr[rbnum] + 1) * sizeof(int));
memset(densecolptr,0,(rowblock_ptr[rbnum]+ 1) * sizeof(int));
int densecol_size =0 ;
int dencolblknum =0 ;
// int dencolsum =0;
//CSR
int csrsize=0;
int csrptrlen=0;
// int csrblknum=0;
//ELL
int ellsize =0;
// int ellblknum=0;
//COO
int coosize =0;
// int cooblknum=0;
//HYB
int hybellsize =0;
int hybcoosize =0;
int hybsize =0;
int hybblknum =0;
char *blkwidth = (char *)malloc(rowblock_ptr[rbnum]*sizeof(char));
memset(blkwidth,0,rowblock_ptr[rbnum]) ;
int *hybcoo= (int *)malloc(rowblock_ptr[rbnum]*sizeof(int));
memset(hybcoo,0,rowblock_ptr[rbnum]*sizeof(int)) ; // zero the whole int array, not just the first rowblock_ptr[rbnum] bytes
int hybblk=0;
// int *blkhybcoonum = (int *)malloc(rowblock_ptr[rbnum]*sizeof(int));
// memset(blkhybcoonum,0,rowblock_ptr[rbnum]) ;
int *multicoonnz = (int * )malloc((rbnum +1) *sizeof(int));
memset(multicoonnz,0,(rbnum +1) *sizeof(int));
gettimeofday(&t1, NULL);
//#pragma omp parallel for
for (int blki=0;blki<rbnum;blki++)
{
int rowbnum=rowblock_ptr[blki+1]-rowblock_ptr[blki];
int *rownnzA=(int *)malloc((rowbnum + 1) * sizeof(int));
memset(rownnzA,0,(rowbnum + 1)*sizeof(int));
// SMatrix *subrowmatrixA=(SMatrix *)malloc(rowbnum*sizeof(SMatrix));
int rowlength= blki==rbnum-1 ? rowA-(rbnum-1)*BLOCK_SIZE : BLOCK_SIZE ;
int start= blki*BLOCK_SIZE;
int end = blki==rbnum-1 ? rowA : (blki+1)*BLOCK_SIZE ;
for (int j=matrixA.rowpointer[start];j<matrixA.rowpointer[end];j++)
{
// use && so both bounds are checked (the comma operator silently dropped the first one)
for (int k=rowblock_ptr[blki],ki=0;k<rowblock_ptr[blki+1]&&ki<rowbnum;k++,ki++)
{
int kcstart=columnid[k]*BLOCK_SIZE;
int kcend= columnid[k]== (cbnum-1) ? colA: (columnid[k]+1)*BLOCK_SIZE;
if (matrixA.columnindex[j]>=kcstart&&matrixA.columnindex[j]<kcend)
{
rownnzA[ki]++;
break;
}
}
}
int nnzsum= 0;
for(int bi=0;bi<rowbnum;bi++)
{
nnzb_A[rowblock_ptr[blki]+bi]=rownnzA[bi];
nnzsum += rownnzA[bi] ;
// printf("the %d's nnz=%d\n",rowblock_ptr[blki]+bi,nnzb_A[nnzid]);
// nnzid++;
}
exclusive_scan(rownnzA,rowbnum+1);
// printf("blki=%d,rowbnum=%d\n",blki,rowbnum) ;
MAT_PTR_TYPE *rowptr = (MAT_PTR_TYPE *)malloc((rowbnum * (rowlength+1)) *sizeof(MAT_PTR_TYPE));
memset(rowptr,0,(rowbnum * (rowlength+1))*sizeof(MAT_PTR_TYPE));
/* for (int bi=0;bi<rowbnum;bi++)
{
subrowmatrixA[bi].rowpointer=(MAT_PTR_TYPE *)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE));
memset(subrowmatrixA[bi].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE));
}
*/
unsigned char *rbcol = (unsigned char *)malloc(nnzsum *sizeof(unsigned char)) ;
memset(rbcol , 0, nnzsum *sizeof(unsigned char));
int *num=(int*)malloc((rowbnum)*sizeof(int));
memset(num,0,(rowbnum)*sizeof(int));
for (int ri=0;ri<rowlength;ri++)
{
for (int j=matrixA.rowpointer[start+ri];j<matrixA.rowpointer[start+ri+1];j++)
{
for (int k=rowblock_ptr[blki],ki=0;k<rowblock_ptr[blki+1]&&ki<rowbnum;k++,ki++)
{
int kcstart=columnid[k]*BLOCK_SIZE;
int kcend= columnid[k]== (cbnum-1) ? colA: (columnid[k]+1)*BLOCK_SIZE;
if (matrixA.columnindex[j]>=kcstart&&matrixA.columnindex[j]<kcend)
{
num[ki]++;
rbcol[rownnzA[ki]+num[ki]-1] = matrixA.columnindex[j]-columnid[k]*BLOCK_SIZE;
break;
}
}
}
for (int bi=0;bi<rowbnum;bi++)
{
rowptr[bi * (rowlength + 1) + ri +1]= num[bi];
// subrowmatrixA[bi].rowpointer[ri+1]=num[bi];
}
}
char * denserowflag = (char *)malloc(rowbnum * sizeof(char));
char * densecolflag = (char *)malloc(rowbnum * sizeof(char));
char * samecolcount = (char *)malloc (rowlength * sizeof(char));
for (int bi=0;bi<rowbnum;bi++)
{
/* if(1)
{
Format[rowblock_ptr[blki]+bi] =0 ;
csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
csrptrlen += rowlength ;
continue;
}
*/
/* int blkcol = columnid[rowblock_ptr[blki]+bi] ;
if (blki == blkcol) //CSR
{
Format[rowblock_ptr[blki]+bi] =0 ;
csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
csrptrlen += rowlength ;
}
*/
// else
{
int collength = columnid[rowblock_ptr[blki]+bi] == cbnum-1 ? colA - (cbnum-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
int nnzthreshold = rowlength * collength * 0.5 ;
if (nnzb_A[rowblock_ptr[blki]+bi] >= nnzthreshold) //dense
{
Format[rowblock_ptr[blki]+bi] = 4 ;
blknnz[rowblock_ptr[blki]+bi] = rowlength * collength;
// dense_size += rowlength * collength ;
// dnsnum[rowblock_ptr[blki]+bi] = rowlength * collength;
continue;
}
else
{
int iocsr_size = nnzb_A[rowblock_ptr[blki]+bi] % 2 ==0 ? nnzb_A[rowblock_ptr[blki]+bi] * sizeof (MAT_VAL_TYPE) + (nnzb_A[rowblock_ptr[blki]+bi] * sizeof (char)) / 2 + (rowlength+1) * sizeof (int) :
nnzb_A[rowblock_ptr[blki]+bi] * sizeof (MAT_VAL_TYPE) + (nnzb_A[rowblock_ptr[blki]+bi] * sizeof (char)) /2 + 1 + (rowlength+1) * sizeof (int) ;
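// iocsr_size estimates the block's CSR footprint in bytes: nnz values,
// nnz 4-bit column indices packed two per byte (rounded up when nnz is odd),
// plus a (rowlength+1)-entry row pointer.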
// int iodense_sise= rowlength * collength * sizeof (MAT_VAL_TYPE) ;
// int minsize= iocsr_size >= iodense_sise ? iodense_sise : iocsr_size ;
int denserownum =0;
int densecolnum =0;
if (nnzb_A[rowblock_ptr[blki]+bi] % collength ==0)
{
int dnsrowflag =0 ;
int dnscolflag =0;
for (int ri=0;ri < rowlength ;ri++)
{
if ((rowptr[bi * (rowlength + 1)+ ri +1] - rowptr[bi * (rowlength + 1)+ ri ]) % collength !=0)
// if ((subrowmatrixA[bi].rowpointer[ri+1] - subrowmatrixA[bi].rowpointer[ri] ) % collength !=0 )
{
dnsrowflag =0;
// denserowflag[bi]=0;
break;
}
else
{
if ((rowptr[bi * (rowlength + 1)+ ri +1] - rowptr[bi * (rowlength + 1)+ ri ]) ==collength)
// if (subrowmatrixA[bi].rowpointer[ri+1] - subrowmatrixA[bi].rowpointer[ri] == collength)
{
dnsrowflag =1;
// denserowflag[bi]=1;
denserownum ++ ;
}
}
}
//dense row
if (dnsrowflag == 1)
{
Format[rowblock_ptr[blki]+bi] = 5 ; //Dense Row
// denrowsum += denserownum ;
denserowptr[rowblock_ptr[blki]+bi] = denserownum ;
// denserow_size += denserownum * collength ;
// dnsrnum[rowblock_ptr[blki]+bi] = denserownum * collength;
blknnz[rowblock_ptr[blki]+bi] = denserownum * collength;
continue;
// break;
// denrowblknum ++ ;
}
//dense column
else
{
densecolflag[bi] = 0; // keep the flag defined even if no column reaches full height
memset(samecolcount,0,rowlength * sizeof(char)) ;
for (int ci=rownnzA[bi];ci<rownnzA[bi+1];ci++)
{
for (int colid=0 ;colid <rowlength ;colid ++)
{
if (rbcol[ci]==colid)
{
samecolcount[colid] ++;
}
}
}
for (int ci=0;ci<rowlength ;ci ++)
{
if (samecolcount[ci] % rowlength !=0)
{
densecolflag[bi] =0;
break ;
}
else if (samecolcount[ci] ==rowlength)
{
densecolflag[bi]=1;
densecolnum ++ ;
}
}
if (densecolflag[bi] == 1)
{
Format[rowblock_ptr[blki]+bi] = 6 ; //Dense column
// dencolsum += densecolnum ;
densecolptr[rowblock_ptr[blki]+bi] = densecolnum ;
// densecol_size += densecolnum * rowlength ;
blknnz[rowblock_ptr[blki]+bi] = densecolnum * rowlength;
continue;
// denrowblknum ++ ;
}
}
}
if (Format[rowblock_ptr[blki]+bi] != 5 && Format[rowblock_ptr[blki]+bi] != 6)
{
int bwidth=0;
int hybwidth=0;
for (int blkj=0;blkj<rowlength;blkj++)
{
if (bwidth < rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj])
// if (bwidth < subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj])
// bwidth = subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj];
bwidth = rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj] ;
}
if (nnzb_A[rowblock_ptr[blki] + bi] <= 12 )
{
/* if (bwidth <= 2) //ELL
{
Format[rowblock_ptr[blki]+bi] = 2;
ellsize += bwidth * rowlength ;
blkwidth[rowblock_ptr[blki]+bi]=bwidth;
blknnz[rowblock_ptr[blki]+bi] = bwidth * rowlength ;
}
else //COO
*/
{
Format[rowblock_ptr[blki]+bi] = 1;
// coosize += nnzb_A[rowblock_ptr[blki]+bi] ;
multicoonnz[blki] += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
continue;
}
}
else
{
double row_length_mean = ((double)nnzb_A[rowblock_ptr[blki] + bi]) / rowlength;
double variance = 0.0;
double row_length_skewness = 0.0;
for (int row = 0; row < rowlength; ++row)
{
int length = rowptr[bi * (rowlength + 1)+row + 1] - rowptr[bi * (rowlength + 1)+row ] ;
double delta = (double)(length - row_length_mean);
variance += (delta * delta);
row_length_skewness += (delta * delta * delta);
}
variance /= rowlength;
double row_length_std_dev = sqrt(variance);
row_length_skewness = (row_length_skewness / rowlength) / pow(row_length_std_dev, 3.0);
double row_length_variation = row_length_std_dev / row_length_mean;
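// row_length_variation is the coefficient of variation of the per-row nnz
// inside the block (std dev / mean). Low variation means rows are nearly
// uniform, so ELL wastes little padding; high variation favours HYB or CSR,
// as decided by the thresholds below.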
/* if (row_length_variation >=1)
{
printf("row_length_variation = %f\n",row_length_variation);
}
*/
double ell_csr_threshold = 0.2;
double csr_hyb_threshold = 1;
// printf("row_length_variation = %f\n",row_length_variation);
if (row_length_variation <= ell_csr_threshold) //ELL
{
Format[rowblock_ptr[blki]+bi] = 2;
// ellsize += bwidth * rowlength ;
blkwidth[rowblock_ptr[blki]+bi]=bwidth;
blknnz[rowblock_ptr[blki]+bi] = bwidth * rowlength ;
continue;
}
else
{
int iopriorsize= (bwidth * rowlength %2) ==0 ? bwidth * rowlength * sizeof (MAT_VAL_TYPE) + bwidth * rowlength * sizeof (char) /2 :
bwidth * rowlength * sizeof (MAT_VAL_TYPE) + bwidth * rowlength * sizeof (char) /2 +1 ;
int ionextsize;
int coonextnum=0;
int coopriornum=0;
for (int wi=bwidth-1;wi>=0;wi--)
{
coonextnum=0;
for (int blkj=0;blkj<rowlength;blkj++)
{
if (rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj] > wi)
// if (subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj]>wi)
{
coonextnum += rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj] - wi ;
// coonextnum+=subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj]-wi;
}
}
ionextsize= (wi * rowlength) % 2 ==0 ? wi * rowlength * sizeof (MAT_VAL_TYPE )+ wi * rowlength * sizeof (char) /2 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) :
wi * rowlength * sizeof (MAT_VAL_TYPE )+ wi * rowlength * sizeof (char) /2 + 1 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) ;
if (iopriorsize<=ionextsize)
{
hybwidth=wi+1;
break;
}
else
{
iopriorsize=ionextsize;
coopriornum=coonextnum;
}
}
if (row_length_variation >= csr_hyb_threshold && coopriornum <= 12) //HYB
{
Format[rowblock_ptr[blki]+bi] = 3;
// hybcoosize+=coopriornum;
// hybellsize += hybwidth * rowlength ;
hybcoo[rowblock_ptr[blki]+bi] = coopriornum;
blkwidth[rowblock_ptr[blki]+bi]=hybwidth;
blknnz[rowblock_ptr[blki]+bi] = coopriornum + hybwidth * rowlength ;
continue;
}
else //CSR
{
Format[rowblock_ptr[blki]+bi] =0 ;
// csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
continue;
// csrptrlen += rowlength ;
}
}
}
/* else
{
int iopriorsize= (bwidth * rowlength %2) ==0 ? bwidth * rowlength * sizeof (MAT_VAL_TYPE) + bwidth * rowlength * sizeof (char) /2 :
bwidth * rowlength * sizeof (MAT_VAL_TYPE) + bwidth * rowlength * sizeof (char) /2 +1 ;
int ionextsize;
int coonextnum=0;
int coopriornum=0;
for (int wi=bwidth-1;wi>=0;wi--)
{
coonextnum=0;
for (int blkj=0;blkj<rowlength;blkj++)
{
if (rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj] > wi)
// if (subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj]>wi)
{
coonextnum += rowptr[bi * (rowlength+1) +blkj+1] - rowptr[bi * (rowlength+1) +blkj] - wi ;
// coonextnum+=subrowmatrixA[bi].rowpointer[blkj+1]-subrowmatrixA[bi].rowpointer[blkj]-wi;
}
}
ionextsize= (wi * rowlength) % 2 ==0 ? wi * rowlength * sizeof (MAT_VAL_TYPE )+ wi * rowlength * sizeof (char) /2 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) :
wi * rowlength * sizeof (MAT_VAL_TYPE )+ wi * rowlength * sizeof (char) /2 + 1 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) ;
if (iopriorsize<=ionextsize)
{
hybwidth=wi+1;
break;
}
else
{
iopriorsize=ionextsize;
coopriornum=coonextnum;
}
}
/* if (hybwidth = 0 || iocsr_size <= iopriorsize) //CSR
{
Format[rowblock_ptr[blki]+bi] =0 ;
csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
csrptrlen += rowlength ;
}
if (iocsr_size <= iopriorsize) //CSR
{
Format[rowblock_ptr[blki]+bi] =0 ;
csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
csrptrlen += rowlength ;
// break;
}
else
{
/* if (hybwidth == bwidth) //ELL
{
Format[rowblock_ptr[blki]+bi] = 2;
ellsize += bwidth * rowlength ;
blkwidth[rowblock_ptr[blki]+bi]=bwidth;
blknnz[rowblock_ptr[blki]+bi] = bwidth * rowlength ;
// break;
// ellblknum ++ ;
}
else if (coopriornum <= 10) //HYB
{
Format[rowblock_ptr[blki]+bi] = 3;
hybcoosize+=coopriornum;
hybellsize += hybwidth * rowlength ;
blkwidth[rowblock_ptr[blki]+bi]=hybwidth;
blknnz[rowblock_ptr[blki]+bi] = coopriornum + hybwidth * rowlength ;
// break;
// hybblknum ++ ;
}
else
// if (hybwidth == 0) //CSR
{
Format[rowblock_ptr[blki]+bi] =0 ;
csrsize += nnzb_A[rowblock_ptr[blki]+bi] ;
blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
csrptrlen += rowlength ;
// printf("format =1\n");
// Format[rowblock_ptr[blki]+bi] = 1;
// coosize += nnzb_A[rowblock_ptr[blki]+bi] ;
// blknnz[rowblock_ptr[blki] + bi] = nnzb_A[rowblock_ptr[blki]+bi] ;
// break;
}
}
}
*/
}
}
}
}
free(samecolcount);
free(denserowflag);
free(densecolflag);
free(rownnzA);
/* for (int bi=0;bi<rowbnum;bi++)
{
free(subrowmatrixA[bi].rowpointer);
}
*/
free(rowptr);
// free(subrowmatrixA);
free(num);
free(rbcol) ;
}
gettimeofday(&t2, NULL);
// printf("t1=%f,t2=%f\n",t1,t2);
double time_transstep1 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
printf("transform_step1 runtime = %4.5f sec\n", time_transstep1/1000.0);
for (int blki=0;blki<rbnum;blki++)
{
int rowlength= blki==rbnum-1 ? rowA-(rbnum-1)*BLOCK_SIZE : BLOCK_SIZE ;
int rowbnum=rowblock_ptr[blki+1]-rowblock_ptr[blki];
for (int bi=0;bi<rowbnum;bi++)
{
char format= Format[rowblock_ptr[blki]+bi];
switch (format)
{
case 0: //csr
csrsize += blknnz[rowblock_ptr[blki]+bi];
csrptrlen += rowlength ;
break;
case 1: //coo
coosize += blknnz[rowblock_ptr[blki]+bi];
break;
case 2: //ell
ellsize += blknnz[rowblock_ptr[blki]+bi] ;
break;
case 3: //hyb
hybsize += blknnz[rowblock_ptr[blki]+bi];
hybellsize += blkwidth[rowblock_ptr[blki]+bi] * rowlength;
break;
case 4:
dense_size += blknnz[rowblock_ptr[blki]+bi];
break;
case 5:
denserow_size += blknnz[rowblock_ptr[blki]+bi];
break;
case 6:
densecol_size += blknnz[rowblock_ptr[blki]+bi];
break;
default:
break;
}
}
}
for(int i=0;i<rowblock_ptr[rbnum];i++)
{
hybcoosize += hybcoo[i];
}
exclusive_scan(denserowptr,rowblock_ptr[rbnum]+1);
exclusive_scan(densecolptr,rowblock_ptr[rbnum]+1);
exclusive_scan(multicoonnz,rbnum +1);
/* for (int i=0;i<rowblock_ptr[rbnum]+1 ;i++)
{
printf("%i , ",denserowptr[i]);
}
printf("\n") ;
*/
exclusive_scan(blknnz,(nnzbl+1));
int *formatnum = (int *)malloc(7 * sizeof(int));
memset(formatnum,0,7 * sizeof(int));
for (int j=0;j<7;j++)
{
for (int i=0;i<rowblock_ptr[rbnum];i++)
{
if (Format[i]==j)
{
formatnum[j]++;
// printf("%d ",Format[i]);
// break ;
}
}
}
for (int j=0;j<7;j++)
{
printf("format =%i,count =%i\n",j,formatnum[j]);
}
//for (int i = 0; i < rowblock_ptr[rbnum]; i++)
//{
// printf ("nnz= %i\n",nnzb_A[i]);
//}
/* for (int i=0;i<nnzbl+1;i++)
{
printf("%d ",blknnz[i]);
}
printf("\n");
*/
/*for (int i=0;i<rowblock_ptr[rbnum] + 1;i++)
{
// if (Format[i]==0)
// {
printf("%d ",densecolptr[i]);
// }
}
printf("\n");
*/
// printf("sum=%d\n",csrsize + coosize + ellsize + hybellsize + hybcoosize + denserow_size);
// MAT_VAL_TYPE *Block_Val=(MAT_VAL_TYPE*)malloc((csrsize + coosize + ellsize + hybellsize + hybcoosize + + dense_size + denserow_size)*sizeof(MAT_VAL_TYPE));
// memset(Block_Val,0,(csrsize + coosize + ellsize + hybellsize + hybcoosize + denserow_size)*sizeof(MAT_VAL_TYPE));
//CSR
MAT_VAL_TYPE *Blockcsr_Val=(MAT_VAL_TYPE*)malloc((csrsize)*sizeof(MAT_VAL_TYPE));
unsigned char *Blockcsr_Col=(unsigned char*)malloc((csrsize)*sizeof(unsigned char));
unsigned char *Blockcsr_Ptr=(unsigned char*)malloc((csrptrlen)*sizeof(unsigned char));
int csrvid=0;
int csrpid=0;
//COO
MAT_VAL_TYPE *Blockcoo_Val=(MAT_VAL_TYPE*)malloc((coosize)*sizeof(MAT_VAL_TYPE));
unsigned char *coo_colIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char));
unsigned char *coo_rowIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char));
int coovid=0;
int cooridxid=0 ;
//ELL
MAT_VAL_TYPE *Blockell_Val=(MAT_VAL_TYPE*)malloc((ellsize)*sizeof(MAT_VAL_TYPE));
memset(Blockell_Val,0,(ellsize)*sizeof(MAT_VAL_TYPE));
unsigned char *ell_colIdx=(unsigned char*)malloc((ellsize)*sizeof(unsigned char));
memset(ell_colIdx, 0, sizeof(INDEX_DATA_TYPE) * ellsize);
// unsigned char * ellblkwidth = (char *)malloc (ellblknum * sizeof(char));
// int ellblk=0;
int elloffset =0;
// memset(Blockell_Val, 0, sizeof(MAT_VAL_TYPE) * ellsize);
//HYB
MAT_VAL_TYPE *Blockhyb_Val=(MAT_VAL_TYPE*)malloc((hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE));
memset(Blockhyb_Val,0,(hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE));
unsigned char *hyb_ellcolIdx=(unsigned char*)malloc((hybellsize+hybcoosize)*sizeof(unsigned char));
unsigned char * hyb_coorowIdx=(unsigned char*)malloc((hybcoosize)*sizeof(unsigned char)) ;
memset(hyb_ellcolIdx, 0, sizeof(INDEX_DATA_TYPE) * (hybellsize+hybcoosize));
int hyboffset =0;
int hybcoonnzsum =0;
//dense
MAT_VAL_TYPE *Blockdense_Val=(MAT_VAL_TYPE*)malloc((dense_size)*sizeof(MAT_VAL_TYPE));
memset(Blockdense_Val,0,dense_size * sizeof(MAT_VAL_TYPE));
int denseoffset =0;
//denserow
// printf("denserow_size=%d\n",denserow_size);
MAT_VAL_TYPE *Blockdenserow_Val=(MAT_VAL_TYPE*)malloc((denserow_size) * sizeof(MAT_VAL_TYPE));
// char *denserowid = (char *)malloc(denrowsum * sizeof(char));
char *denserowid = (char *)malloc(denserowptr[rowblock_ptr[rbnum]] * sizeof(char));
int densrow_rid= 0;
int denserow_vid=0;
// int * denserowptr = (int *)malloc((denrowblknum + 1) * sizeof(int));
// memset(denserowptr,0,(denrowblknum + 1) * sizeof(int));
int denrowcount =0;
//dense column
MAT_VAL_TYPE *Blockdensecol_Val=(MAT_VAL_TYPE*)malloc((densecol_size) * sizeof(MAT_VAL_TYPE));
// char *densecolid = (char *)malloc(dencolsum * sizeof(char));
char *densecolid = (char *)malloc(densecolptr[rowblock_ptr[rbnum]] * sizeof(char));
int densecol_cid= 0;
// int densecol_vid=0;
int dencoloffset =0;
// int * densecolptr = (int *)malloc((dencolblknum + 1) * sizeof(int));
// memset(densecolptr,0,(dencolblknum + 1) * sizeof(int));
int dencolcount =0;
gettimeofday(&t1, NULL);
//for each row block
for (int blki=0;blki<rbnum;blki++)
{
int rowbnum=rowblock_ptr[blki+1]-rowblock_ptr[blki];
SMatrix *subrowmatrixA=(SMatrix *)malloc(rowbnum*sizeof(SMatrix));
int rowlength= blki==rbnum-1 ? rowA-(rbnum-1)*BLOCK_SIZE : BLOCK_SIZE ;
// printf("rowlength=%d\n",rowlength);
int start= blki*BLOCK_SIZE;
int end = blki==rbnum-1 ? rowA : (blki+1)*BLOCK_SIZE ;
for (int bi=0;bi<rowbnum;bi++)
{
subrowmatrixA[bi].value=(MAT_VAL_TYPE*)malloc((nnzb_A[rowblock_ptr[blki]+bi])*sizeof(MAT_VAL_TYPE));
subrowmatrixA[bi].columnindex=(int *)malloc((nnzb_A[rowblock_ptr[blki]+bi])*sizeof(int));
subrowmatrixA[bi].rowpointer=(MAT_PTR_TYPE *)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE));
memset(subrowmatrixA[bi].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE));
}
int *num=(int*)malloc((rowbnum)*sizeof(int));
memset(num,0,(rowbnum)*sizeof(int));
for (int ri=0;ri<rowlength;ri++)
{
for (int j=matrixA.rowpointer[start+ri];j<matrixA.rowpointer[start+ri+1];j++)
{
for (int k=rowblock_ptr[blki],ki=0;k<rowblock_ptr[blki+1]&&ki<rowbnum;k++,ki++)
{
int kcstart=columnid[k]*BLOCK_SIZE;
int kcend= columnid[k]== (cbnum-1) ? colA: (columnid[k]+1)*BLOCK_SIZE;
if (matrixA.columnindex[j]>=kcstart&&matrixA.columnindex[j]<kcend)
{
num[ki]++;
subrowmatrixA[ki].value[num[ki]-1]=matrixA.value[j];
subrowmatrixA[ki].columnindex[num[ki]-1]=matrixA.columnindex[j]-columnid[k]*BLOCK_SIZE;
break;
}
}
}
for (int bi=0;bi<rowbnum;bi++){
subrowmatrixA[bi].rowpointer[ri+1]=num[bi];
}
}
for(int bi=0;bi<rowbnum;bi++)
{
/* for (int kk=0;kk<blknnz[rowblock_ptr[blki]+bi + 1] - blknnz[rowblock_ptr[blki]+bi ]; kk++ )
{
printf("%d ",(int)subrowmatrixA[bi].value[kk]);
}
printf("\n") ;
printf("\n") ;
*/
int collength = columnid[rowblock_ptr[blki]+bi] == cbnum-1 ? colA - (cbnum-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
//CSR
if (Format[rowblock_ptr[blki]+bi] == 0)
{
//CSR val&col
for (int k=0;k<blknnz[rowblock_ptr[blki] + bi + 1]-blknnz[rowblock_ptr[blki] + bi];k++)
{
Blockcsr_Val[csrvid]=subrowmatrixA[bi].value[k] ;
// Block_Val [blknnz[rowblock_ptr[blki] + bi] + k] = subrowmatrixA[bi].value[k] ;
Blockcsr_Col[csrvid]=subrowmatrixA[bi].columnindex[k];
csrvid++;
}
//CSR ptr
for (int jid=0;jid<rowlength;jid++)
{
Blockcsr_Ptr[csrpid]=subrowmatrixA[bi].rowpointer[jid];
csrpid++;
}
}
//COO
else if (Format[rowblock_ptr[blki]+bi] == 1)
{
/* for (int ri = 0; ri < rowlength; ri++)
{
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
coo_rowIdx[cooridxid] = ri;
cooridxid++;
Blockcoo_Val[coovid] = subrowmatrixA[bi].value[j] ;
// Block_Val [blknnz[rowblock_ptr[blki] + bi] + k] = subrowmatrixA[bi].value[k] ;
coo_colIdx[coovid]=subrowmatrixA[bi].columnindex[j];
coovid++;
}
}
*/
for (int k=0;k<blknnz[rowblock_ptr[blki] + bi + 1]-blknnz[rowblock_ptr[blki] + bi];k++)
{
Blockcoo_Val[coovid] = subrowmatrixA[bi].value[k] ;
// Block_Val [blknnz[rowblock_ptr[blki] + bi] + k] = subrowmatrixA[bi].value[k] ;
coo_colIdx[coovid]=subrowmatrixA[bi].columnindex[k];
coovid++;
}
//COO rowidx
for (int ri = 0; ri < rowlength; ri++)
{
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
coo_rowIdx[cooridxid] = ri;
cooridxid++;
}
}
}
//ELL col first
else if (Format[rowblock_ptr[blki]+bi] == 2)
{
int bwidth=0;
for (int bj=0;bj<rowlength;bj++)
{
if (bwidth<subrowmatrixA[bi].rowpointer[bj+1]-subrowmatrixA[bi].rowpointer[bj])
{
bwidth=subrowmatrixA[bi].rowpointer[bj+1]-subrowmatrixA[bi].rowpointer[bj];
}
}
// ellblkwidth[ellblk]=bwidth;
// ellblk++;
for (int ri = 0; ri < rowlength; ri++)
{
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
int temp = j - subrowmatrixA[bi].rowpointer[ri];
ell_colIdx[elloffset + temp * rowlength + ri] = subrowmatrixA[bi].columnindex[j];
// Block_Val[blknnz[rowblock_ptr[blki] + bi] + temp * rowlength + ri] = subrowmatrixA[bi].value[j] ;
Blockell_Val[elloffset + temp * rowlength + ri] = subrowmatrixA[bi].value[j];
}
}
elloffset += bwidth * rowlength;
}
//HYB
else if (Format[rowblock_ptr[blki]+bi] == 3)
{
int coocount=0;
for (int ri = 0; ri < rowlength; ri++)
{
int stop= (subrowmatrixA[bi].rowpointer[ri+1]- subrowmatrixA[bi].rowpointer[ri]) <= blkwidth[rowblock_ptr[blki]+bi] ? subrowmatrixA[bi].rowpointer[ri+1] :
subrowmatrixA[bi].rowpointer[ri] + blkwidth[rowblock_ptr[blki]+bi] ;
for (int j = subrowmatrixA[bi].rowpointer[ri]; j <stop; j++)
{
int temp = j - subrowmatrixA[bi].rowpointer[ri];
hyb_ellcolIdx[hyboffset+temp * rowlength + ri] = subrowmatrixA[bi].columnindex[j];
// Block_Val[blknnz[rowblock_ptr[blki]+bi]+temp * rowlength + ri] = subrowmatrixA[bi].value[j];
Blockhyb_Val[hyboffset+temp * rowlength + ri] = subrowmatrixA[bi].value[j];
}
for (int k=stop;k<subrowmatrixA[bi].rowpointer[ri+1];k++)
{
// Block_Val[blknnz[rowblock_ptr[blki]+bi]+ blkwidth[rowblock_ptr[blki]+bi] * rowlength + coocount] = subrowmatrixA[bi].value[k] ;
Blockhyb_Val[hyboffset+ blkwidth[rowblock_ptr[blki]+bi] * rowlength + coocount] = subrowmatrixA[bi].value[k];
hyb_ellcolIdx[hyboffset+ blkwidth[rowblock_ptr[blki]+bi] * rowlength + coocount]= subrowmatrixA[bi].columnindex[k];
hyb_coorowIdx[hybcoonnzsum+coocount]=ri;
coocount++;
}
}
hybcoonnzsum += blknnz[rowblock_ptr[blki] +bi +1]- blknnz[rowblock_ptr[blki] +bi] - blkwidth[rowblock_ptr[blki] +bi] * rowlength ; //blkhybcoonum[rowblock_ptr[blki]+bi] ;
hyboffset += blkwidth[rowblock_ptr[blki]+bi] * rowlength + coocount;
}
//dense
else if (Format[rowblock_ptr[blki]+bi] == 4)
{
for (int ri = 0; ri < rowlength; ri++)
{
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
Blockdense_Val[denseoffset + ri * collength + subrowmatrixA[bi].columnindex[j]]= subrowmatrixA[bi].value[j];
// Block_Val[blknnz[rowblock_ptr[blki]+bi] + ri * collength + subrowmatrixA[bi].columnindex[j]]= subrowmatrixA[bi].value[j];
}
}
denseoffset += rowlength *collength;
}
//dense row
else if (Format[rowblock_ptr[blki]+bi] == 5)
{
for (int ri = 0; ri < rowlength; ri++)
{
if (subrowmatrixA[bi].rowpointer[ri+1] - subrowmatrixA[bi].rowpointer[ri] == collength)
{
denserowid[densrow_rid]=ri;
densrow_rid ++;
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
// Block_Val[blknnz[rowblock_ptr[blki]+bi]+denserow_vid] = subrowmatrixA[bi].value[j];
Blockdenserow_Val[denserow_vid]= subrowmatrixA[bi].value[j];
denserow_vid ++;
}
}
}
// denserowptr[denrowcount+1] = densrow_rid ;
// denrowcount ++ ;
}
//dense column
else if (Format[rowblock_ptr[blki]+bi] == 6)
{
for (int j=subrowmatrixA[bi].rowpointer[0];j < subrowmatrixA[bi].rowpointer[1];j ++)
{
int ci = subrowmatrixA[bi].columnindex[j] ;
densecolid[densecol_cid] =ci ;
densecol_cid ++;
}
// densecolptr[dencolcount +1] = densecol_cid ;
// dencolcount ++ ;
for (int ri = 0; ri < rowlength; ri++)
{
for (int j = subrowmatrixA[bi].rowpointer[ri]; j < subrowmatrixA[bi].rowpointer[ri+1]; j++)
{
int temp = j - subrowmatrixA[bi].rowpointer[ri];
Blockdensecol_Val[dencoloffset + temp * rowlength + ri] = subrowmatrixA[bi].value[j];
// printf("value[%d] = %d\n",dencoloffset + temp * rowlength + ri,(int )Blockell_Val[dencoloffset + temp * rowlength + ri]) ;
}
}
dencoloffset += blknnz[rowblock_ptr[blki]+bi + 1] - blknnz[rowblock_ptr[blki]+bi] ;
// printf("dencoloffset = %d\n",dencoloffset) ;
}
}
for (int bi=0;bi<rowbnum;bi++)
{
free(subrowmatrixA[bi].value);
free(subrowmatrixA[bi].columnindex);
free(subrowmatrixA[bi].rowpointer);
}
free(subrowmatrixA);
free(num);
}
gettimeofday(&t2, NULL);
double time_transstep2 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
printf("transform_step2 runtime = %4.5f sec\n", time_transstep2/1000.0);
//MultiCOO
MAT_VAL_TYPE *multicoo_Val=(MAT_VAL_TYPE*)malloc((coosize)*sizeof(MAT_VAL_TYPE));
int *multicoo_Col=(int*)malloc((coosize)*sizeof(int));
int *multicoo_Ptr=(int *)malloc(((BLOCK_SIZE +1) ) * rbnum *sizeof(int));
memset(multicoo_Ptr,0,((BLOCK_SIZE +1) ) * rbnum *sizeof(int)) ;
int multicoo_offset =0 ;
/* for (int blki =0;blki <rbnum ;blki ++)
{
int multicoo_vid =0;
int rowlen = blki == rbnum -1 ? rowA - (rbnum-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
int rowbnum = rowblock_ptr[blki +1] - rowblock_ptr[blki];
for (int ri =0;ri < rowlen ; ri ++)
{
for (int bi =0;bi <rowbnum ;bi ++)
{
// int collen = columnid[rowblock_ptr[blki] + bi] == cbnum -1 ? colA - (cbnum-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
if (Format[rowblock_ptr[blki] + bi] ==1) //if COO
{
// while (nnzid <blknnz[rowblock_ptr[blki] + bi +1]- blknnz[rowblock_ptr[blki] + bi])
for (int nnzid =0;nnzid < blknnz[rowblock_ptr[blki] + bi +1]- blknnz[rowblock_ptr[blki] + bi]; nnzid ++)
{
int row = coo_rowIdx[multicoo_offset + nnzid] ;
int col = coo_colIdx[multicoo_offset + nnzid] ;
int val = Blockcoo_Val[multicoo_offset + nnzid] ;
if (row == ri)
{
multicoo_Val[multicoonnz[blki]+ multicoo_vid] = val;
multicoo_Col[smulticoonnz[blki]+ multicoo_vid] = col + columnid[rowblock_ptr[blki] + bi] * BLOCK_SIZE ;
multicoo_vid ++ ;
}
}
multicoo_offset += blknnz[rowblock_ptr[blki] + bi +1]- blknnz[rowblock_ptr[blki] + bi] ;
}
}
multicoo_Ptr[blki * BLOCK_SIZE + ri +1] = multicoo_vid ;
}
}
for (int i=0;i<rbnum ;i++)
{
for (int j=0;j<BLOCK_SIZE +1 ;j++)
{
printf("%i ",multicoo_Ptr[i * BLOCK_SIZE + j]);
}
printf("\n");
printf("\n");
}
printf("\n");
*/
/* for (int i=0;i< hybellsize+hybcoosize ; i++)
{
if (Blockhyb_Val[i] < 0)
{
printf("errval= %f,colidx = %d\n",Blockhyb_Val[i],hyb_ellcolIdx[i]);
}
}
*/
/* for(int i =0;i< rowblock_ptr[rbnum]+1 ;i++)
{
printf("%d ",denserowptr[i]);
}
printf("\n");
printf("\n");
for(int i =0;i< denserowptr[rowblock_ptr[rbnum]] ;i++)
{
printf("%d ",denserowid[i]);
}
printf("\n");
printf("\n");
for(int i =0;i< denserow_size ;i++)
{
printf("%d ",(int)Blockdenserow_Val[i]);
}
printf("\n");
*/
/*for (int i=0;i<blknnz[nnzbl];i++)
{
printf("%f ",Block_Val[i]);
}
printf("csr\n");
for (int i=0;i<csrsize;i++)
{
printf("%f ",Blockcsr_Val[i]);
}
printf("\n");
printf("\n");
printf("coo\n");
for (int i=0;i<coosize;i++)
{
printf("%f ",Blockcoo_Val[i]);
}
printf("\n");
printf("\n");
printf("hyb\n");
for (int i=0;i<hybellsize+hybcoosize;i++)
{
printf("%f ",Blockhyb_Val[i]);
}
printf("\n");
printf("\n");
*/
/* printf("HYB_ell width: \n");
for (int i=0;i<rowblock_ptr[rbnum];i++)
{
printf("%d 's width = %d,coonum=%d \n ",i,blkwidth[i],blkhybcoonum[i]);
}
printf("\n");
printf("\n");
*/
// printf("hybellsize+hybcoosize=%d\n",hybellsize+hybcoosize) ;
/*
for (int i=0;i<hybellsize+hybcoosize;i++)
{
// if (Blockhyb_Val[i]!=0)
// printf("%f ,",Blockhyb_Val[i]);
printf("%d ,",hyb_ellcolIdx[i]);
// printf("\n");
}
printf("\n");
printf("\n");
printf("hyb_coorowIdx=%d\n",hybcoosize) ;
for (int i=0;i<hybcoosize;i++)
{
printf("%d ,",hyb_coorowIdx[i]);
}
printf("\n");
printf("\n");
*/
// exclusive_scan(nnzb_A,nnzbl+1);
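/*
  Index compression for the block formats below (descriptive note): the code
  assumes BLOCK_SIZE <= 16, so a local row/column index fits in 4 bits.
  encode() packs two column indices per byte for the CSR/ELL/HYB formats, and
  each COO entry is packed as (row << 4) | col into one byte.  The masks
  num_f (0xF0) and num_b (0x0F) defined further down recover the indices
  during the SpMV loop.
*/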
//CSR compressed Colidx
int csr_csize = csrsize % 2 ==0 ? csrsize /2 : csrsize /2 +1 ;
unsigned char *csr_compressedIdx=(unsigned char*)malloc((csr_csize)*sizeof(unsigned char));
encode(Blockcsr_Col , csr_compressedIdx,csrsize ,0);
free(Blockcsr_Col);
//COO compressed cooIdx
unsigned char *coo_Idx=(unsigned char*)malloc((coosize)*sizeof(unsigned char));
int count = 0;
for (int i = 0; i < coosize; i++)
{
coo_Idx[count] = (coo_rowIdx[i] << 4) + coo_colIdx[i];
count++;
}
// free(coo_colIdx);
// free(coo_rowIdx);
//ELl compressed cooIdx
int ell_csize = ellsize % 2 ==0 ? ellsize /2 : ellsize /2 +1 ;
unsigned char *ell_compressedIdx=(unsigned char*)malloc((ell_csize)*sizeof(unsigned char));
encode(ell_colIdx , ell_compressedIdx ,ellsize, 0 );
free(ell_colIdx);
//HYB compressed Idx
int hyb_size = hybellsize%2==0 ? hybellsize/2 : (hybellsize/2)+1 ;
unsigned char *hybIdx=(unsigned char*)malloc((hyb_size+hybcoosize)*sizeof(unsigned char));
int blkoffset=0;
hybcoonnzsum=0;
int hc_offset=0;
for(int blki=0;blki<rbnum;blki++)
{
int rowlength= blki==rbnum-1 ? rowA-(rbnum-1)* BLOCK_SIZE : BLOCK_SIZE ;
for (int blkj=rowblock_ptr[blki];blkj<rowblock_ptr[blki+1];blkj++)
{
if (Format[blkj]==3)
{
encode(hyb_ellcolIdx + blkoffset , hybIdx, blkwidth[blkj]*rowlength,hc_offset);
hc_offset += (rowlength * blkwidth[blkj]) % 2 ==0 ? (rowlength * blkwidth[blkj]) / 2 : (rowlength * blkwidth[blkj] / 2 )+ 1 ;
// printf ("%d , %d\n",blkj,hc_offset);
for (int i = 0; i < blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength; i++)
{
hybIdx[hc_offset+i] = (hyb_coorowIdx[hybcoonnzsum+i] << 4) + hyb_ellcolIdx[blkoffset+ rowlength * blkwidth[blkj] + i];
}
hybcoonnzsum += blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength;
blkoffset += blkwidth[blkj] * rowlength + blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength;
hc_offset += blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength ;
}
}
}
free(hyb_ellcolIdx);
free(hyb_coorowIdx);
printf("Format transform success\n");
INDEX_DATA_TYPE num_f = 240;
INDEX_DATA_TYPE num_b = 15;
MAT_VAL_TYPE *x = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * colA);
for (int i = 0; i < colA; i++)
{
x[i] = i % 10;
}
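// Reference result: a plain CSR SpMV over the original matrixA, used below to
// validate the blocked multi-format SpMV.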
MAT_VAL_TYPE *y_golden = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA);
for (int i = 0; i < rowA; i++)
{
MAT_VAL_TYPE sum = 0;
for (int j = matrixA.rowpointer[i]; j < matrixA.rowpointer[i+1]; j++)
{
sum += matrixA.value[j] * x[matrixA.columnindex[j]];
}
y_golden[i] = sum;
}
// SpMV over the blocked matrix, dispatching on the per-block format chosen above (CSR, COO, ELL, HYB, dense variants)
MAT_VAL_TYPE *y = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA);
int ellwoffset=0;
int hybwoffset=0;
int hybidxoffset=0;
denseoffset =0;
int coooffset= 0 ;
int csroffset = 0;
int csrcount =0;
// dencolcount =0;
// denrowcount =0;
int dnsrowoffset =0;
int dnscoloffset =0;
gettimeofday(&t1, NULL);
//multicoo_offset =0;
for (int blki = 0; blki < rbnum; blki++)
{
int rowlength= blki== rbnum-1 ? rowA-(rbnum-1)*BLOCK_SIZE : BLOCK_SIZE ;
// clear y covered by the block row
// int blocksize;
// blocksize= blki == (rbnum-1) ?
for (int ri = 0; ri < rowlength; ri++)
{
y[blki * BLOCK_SIZE + ri] = 0;
}
// for each block in the block row
for (int blkj = rowblock_ptr[blki]; blkj < rowblock_ptr[blki+1]; blkj++)
{
int collength = columnid[blkj] == cbnum-1 ? colA - (cbnum-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
int x_offset = columnid[blkj] * BLOCK_SIZE;
//CSR
if (Format[blkj]==0)
{
// for each row in the block
for (int ri = 0; ri < rowlength; ri++)
{
MAT_VAL_TYPE sum = 0;
// for each nonzero in the row of the block
// the last row uses nnzlocal
int stop = ri == rowlength - 1 ? (blknnz[blkj+1]-blknnz[blkj]) : Blockcsr_Ptr[ri+1+csrcount];
for (int rj = Blockcsr_Ptr[csrcount +ri]; rj < stop; rj++)
{
int csrcol = (csroffset + rj) % 2 ==0 ? (csr_compressedIdx[(csroffset + rj) / 2] & num_f )>> 4 :
csr_compressedIdx[(csroffset + rj) / 2 ] & num_b ;
sum += x[x_offset + csrcol] * Blockcsr_Val[csroffset+rj];
}
y[blki * BLOCK_SIZE + ri] += sum;
}
csroffset += blknnz[blkj+1]-blknnz[blkj] ;
csrcount += rowlength ;
}
//COO
else if (Format[blkj]==1 )
{
for (int bnnzid= 0; bnnzid < blknnz[blkj+1]-blknnz[blkj]; bnnzid++)
{
int row= (coo_Idx[coooffset+bnnzid] & num_f) >> 4;//coo_rowIdx[coooffset+bnnzid];//
int col= coo_Idx[coooffset+bnnzid] & num_b;//coo_colIdx[coooffset+bnnzid] ; //
// printf("row=%i,col= %i,val = %f,x=%f\n",row,col,Blockcoo_Val[coooffset+bnnzid],x[x_offset + col]);
y[blki * BLOCK_SIZE+ row] += Blockcoo_Val[coooffset+bnnzid] * x[x_offset + col];
// printf("y[%i ]= %f\n",blki * BLOCK_SIZE+ row,y[blki * BLOCK_SIZE+ row]);
}
coooffset += blknnz[blkj+1]-blknnz[blkj] ;
// printf("coooffset = %i\n",coooffset);
}
//ELL
else if (Format[blkj]==2 )
{
// for each row in the block
for (int ri = 0; ri < rowlength; ri++)
{
MAT_VAL_TYPE sum = 0;
// for each nonzero in the row of the block
// the last row uses nnzlocal
for (int j = 0; j < blkwidth[blkj]; j++)
{
int ellcol = (ellwoffset+ j * rowlength + ri) % 2 ==0 ? (ell_compressedIdx[(ellwoffset+ j * rowlength + ri) / 2] & num_f )>> 4 :
ell_compressedIdx[(ellwoffset+ j * rowlength + ri) / 2 ] & num_b ;
if (Blockell_Val[ellwoffset + j * rowlength + ri ]!=0 )
{
sum += Blockell_Val[ellwoffset+j * rowlength + ri] * x[x_offset+ ellcol];
}
}
y[blki * BLOCK_SIZE + ri] += sum;
}
ellwoffset+=blkwidth[blkj]*rowlength;
}
//HYB
else if (Format[blkj]==3 )
{
for (int ri = 0; ri < rowlength; ri++)
{
MAT_VAL_TYPE sum = 0;
for (int j = 0; j < blkwidth[blkj]; j++)
{
// if (Blockhyb_Val[hybwoffset + j * rowlength + ri] != 0)
// {
int hybcol = ( j * rowlength + ri)%2 == 0 ? (hybIdx[hybidxoffset+(j * rowlength + ri)/2] & num_f )>> 4 :
hybIdx[hybidxoffset+(j * rowlength + ri)/2] & num_b ;
// printf("hybcol=%i,val = %f,x=%f\n",hybcol, Blockhyb_Val[hybwoffset + j * rowlength + ri],x[x_offset + hybcol]);
sum+= Blockhyb_Val[hybwoffset + j * rowlength + ri] * x[x_offset + hybcol];
// }
}
y[blki * BLOCK_SIZE + ri] += sum;
// printf("eformat=%i,sum= %f\n",Format[blkj],sum);
}
int offset = hybwoffset + rowlength * blkwidth[blkj];
hybidxoffset += (rowlength * blkwidth[blkj]) % 2 ==0 ? rowlength * blkwidth[blkj] / 2 : (rowlength * blkwidth[blkj] / 2 )+ 1 ;
for (int i = 0; i < blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength; i++)
{
int rowidx=(hybIdx[hybidxoffset + i] & num_f) >> 4;
int colidx= hybIdx[hybidxoffset + i] & num_b;
// if (rowidx <0)
// printf("rowidx=%d\n",rowidx);
y[blki * BLOCK_SIZE +rowidx] += Blockhyb_Val[offset + i] * x[x_offset + colidx];
}
hybwoffset+=blkwidth[blkj] * rowlength + blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength;
hybidxoffset += blknnz[blkj+1]- blknnz[blkj] - blkwidth[blkj] * rowlength ;
}
//dense
else if (Format[blkj] == 4)
{
for (int ri = 0; ri < BLOCK_SIZE; ri++)
{
MAT_VAL_TYPE sum = 0;
// for each nonzero in the row of the block
// the last row uses nnzlocal
// int stop = ri == BLOCK_SIZE - 1 ? (nnzb_A[blkj+1]-nnzb_A[blkj]) : BlockA_Ptr[ri+1+blkj*BLOCK_SIZE];
for (int rj = ri * collength; rj < (ri +1)*collength; rj++)
{
int densecol=rj % collength ;
sum += x[x_offset + densecol] * Blockdense_Val[denseoffset +rj];
}
y[blki * BLOCK_SIZE + ri] += sum;
}
denseoffset += rowlength * collength ;
}
//dense row
else if (Format[blkj] == 5)
{
for (int ri=denserowptr[blkj]; ri < denserowptr[ blkj +1 ];ri++)
{
MAT_VAL_TYPE sum = 0;
for (int rj = 0; rj < collength; rj++)
{
// int denserowcol=rj;
sum += x[x_offset + rj] * Blockdenserow_Val[dnsrowoffset + (ri - denserowptr[blkj]) * collength +rj];
}
y[blki * BLOCK_SIZE + denserowid[ri]] += sum;
}
dnsrowoffset += blknnz[blkj+1]-blknnz[blkj] ;
}
//dense column
else if (Format[blkj] == 6)
{
for (int ri=0 ; ri < rowlength;ri++)
{
MAT_VAL_TYPE sum = 0;
for (int rj = densecolptr[blkj]; rj < densecolptr[blkj +1]; rj++)
{
sum += Blockdensecol_Val[dnscoloffset + (rj-densecolptr[blkj]) * rowlength + ri] * x[x_offset+ densecolid[rj]];
}
y[blki * BLOCK_SIZE + ri] += sum;
}
dnscoloffset += blknnz[blkj+1]-blknnz[blkj] ;
}
}
/* //multicoo
for (int ri = 0; ri < rowlength; ri++)
{
MAT_VAL_TYPE sum = 0;
// for each nonzero in the row of the block
// the last row uses nnzlocal
// int stop = ri == rowlength - 1 ? (blknnz[blkj+1]-blknnz[blkj]) : Blockcsr_Ptr[ri+1+csrcount];
int stop = multicoo_Ptr[blki * BLOCK_SIZE + ri +1] ;
for (int rj = multicoo_Ptr[blki * BLOCK_SIZE + ri ]; rj < stop; rj++)
{
// int csrcol = (csroffset + rj) % 2 ==0 ? (csr_compressedIdx[(csroffset + rj) / 2] & num_f )>> 4 :
// csr_compressedIdx[(csroffset + rj) / 2 ] & num_b ;
int col = multicoo_Col[multicoo_Ptr[blki * BLOCK_SIZE ] + rj];
sum += x[col] * multicoo_Val[multicoo_Ptr[blki * BLOCK_SIZE ] + rj];
}
y[blki * BLOCK_SIZE + ri] += sum;
}
*/
}
gettimeofday(&t2, NULL);
double cputime = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
printf("CPU runtime = %4.5f sec\n", cputime/1000.0);
// check results
int errcount = 0;
for (int i = 0; i < rowA; i++)
{
if (y[i] != y_golden[i])
{
errcount++;
// printf("%f %f,%d\n",y[i],y_golden[i],i);
}
}
printf("spmv errcount = %i\n", errcount);
// for(int i=0;i<rowblock_ptr[rbnum];i++)
// {
// printf("i=%i,nnz = %i,format = %i\n",i,blknnz[i+1]-blknnz[i],Format[i] ) ;
// }
//CSR
free(Blockcsr_Val);
free(csr_compressedIdx);
free(Blockcsr_Ptr);
//COO
free(Blockcoo_Val);
free(coo_Idx);
//ELL
free(Blockell_Val);
free(ell_compressedIdx);
//HYB
free(Blockhyb_Val);
free(hybIdx);
//dense
free(Blockdense_Val);
//denserow
free(Blockdenserow_Val);
free(denserowid);
free(denserowptr);
//densecol
free(Blockdensecol_Val);
free(densecolid);
free(densecolptr);
free(matrixA.value);
free(matrixA.columnindex);
free(matrixA.rowpointer);
}
|
latency_ctx.h
|
/*
* Copyright (c) 2018 Intel Corporation. All rights reserved.
* This software is available to you under the BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
static inline
void streaming_put_latency_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
double start = 0.0, end = 0.0;
unsigned long int i;
int dest = partner_node(metric_info);
static int check_once = 0;
if (!check_once) {
/* check to see whether sender and receiver are the same process */
if (dest == metric_info->my_node) {
fprintf(stderr, "Warning: Sender and receiver are the same "
"process (%d)\n", dest);
}
/* hostname validation for all sender and receiver processes */
int status = check_hostname_validation(metric_info);
if (status != 0) return;
check_once++;
}
shmem_barrier_all();
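/* Warm-up pass: each thread creates a private context and issues untimed
puts so that connection setup does not distort the measurement. */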
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
for (i = 0; i < metric_info->warmup; i++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
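/* Timed pass: the master thread records the start time once every thread has
created its context; the end time is taken after the final barrier below. */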
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
#pragma omp barrier
#pragma omp master
{
start = perf_shmemx_wtime();
}
for (i = 0; i < metric_info->trials; i++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node) {
end = perf_shmemx_wtime();
calc_and_print_results(start, end, len, metric_info);
}
shmem_barrier_all();
}
static inline
void streaming_get_latency_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
double start = 0.0, end = 0.0;
unsigned long int i;
int dest = partner_node(metric_info);
static int check_once = 0;
if (!check_once) {
/* check to see whether sender and receiver are the same process */
if (dest == metric_info->my_node) {
fprintf(stderr, "Warning: Sender and receiver are the same "
"process (%d)\n", dest);
}
/* hostname validation for all sender and receiver processes */
int status = check_hostname_validation(metric_info);
if (status != 0) return;
check_once++;
}
shmem_barrier_all();
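/* Same structure as streaming_put_latency_ctx above: an untimed warm-up pass
followed by a timed pass, but issuing gets instead of puts. */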
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
for (i = 0; i < metric_info->warmup; i++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_getmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
shmem_ctx_quiet(ctx);
#else
shmem_ctx_getmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
#pragma omp barrier
#pragma omp master
{
start = perf_shmemx_wtime();
}
for (i = 0; i < metric_info->trials; i++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_getmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
shmem_ctx_quiet(ctx);
#else
shmem_ctx_getmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node) {
end = perf_shmemx_wtime();
calc_and_print_results(start, end, len, metric_info);
}
shmem_barrier_all();
}
|
3d25pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* set from argv below; left at 0 (no work done) if arguments are missing */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(24*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(24*t3+Nx+11,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),128*t4+126);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
rgb2yuv_openmp.c
|
/*
* Tecnologico de Costa Rica (www.tec.ac.cr)
* Course: MP-6171 High Performance Embedded Systems
* Developers Name: Verny Morales and Luis Carlos Alvarez
* Developers email: [email protected] and [email protected]
 * General purpose: Convert a raw 640x480 RGB image to packed YUV using OpenMP.
 * Input: RGB file given with -i
 * Output: YUV file written to the path given with -o
*
*/
//gcc -fopenmp rgb2yuv_openmp.c -o rgb2yuv_openmp
//./rgb2yuv_openmp -i image.rgb -o outputOM.yuv
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <time.h>
#include <omp.h>
#define IMAGE_WIDTH 640
#define IMAGE_HEIGHT 480
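/*
 * Converts one RGB pixel to YUV using the usual BT.601 studio-range
 * coefficients, clamps each component to [0,255], and packs Y, U and V into
 * the low three bytes of a 32-bit word.
 */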
int rgb2yuvPixel (int R, int G, int B){
int Y, V, U;
unsigned int pixel32;
unsigned char *pixel = (unsigned char *)&pixel32;
Y = (0.257 * R) + (0.504 * G) + (0.098 * B) + 16;
V = (0.439 * R) - (0.368 * G) - (0.071 * B) + 128;
U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128;
if (Y > 255) {
Y = 255;
}
if (U > 255) {
U = 255;
}
if (V > 255) {
V = 255;
}
if (Y < 0) {
Y = 0;
}
if (U < 0) {
U = 0;
}
if (V < 0) {
V = 0;
}
pixel[0] = Y;
pixel[1] = U;
pixel[2] = V;
pixel[3] = 0;
return pixel32;
}
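/*
 * Reads the whole RGB frame into memory, converts the pixels to YUV in an
 * OpenMP parallel loop (file I/O stays sequential), and writes the packed
 * YUV bytes to the output file.
 */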
void rgb2yuv (char *input_image, char *output_image){
FILE *in, *out;
int R, G, B, y2, i, size;
size = IMAGE_WIDTH*IMAGE_HEIGHT;
unsigned int pixelRGB[size], pixel32;
unsigned char pixelYUV[size][3];
in = fopen(input_image, "rb");
out = fopen(output_image, "wb");
if (!in || !out) {
printf("Error: could not open input or output file.\n");
if (in) fclose(in);
if (out) fclose(out);
return;
}
for(i=0; i<size; i++){
fread(&pixelRGB[i], 3, 1, in);
}
#pragma omp parallel shared(pixelYUV, pixelRGB, size) private(i, R, G, B, pixel32)
{
#pragma omp for
for(i=0; i<size; i++){
R = ((pixelRGB[i] & 0x000000ff));
G = ((pixelRGB[i] & 0x0000ff00)>>8);
B = ((pixelRGB[i] & 0x00ff0000)>>16);
pixel32 = rgb2yuvPixel(R, G, B);
pixelYUV[i][0] = (pixel32 & 0x000000ff);
pixelYUV[i][1] = (pixel32 & 0x0000ff00) >> 8;
pixelYUV[i][2] = (pixel32 & 0x00ff0000) >> 16;
}
}
for(i=0; i<size; i++){
fwrite(pixelYUV[i], 3, 1, out);
}
fclose(in);
fclose(out);
}
int main (int argc, char **argv) {
clock_t t;
int i = 0;
int opt = -1;
int rgbFlag = 0;
int yuvFlag = 0;
int infoAuhtorFlag = 0;
int infoExcutionAppFlag = 0;
char *pathRGBFile;
char *pathYUVFile;
char usageMessage[] = "\n# USAGE MODE: \n"
"./rgb2yuv_c [ -i RGBfile ] [ -o YUVfile ] [-h] [-a] \n"
"-i RGBfile specifies the RGB file to be converted. \n"
"-o YUVfile specifies the output file name. \n"
"-a displays the information of the author of the program. \n"
"-h displays the usage message to let the user know how to execute the application; \n"
"Yocto prompt: \n"
"rgb2yuv_c -i image.rgb -o outputC.yuv \n";
char authorsInfo[] = "\n# AUTHORS INFORMATION\n"
"# Tecnologico de Costa Rica (www.tec.ac.cr)\n"
"# Course: MP-6171 High Performance Embedded Systems\n"
"# Developers Name: Verny Morales and Luis Carlos Alvarez\n"
"# Developers email: [email protected] and [email protected]\n";
/* -i and -o take a filename argument, so they are declared with ':' in the
   option string and read through optarg instead of indexing argv by hand. */
while ((opt = getopt(argc, argv, "i:o:ah")) != -1) {
switch(opt) {
case 'i':
pathRGBFile = optarg;
rgbFlag = 1;
break;
case 'o':
pathYUVFile = optarg;
yuvFlag = 1;
break;
case 'a':
infoAuhtorFlag = 1;
break;
case 'h':
infoExcutionAppFlag = 1;
break;
default:
printf("%s", usageMessage);
}
}
if (infoAuhtorFlag == 1){
printf("%s", authorsInfo);
}
if (infoExcutionAppFlag == 1){
printf("%s", usageMessage);
}
if (rgbFlag == 1 && yuvFlag == 1){
t = clock();
rgb2yuv (pathRGBFile, pathYUVFile);
t = clock() - t;
printf ("It took me %ld clicks (%f seconds).\n",t,((float)t)/CLOCKS_PER_SEC);
}
return 0;
}
|
reorder_ref.h
|
/* Copyright (c) 2018 NoobsHPC Authors, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef NBHPC_ICESWORD_OPERATOR_REORDER_H
#define NBHPC_ICESWORD_OPERATOR_REORDER_H
#pragma once
#include <vector>
#include "icesword/types.h"
namespace noobshpc{
namespace icesword{
// reorder_hw2wh<OP_dtypeDType>(weight, weight_reorder, N, dim_k);
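// Transposes a row-major matrix: element (i,j) of the source becomes element
// (j,i) of the destination, with rows distributed across OpenMP threads.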
template<typename dtype>
static inline Status reorder_hw2wh(const void* in, void* out, const size_t w, const size_t h) {
CHECK_EQ(h * w != 0, true) << "wrong h,w value!";
auto src = (const dtype *)in;
auto dst = (dtype *)out;
#pragma omp parallel for collapse(1)
for (auto i = 0; i < h; i++) {
#pragma omp simd
for (auto j = 0; j < w; j++) {
dst[j * h + i] = src[i * w + j];
}
}
return S_Success;
}
// reorder_hw2wh<dtype>((const void**)&weight, &weight_reorder, N, dim_k);
template<typename dtype>
static inline Status reorder_hw2wh(const void** in, void** out, const size_t h, const size_t w) {
CHECK_EQ(h * w != 0, true) << "wrong h,w value!";
auto src = (const dtype **)in;
auto dst = (dtype **)out;
#pragma omp parallel for collapse(1)
for (auto i = 0; i < h; i++) {
#pragma omp simd
for (auto j = 0; j < w; j++) {
(*dst)[j * h + i] = (*src)[i * w + j];
}
}
return S_Success;
}
} // namespace icesword
} // namespace noobshpc
#endif // NBHPC_ICESWORD_OPERATOR_REORDER_H
|
third.c
|
#include <stdio.h>
#include <omp.h>
int main(){
int A[10] = {1,2,3,4,5,6,7,8,9,10}, i, m, k;
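/* Disable dynamic thread adjustment so the runtime honors the requested
thread count, then request one thread per available processor. */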
omp_set_dynamic(0);
m = omp_get_num_procs();
omp_set_num_threads(m);
printf("Parallel\n------------");
#pragma omp parallel for shared(A) private(i)
for(i = 0; i < 10; i++){
printf("\nA[%d] = %d from thread %d of %d", i, A[i], omp_get_thread_num(), omp_get_num_threads());
}
printf("\n\nNon Parallel\n------------\n");
for(i = 0; i < 10; i++){
printf("%d ", i);
}
printf("\n");
return 0;
}
|
transform.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
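/*
  Illustrative call (not part of the original source): correct an image whose
  orientation (e.g. from EXIF) was recorded at read time, then mark it as
  top-left:

    Image *oriented=AutoOrientImage(image,image->orientation,exception);
    if (oriented != (Image *) NULL)
      {
        image=DestroyImage(image);
        image=oriented;
      }
*/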
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
j;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
{
register ssize_t
i;
assert(images != (Image *) NULL);
cmyk_image=CloneImage(images,0,0,MagickTrue,
exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
for (i=0; i < 4; i++)
{
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
Quantum
pixel;
pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
switch (i)
{
case 0: SetPixelCyan(cmyk_image,pixel,q); break;
case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
case 2: SetPixelYellow(cmyk_image,pixel,q); break;
case 3: SetPixelBlack(cmyk_image,pixel,q); break;
default: break;
}
p+=GetPixelChannels(images);
q+=GetPixelChannels(cmyk_image);
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
AppendImageToList(&cmyk_images,cmyk_image);
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
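/*
  Illustrative call (assumption, not from the original source): extract a
  100x100 region whose top-left corner is at offset (10,20):

    RectangleInfo region;
    region.width=100;
    region.height=100;
    region.x=10;
    region.y=20;
    Image *cropped=CropImage(image,&region,exception);
*/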
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
crop_image->alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
if ((offset.x > (ssize_t) image->page.width) ||
(offset.y > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) crop_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(crop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(crop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(crop_image);
}
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=(ssize_t) MagickRound((double) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((double) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=(ssize_t) MagickRound((double) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((double)
(offset.y+(geometry.y < -1 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=(ssize_t) MagickRound((double) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((double) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=(ssize_t) MagickRound((double) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((double) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) excerpt_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
if ((traits == UndefinedPixelTrait) ||
(excerpt_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(excerpt_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(excerpt_image);
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
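/*
  Usage sketch (illustrative only): place a 640x480 image on an 800x600
  background-colored canvas.  Per the CompositeImage() call below, the
  original is drawn at (-geometry.x,-geometry.y), so a negative offset moves
  it right and down.  All values here are hypothetical.

    RectangleInfo canvas;
    Image *extended;

    canvas.width=800;
    canvas.height=600;
    canvas.x=(-80);
    canvas.y=(-60);
    extended=ExtentImage(image,&canvas,exception);
*/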
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
MagickBooleanType
status;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageBackgroundColor(extent_image,exception);
if (status == MagickFalse)
{
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
status=CompositeImage(extent_image,image,image->compose,MagickTrue,
-geometry->x,-geometry->y,exception);
return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
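/*
  Usage sketch (illustrative only): mirror an image top-to-bottom.  Combining
  FlipImage() with FlopImage() further below yields a 180-degree rotation.

    Image *flipped;

    flipped=FlipImage(image,exception);
*/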
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flip_image=CloneImage(image,0,0,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) flip_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flip_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flip_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(flip_image);
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,0,0,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(flop_image)*flop_image->columns;
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(flop_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
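/*
  Usage sketch (illustrative only): shift the image 30 columns to the right
  and 15 rows up; pixels that fall off one edge wrap around to the opposite
  edge.  Negative or oversized offsets are normalized by the while loops in
  RollImage() below, so any value is accepted.  The offsets are hypothetical.

    Image *rolled;

    rolled=RollImage(image,30,-15,exception);
*/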
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
{
PixelChannel channel = GetPixelChannelChannel(source,i);
PixelTrait source_traits=GetPixelChannelTraits(source,channel);
PixelTrait destination_traits=GetPixelChannelTraits(destination,
channel);
if ((source_traits == UndefinedPixelTrait) ||
(destination_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(destination,channel,p[i],q);
}
p+=GetPixelChannels(source);
q+=GetPixelChannels(destination);
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
roll_image=CloneImage(image,0,0,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
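/*
  Usage sketch (illustrative only): remove a 10-pixel border from the left and
  right edges and a 5-pixel border from the top and bottom, leaving an image
  of (columns-20) x (rows-10) pixels.  The values are hypothetical.

    RectangleInfo border;
    Image *shaved;

    border.width=10;
    border.height=5;
    border.x=0;
    border.y=0;
    shaved=ShaveImage(image,&border,exception);
*/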
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
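/*
  Usage sketch (illustrative only): insert a 20-column, 10-row band of the
  background color at offset (100,50); per the CloneImage() call below, the
  result is (columns+20) x (rows+10) pixels.  The values are hypothetical.

    RectangleInfo band;
    Image *spliced;

    band.width=20;
    band.height=10;
    band.x=100;
    band.y=50;
    spliced=SpliceImage(image,&band,exception);
*/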
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will simply be 'lost', not destroyed.
%
% Also, if the crop generates a list of images, only the first image is
% resized.  Finally, if the crop succeeds but the resize fails, you will get
% a cropped image as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls to
% CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.  The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
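/*
  Usage sketch (illustrative only): crop to a 640x480 region at the origin,
  then scale the first cropped frame to 320x240.  Note the image handle is
  replaced in place.  The geometry strings here are hypothetical.

    Image *img;
    MagickBooleanType ok;

    img points to a single image, not a list
    ok=TransformImage(&img,"640x480+0+0","320x240",exception);
*/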
MagickPrivate MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
Image
*resize_image,
*transform_image;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
(void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
exception);
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
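/*
  Usage sketch (illustrative only): the transposed result swaps the image
  dimensions, as in the CloneImage(image,image->rows,image->columns,...) call
  below.

    Image *transposed;

    transposed=TransposeImage(image,exception);
*/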
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transpose_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transpose_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(transpose_image);
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TransposeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
0,1,transverse_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(transverse_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(transverse_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transverse_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transverse_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TransverseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
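/*
  Usage sketch (illustrative only): crop the image to the bounding box
  computed by GetImageBoundingBox() below.  If that box is empty, the code
  returns a 1x1 transparent image with a (-1,-1) page offset.

    Image *trimmed;

    trimmed=TrimImage(image,exception);
*/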
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
crop_image->alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
return(CropImage(image,&geometry,exception));
}
|
GB_binop__islt_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale): GB (_AxD__islt_int64)
// D*A function (rowscale): GB (_DxB__islt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B GB (_bind1st__islt_int64)
// C=scalar+B' GB (_bind1st_tran__islt_int64)
// C=A+scalar GB (_bind2nd__islt_int64)
// C=A'+scalar GB (_bind2nd_tran__islt_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)
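// Illustrative expansion (not part of the generated code): with the macros
// above, a statement such as
//     GB_BINOP (GB_CX (p), aij, bij, i, j) ;
// expands to
//     Cx [p] = (aij < bij) ;
// where aij and bij are int64_t values obtained via GB_GETA and GB_GETB.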
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__plus_int16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int16)
// A*D function (colscale): GB (_AxD__plus_int16)
// D*A function (rowscale): GB (_DxB__plus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int16)
// C=scalar+B GB (_bind1st__plus_int16)
// C=scalar+B' GB (_bind1st_tran__plus_int16)
// C=A+scalar GB (_bind2nd__plus_int16)
// C=A'+scalar GB (_bind2nd_tran__plus_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT16 || GxB_NO_PLUS_INT16)
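// Illustrative expansion (not part of the generated code): with the macros
// above, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to
//     Cx [p] = (aij + bij) ;
// with aij and bij declared as int16_t by GB_GETA and GB_GETB.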
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__plus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = Bx [p] ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = Ax [p] ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ripemd_fmt_plug.c
|
/* ripemd cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ripemd_160;
extern struct fmt_main fmt_ripemd_128;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ripemd_160);
john_register_one(&fmt_ripemd_128);
#else
#include <string.h>
#include "arch.h"
#include "sph_ripemd.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
//                 128       160
//    1   -       234k      234k
//   64   -      7547k     6310k
//  128   -      9849k     7987k
//  256   -     11835k     9205k
//  512   -     13288k    10027k
//   1k   -     14142k    10553k
//   2k   -     14607k    11980k  ** this level chosen
//   4k   -     14828k    10871k
//   8k   -     14639k    10794k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 64
#else
#define OMP_SCALE 2048
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_TAG "$ripemd$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE160 20
#define BINARY_SIZE128 16
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
static struct fmt_tests ripemd_160_tests[] = {
{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
{"$ripemd$9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
{"56e11fdd5479b30020fc010551536af074e1b82f", "thisisalongstring"},
{"$ripemd$56e11fdd5479b30020fc010551536af074e1b82f", "thisisalongstring"},
{"a1a94e392ce7d861a4fdcaa291e453c082807f50", "string with space"},
{"$ripemd$a1a94e392ce7d861a4fdcaa291e453c082807f50", "string with space"},
{"98f3860a474d986964df9c1fd3621e68eaf76a25", "UPPERCASE"},
{"$ripemd$98f3860a474d986964df9c1fd3621e68eaf76a25", "UPPERCASE"},
{"d3d0379126c1e5e0ba70ad6e5e53ff6aeab9f4fa", "123456789"},
{"$ripemd$d3d0379126c1e5e0ba70ad6e5e53ff6aeab9f4fa", "123456789"},
{NULL}
};
static struct fmt_tests ripemd_128_tests[] = {
{"cdf26213a150dc3ecb610f18f6b38b46", ""},
{"$ripemd$cdf26213a150dc3ecb610f18f6b38b46", ""},
{"060d8817be332f6e6a9a09a209ea453e", "thisisalongstring"},
{"$ripemd$060d8817be332f6e6a9a09a209ea453e", "thisisalongstring"},
{"ed402bdf044344c34935ac93a2d90a13", "string with space"},
{"$ripemd$ed402bdf044344c34935ac93a2d90a13", "string with space"},
{"5e71f949a0d5c69f3c1aeaf245ba527a", "UPPERCASE"},
{"$ripemd$5e71f949a0d5c69f3c1aeaf245ba527a", "UPPERCASE"},
{"1886db8acdcbfeab1e7ee3780400536f", "123456789"},
{"$ripemd$1886db8acdcbfeab1e7ee3780400536f", "123456789"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE160 / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
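	// With OpenMP, keys-per-crypt is scaled by omp_get_max_threads() times
	// OMP_SCALE, so each crypt_all() call hands every thread a large batch
	// of candidate keys.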
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
if (!saved_key) {
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
char *p;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
if (strlen(p) != len)
return 0;
while(*p)
if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
return 0;
return 1;
}
static int valid160(char *ciphertext, struct fmt_main *self)
{
return valid(ciphertext, self, 40);
}
static int valid128(char *ciphertext, struct fmt_main *self)
{
return valid(ciphertext, self, 32);
}
static void *get_binary_160(char *ciphertext)
{
static union {
unsigned char c[20];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = strrchr(ciphertext, '$') + 1;
else
p = ciphertext;
for (i = 0; i < 20; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void *get_binary_128(char *ciphertext)
{
static union {
unsigned char c[16];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = strrchr(ciphertext, '$') + 1;
else
p = ciphertext;
for (i = 0; i < 16; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static int crypt_160(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
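	// Each candidate is hashed independently: saved_key[] and crypt_out[]
	// are indexed by 'index', so the parallel loop below has no shared writes.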
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_ripemd160_context ctx;
sph_ripemd160_init(&ctx);
sph_ripemd160(&ctx, saved_key[index], strlen(saved_key[index]));
sph_ripemd160_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_128(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_ripemd128_context ctx;
sph_ripemd128_init(&ctx);
sph_ripemd128(&ctx, saved_key[index], strlen(saved_key[index]));
sph_ripemd128_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one128(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE128);
}
static int cmp_one160(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE160);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void ripemd_set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + 2 * BINARY_SIZE160 + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
strcpy(out, FORMAT_TAG);
strcpy(&out[TAG_LENGTH], ciphertext);
strlwr(&out[TAG_LENGTH]);
return out;
}
struct fmt_main fmt_ripemd_160 = {
{
"ripemd-160",
"RIPEMD 160",
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE160,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
ripemd_160_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid160,
split,
get_binary_160,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
ripemd_set_key,
get_key,
fmt_default_clear_keys,
crypt_160,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one160,
cmp_exact
}
};
struct fmt_main fmt_ripemd_128 = {
{
"ripemd-128",
"RIPEMD 128",
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE128,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
ripemd_128_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid128,
split,
get_binary_128,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
ripemd_set_key,
get_key,
fmt_default_clear_keys,
crypt_128,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one128,
cmp_exact
}
};
#endif /* plugin stanza */
|
axpy_float.c
|
//axpy.c
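// Build note: compile with -fopenmp or -fopenmp-simd (GCC/Clang) so the
// '#pragma omp simd' hint below takes effect; without an OpenMP flag the
// pragma is ignored (possibly with an unknown-pragma warning).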
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 20
#define N 102400000
// read the wall-clock timer in seconds
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
//Fill the input vectors X and Y with random values in [0, 10]
void init(float *X, float *Y) {
for (int i = 0; i<N; i++) {
X[i] = (float)rand()/(float)(RAND_MAX/10.0);
Y[i] = (float)rand()/(float)(RAND_MAX/10.0);
}
}
//AXPY kernel: Y += a * X; the 'omp simd' pragma asks the compiler to vectorize the loop.
void axpy(float *X, float *Y, float a) {
#pragma omp simd
for (int i = 0; i<N; i++) {
Y[i] += a * X[i];
}
}
// Debug functions
void axpy_serial(float *X, float *Y, float a) {
for (int i = 0; i<N; i++) {
Y[i] += a * X[i];
}
}
void print_vector(float *vector) {
printf("[");
for (int i = 0; i<8; i++) {
printf("%.2f ", vector[i]);
}
puts("]");
}
// Accumulate the absolute element-wise difference so that positive and
// negative errors cannot cancel each other out.
float check(float *A, float *B){
    float difference = 0;
    for(int i = 0;i<N; i++){
        float d = A[i] - B[i];
        difference += (d < 0.0f) ? -d : d;
    }
    return difference;
}
int main(int argc, char **argv) {
//Set everything up
float *X = malloc(sizeof(float)*N);
float *Y = malloc(sizeof(float)*N);
float *Y_serial = malloc(sizeof(float)*N);
float a = 3.14;
srand(time(NULL));
init(X, Y);
for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
print_vector(Y);
print_vector(X);
printf("%.2f\n", a);
puts("=\n");
//warming up
axpy(X, Y, a);
axpy_serial(X, Y_serial, a);
init(X, Y);
for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
double t = 0;
double start = read_timer();
for (int i = 0; i<N_RUNS; i++)
axpy(X, Y, a);
t += (read_timer() - start);
double t_serial = 0;
double start_serial = read_timer();
for (int i = 0; i<N_RUNS; i++)
axpy_serial(X, Y_serial, a);
t_serial += (read_timer() - start_serial);
print_vector(Y);
puts("---------------------------------");
print_vector(Y_serial);
    // AXPY performs 2*N flops per run (one multiply and one add per element).
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
printf("==================================================================\n");
printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
printf("------------------------------------------------------------------\n");
printf("AXPY (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
printf("Correctness check: %f\n", check(Y,Y_serial));
free(X);
free(Y);
free(Y_serial);
return 0;
}
|
aula2809_section.c
|
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
int main (int argc, char *argv[])
{
int i, nthreads, tid;
float a[N], b[N], c[N], d[N];
/* Some initializations */
for (i=0; i<N; i++) {
a[i] = i * 1.5;
b[i] = i + 22.35;
c[i] = d[i] = 0.0;
}
#pragma omp parallel shared(a,b,c,d,nthreads) private(i,tid) num_threads(4)
{
tid = omp_get_thread_num();
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n",tid);
#pragma omp sections nowait
{
#pragma omp section
{
printf("Thread %d doing section 1\n",tid);
for (i=0; i<N; i++)
{
c[i] = a[i] + b[i];
printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
}
}
#pragma omp section
{
printf("Thread %d doing section 2\n",tid);
for (i=0; i<N; i++)
{
d[i] = a[i] * b[i];
printf("Thread %d: d[%d]= %f\n",tid,i,d[i]);
}
}
} /* end of sections */
printf("Thread %d done.\n",tid);
} /* end of parallel section */
}
|
GB_transpose.c
|
//------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in-place, in which case C or
// A are modified in-place.
// There are two ways to use this method:
// C = A' C and A are different
// C = C' C is transposed in-place, (C==A aliased)
// In both cases, the header for C and A must already be allocated (either
// static or dynamic). A is never modified, unless C==A. C and A cannot be
// NULL on input. If in-place (C == A), then C and A are the same matrix, which
// must be valid on input. If C != A, the contents of C are not defined on input,
// and any prior content is freed. Either header may be static or dynamic.
// The input matrix A may have shallow components (even if in-place), and the
// output C may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. The GB_builder method is more
// scalable, but not as fast with a modest number of threads.
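// For context, a minimal user-level sketch of how a transpose is typically
// requested through the public API before it reaches this internal kernel
// (error handling and descriptor options omitted):
//
//      GrB_Matrix A, C ;
//      GrB_Matrix_new (&A, GrB_FP64, m, n) ;      // build A, m-by-n
//      GrB_Matrix_new (&C, GrB_FP64, n, m) ;      // C will hold A'
//      GrB_transpose (C, NULL, NULL, A, NULL) ;   // C = A'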
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
#define GB_FREE_WORK \
{ \
GB_FREE (&iwork, iwork_size) ; \
GB_FREE (&jwork, jwork_size) ; \
GB_FREE (&Swork, Swork_size) ; \
GB_WERK_POP (Count, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_phbix_free (T) ; \
/* freeing C also frees A if transpose is done in-place */ \
GB_phbix_free (C) ; \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GrB_Info GB_transpose // C=A', C=(ctype)A' or C=op(A')
(
GrB_Matrix C, // output matrix C, possibly modified in-place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A, // input matrix; C == A if done in place
// no operator is applied if both op1 and op2 are NULL
const GrB_UnaryOp op1_in, // unary operator to apply
const GrB_BinaryOp op2_in, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in-place
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL) ;
ASSERT (A != NULL) ;
bool in_place = (A == C) ;
struct GB_Matrix_opaque T_header ;
GrB_Matrix T = GB_clear_static_header (&T_header) ;
GB_WERK_DECLARE (Count, int64_t) ;
int64_t *iwork = NULL ; size_t iwork_size = 0 ;
int64_t *jwork = NULL ; size_t jwork_size = 0 ;
GB_void *Swork = NULL ; size_t Swork_size = 0 ;
ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
if (in_place)
{
GBURBLE ("(in-place transpose) ") ;
}
// get the current sparsity control of A
float A_hyper_switch = A->hyper_switch ;
float A_bitmap_switch = A->bitmap_switch ;
int A_sparsity_control = A->sparsity_control ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
// wait if A has pending tuples or zombies; leave jumbled unless avdim == 1
if (GB_PENDING (A) || GB_ZOMBIES (A) || (avdim == 1 && GB_JUMBLED (A)))
{
GB_OK (GB_wait (A, "A", Context)) ;
}
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_IMPLIES (avdim == 1, !GB_JUMBLED (A))) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
bool A_is_bitmap = GB_IS_BITMAP (A) ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
int64_t anz = GB_nnz (A) ;
int64_t anz_held = GB_nnz_held (A) ;
int64_t anvec = A->nvec ;
int64_t anvals = A->nvals ;
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// determine the type of C and get the unary or binary operator
//--------------------------------------------------------------------------
// If a unary or binary operator is present, C is always returned as
// the ztype of the operator. The input ctype is ignored.
GrB_UnaryOp op1 = NULL ;
GrB_BinaryOp op2 = NULL ;
GB_Opcode opcode = GB_NOP_opcode ;
if (op1_in != NULL)
{
// get the unary operator
opcode = op1_in->opcode ;
if (atype == op1_in->xtype && opcode == GB_IDENTITY_opcode)
{
// op1 is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast. op1 is NULL.
ctype = atype ;
}
else
{
// apply the operator, z=op1(x)
op1 = op1_in ;
ctype = op1->ztype ;
}
}
else if (op2_in != NULL)
{
// get the binary operator
GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ;
opcode = op2_in->opcode ;
// only GB_apply calls GB_transpose with op2_in, and it ensures this
// condition holds: first(A,y), second(x,A) have been renamed to
// identity(A), and PAIR has been renamed one(A), so these cases do not
// occur here.
ASSERT (!((opcode == GB_PAIR_opcode) ||
(opcode == GB_FIRST_opcode && !binop_bind1st) ||
(opcode == GB_SECOND_opcode && binop_bind1st))) ;
// apply the operator, z=op2(A,y) or op2(x,A)
op2 = op2_in ;
ctype = op2->ztype ;
}
else
{
// no operator. both op1 and op2 are NULL
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// check for positional operators
//--------------------------------------------------------------------------
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
GrB_UnaryOp save_op1 = op1 ;
GrB_BinaryOp save_op2 = op2 ;
if (op_is_positional)
{
// do not apply the op until after the transpose
op1 = NULL ;
op2 = NULL ;
// replace op1 with the ONE operator, as a placeholder. C will be
// constructed as iso, and needs to be expanded to non-iso when done.
ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32) ;
op1 = (ctype == GrB_INT64) ? GxB_ONE_INT64 : GxB_ONE_INT32 ;
}
//--------------------------------------------------------------------------
// determine the iso status of C
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
GB_iso_code C_code_iso = GB_iso_unop_code (A, op1, op2, binop_bind1st) ;
bool C_iso = (C_code_iso != GB_NON_ISO) ;
ASSERT (GB_IMPLIES (A->iso, C_iso)) ;
if (C_iso && !op_is_positional)
{
GBURBLE ("(iso transpose) ") ;
}
else
{
GBURBLE ("(transpose) ") ;
}
//==========================================================================
// T = A', T = (ctype) A', or T = op (A')
//==========================================================================
if (anz == 0)
{
//----------------------------------------------------------------------
// A is empty
//----------------------------------------------------------------------
// create a new empty matrix T, with the new type and dimensions.
// set T->iso = false OK
GB_OK (GB_new_bix (&T, true, // hyper, static header
ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GxB_HYPERSPARSE,
true, A_hyper_switch, 1, 1, true, false, Context)) ;
}
else if (A_is_bitmap || GB_as_if_full (A))
{
//----------------------------------------------------------------------
// transpose a bitmap/as-if-full matrix or vector
//----------------------------------------------------------------------
        // A is either bitmap or as-if-full (full, or sparse or hypersparse
// with all entries present, no zombies, no pending tuples, and not
// jumbled). T = A' is either bitmap or full.
int T_sparsity = (A_is_bitmap) ? GxB_BITMAP : GxB_FULL ;
bool T_cheap = // T can be done quickly if:
(avlen == 1 || avdim == 1) // A is a row or column vector,
&& op1 == NULL && op2 == NULL // no operator to apply,
&& atype == ctype ; // and no typecasting
// allocate T
if (T_cheap)
{
// just initialize the static header of T, not T->b or T->x
info = GB_new (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
T_sparsity, A_hyper_switch, 1, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// allocate all of T, including T->b and T->x
// set T->iso = C_iso OK
GB_OK (GB_new_bix (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc, T_sparsity, true,
A_hyper_switch, 1, anz_held, true, C_iso, Context)) ;
}
T->magic = GB_MAGIC ;
if (T_sparsity == GxB_BITMAP)
{
T->nvals = anvals ; // for bitmap case only
}
//----------------------------------------------------------------------
// T = A'
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ;
if (T_cheap)
{
// no work to do. Transposing does not change A->b or A->x
T->b = A->b ; T->b_size = A->b_size ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->b and A->x into T
T->b_shallow = A->b_shallow ;
T->x_shallow = A->x_shallow ;
A->b = NULL ;
A->x = NULL ;
}
else
{
// T is a purely shallow copy of A
T->b_shallow = (A->b != NULL) ;
T->x_shallow = true ;
}
T->iso = A->iso ; // OK
}
else if (op1 == NULL && op2 == NULL)
{
// do not apply an operator; optional typecast to T->type
GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ;
}
else
{
// apply an operator, T has type op->ztype
GB_transpose_op (T, C_code_iso, op1, op2, scalar, binop_bind1st, A,
NULL, NULL, 0, nthreads) ;
}
ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ;
ASSERT (!GB_JUMBLED (T)) ;
}
else if (avdim == 1)
{
//----------------------------------------------------------------------
// transpose a "column" vector into a "row"
//----------------------------------------------------------------------
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be sorted first.
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
ASSERT (!GB_JUMBLED (A)) ;
//----------------------------------------------------------------------
// allocate T
//----------------------------------------------------------------------
        // Initialize the header of T, with no content, and set the type and
        // dimension of T. T is hypersparse.
info = GB_new (&T, true, // hyper; static header
ctype, 1, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// allocate T->p, T->i, and optionally T->x, but not T->h
T->p = GB_MALLOC (anz+1, int64_t, &(T->p_size)) ;
T->i = GB_MALLOC (anz , int64_t, &(T->i_size)) ;
bool allocate_Tx = (op1 != NULL || op2 != NULL || C_iso) ||
(ctype != atype) ;
if (allocate_Tx)
{
// allocate new space for the new typecasted numerical values of T
T->x = GB_XALLOC (C_iso, anz, csize, &(T->x_size)) ;
}
if (T->p == NULL || T->i == NULL || (allocate_Tx && T->x == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of T: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL || C_iso)
{
// T->x = op1 (A), op2 (A,scalar), or op2 (scalar,A), or
// compute the iso value of T = 1, A, or scalar, without any op
info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op1, op2,
scalar, binop_bind1st, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else if (ctype != atype)
{
// copy the values from A into T and cast from atype to ctype
GB_cast_matrix (T, A, Context) ;
}
else
{
// no type change; numerical values of T are a shallow copy of A.
ASSERT (!allocate_Tx) ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->x as T->x
T->x_shallow = A->x_shallow ;
A->x = NULL ;
}
else
{
// T->x is a shallow copy of A->x
T->x_shallow = true ;
}
}
// each entry in A becomes a non-empty vector in T;
// T is a hypersparse 1-by-avlen matrix
// transplant or shallow-copy A->i as the new T->h
T->h = A->i ; T->h_size = A->i_size ;
if (in_place)
{
// transplant A->i as T->h
T->h_shallow = A->i_shallow ;
A->i = NULL ;
}
else
{
// T->h is a shallow copy of A->i
T->h_shallow = true ;
}
// T->p = 0:anz and T->i = zeros (1,anz), newly allocated
T->plen = anz ;
T->nvec = anz ;
T->nvec_nonempty = anz ;
// fill the vector pointers T->p
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
T->i [k] = 0 ;
T->p [k] = k ;
}
T->p [anz] = anz ;
T->iso = C_iso ; // OK
T->magic = GB_MAGIC ;
}
else if (avlen == 1)
{
//----------------------------------------------------------------------
// transpose a "row" into a "column" vector
//----------------------------------------------------------------------
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate workspace, if needed
//----------------------------------------------------------------------
int ntasks = 0 ;
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
if (nth > 1 && !A_is_hyper)
{
// ntasks and Count are not needed if nth == 1
ntasks = 8 * nth ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
}
// Allocate the header of T, with no content
// and initialize the type and dimension of T.
info = GB_new (&T, true, // sparse; static header
ctype, avdim, 1, GB_Ap_null, C_is_csc,
GxB_SPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
T->iso = C_iso ; // OK
// allocate new space for the values and pattern
T->p = GB_CALLOC (2, int64_t, &(T->p_size)) ;
if (!A_is_hyper)
{
// A is sparse, so new space is needed for T->i
T->i = GB_MALLOC (anz, int64_t, &(T->i_size)) ;
}
bool allocate_Tx = (op1 != NULL || op2 != NULL || C_iso) ||
(ctype != atype) ;
if (allocate_Tx)
{
// allocate new space for the new typecasted numerical values of T
T->x = GB_XALLOC (C_iso, anz, csize, &(T->x_size)) ;
}
if (T->p == NULL || (T->i == NULL && !A_is_hyper) ||
(T->x == NULL && allocate_Tx))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of T: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL || C_iso)
{
// T->x = op1 (A), op2 (A,scalar), or op2 (scalar,A), or
// compute the iso value of T = 1, A, or scalar, without any op
info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op1, op2,
scalar, binop_bind1st, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else if (ctype != atype)
{
// copy the values from A into T and cast from atype to ctype
GB_cast_matrix (T, A, Context) ;
}
else
{
// no type change; numerical values of T are a shallow copy of A.
ASSERT (!allocate_Tx) ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->x as T->x
T->x_shallow = A->x_shallow ;
A->x = NULL ;
}
else
{
// T->x is a shallow copy of A->x
T->x_shallow = true ;
}
}
//----------------------------------------------------------------------
// compute T->i
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in T
//------------------------------------------------------------------
T->i = A->h ; T->i_size = A->h_size ;
if (in_place)
{
// transplant A->h as T->i
T->i_shallow = A->h_shallow ;
A->h = NULL ;
}
else
{
// T->i is a shallow copy of A->h
T->i_shallow = true ;
}
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in T
//------------------------------------------------------------------
if (nth == 1)
{
//--------------------------------------------------------------
// construct T->i with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (A->p [j] < A->p [j+1])
{
T->i [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct T->i in parallel
//--------------------------------------------------------------
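                // Two-pass scheme: each task first counts the non-empty vectors
                // in its slice, a cumulative sum turns those counts into starting
                // offsets in T->i, and a second pass fills each slice in place.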
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (A->p [j] < A->p [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (A->p [j] < A->p [j+1])
{
T->i [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (A->p [j] < A->p [j+1])
{
ASSERT (T->i [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
}
//---------------------------------------------------------------------
// vector pointers of T
//---------------------------------------------------------------------
// T->p = [0 anz]
ASSERT (T->plen == 1) ;
ASSERT (T->nvec == 1) ;
T->nvec_nonempty = (anz == 0) ? 0 : 1 ;
T->p [1] = anz ;
T->magic = GB_MAGIC ;
ASSERT (!GB_JUMBLED (T)) ;
}
else
{
//----------------------------------------------------------------------
// transpose a general sparse or hypersparse matrix
//----------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
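        // GB_transpose_method chooses between the bucket sort (fast with a
        // modest number of threads) and the more scalable GB_builder approach;
        // see the discussion at the top of this file.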
int nworkspaces_bucket, nthreads_bucket ;
bool use_builder = GB_transpose_method (A,
&nworkspaces_bucket, &nthreads_bucket, Context) ;
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_builder)
{
//------------------------------------------------------------------
// transpose via GB_builder
//------------------------------------------------------------------
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
iwork = GB_MALLOC (anz, int64_t, &iwork_size) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output.
GB_OK (GB_extract_vector_list (iwork, A, Context)) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and Swork)
//------------------------------------------------------------------
            // initialize the header of T, with no content, and set the type
            // and dimension of T.
info = GB_new (&T, true, // hyper, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// if in_place, the prior A->p and A->h can now be freed
if (in_place)
{
if (!A->p_shallow) GB_FREE (&A->p, A->p_size) ;
if (!A->h_shallow) GB_FREE (&A->h, A->h_size) ;
}
GB_void *S_input = NULL ;
// for the GB_builder method, if the transpose is done in-place and
// A->i is not shallow, A->i can be used and then freed.
// Otherwise, A->i is not modified at all.
bool ok = true ;
bool recycle_Ai = (in_place && !A->i_shallow) ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t, &jwork_size) ;
ok = ok && (jwork != NULL) ;
}
if ((op1 != NULL || op2 != NULL) && !C_iso)
{
Swork = (GB_void *) GB_XALLOC (C_iso, anz, csize, &Swork_size) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// A->i is used as workspace for the "column" indices of C.
// jwork is A->i, and is freed by GB_builder.
jwork = A->i ;
jwork_size = A->i_size ;
A->i = NULL ;
ASSERT (in_place) ;
}
else
{
// copy A->i into jwork, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, A->i, anz * sizeof (int64_t), nthreads_max) ;
}
// numerical values: apply the op, typecast, or make shallow copy
GrB_Type stype ;
GB_void sscalar [GB_VLA(csize)] ;
if (C_iso)
{
// apply the op to the iso scalar
GB_iso_unop (sscalar, ctype, C_code_iso, op1, op2, A, scalar) ;
S_input = sscalar ; // S_input is used instead of Swork
Swork = NULL ;
stype = ctype ;
}
else if (op1 != NULL || op2 != NULL)
{
// Swork = op (A)
info = GB_apply_op (Swork, ctype, C_code_iso, op1, op2, scalar,
binop_bind1st, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
S_input = NULL ; // Swork is used instead of S_input
stype = ctype ;
}
else
{
// GB_builder will typecast S_input from atype to ctype if
// needed. S_input is a shallow copy of Ax, and must not be
// modified.
ASSERT (!C_iso) ;
ASSERT (!A->iso) ;
S_input = (GB_void *) A->x ; // S_input is used instead of Swork
Swork = NULL ;
stype = atype ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total memory usage is anz * max (csize, sizeof(int64_t)). T is
// always hypersparse. Either T, Swork, and S_input are all iso,
// or all non-iso, depending on C_iso.
GB_OK (GB_builder (
T, // create T using a static header
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&iwork_size,
&jwork, // jwork_handle, freed on output
&jwork_size,
&Swork, // Swork_handle, freed on output
&Swork_size,
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
NULL, NULL, // original I,J indices: not used here
S_input, // array of values of type stype, not modified
C_iso, // iso property of T is the same as C->iso
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
stype, // type of S_input or Swork
Context
)) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify
// S_input.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
ASSERT (!GB_JUMBLED (T)) ;
}
else
{
//------------------------------------------------------------------
// transpose via bucket sort
//------------------------------------------------------------------
// T = A' and typecast to ctype
GB_OK (GB_transpose_bucket (T, C_code_iso, ctype, C_is_csc, A,
op1, op2, scalar, binop_bind1st,
nworkspaces_bucket, nthreads_bucket, Context)) ;
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
ASSERT (GB_JUMBLED_OK (T)) ;
}
}
//==========================================================================
// free workspace, apply positional op, and transplant/conform T into C
//==========================================================================
GB_FREE_WORK ;
// free prior space of A, if transpose is done in-place
if (in_place)
{
GB_phbix_free (A) ;
}
// transplant the control settings from A to C
C->hyper_switch = A_hyper_switch ;
C->bitmap_switch = A_bitmap_switch ;
C->sparsity_control = A_sparsity_control ;
// transplant T into the result C
GB_OK (GB_transplant (C, ctype, &T, Context)) ;
// apply a positional operator, after transposing the matrix
if (op_is_positional)
{
if (C->iso)
{
            // If C was constructed as iso, it needs to be expanded first,
// but do not initialize the values. These are computed by
// GB_apply_op below.
// set C->iso = false OK: no need to burble
GB_OK (GB_convert_any_to_non_iso (C, false, Context)) ;
}
// the positional operator is applied in-place to the values of C
// Cx = op (C)
GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, // positional
save_op1, save_op2, scalar, binop_bind1st, C, Context)) ;
}
// conform the result to the desired sparsity structure of A
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
GB_OK (GB_conform (C, Context)) ;
ASSERT_MATRIX_OK (C, "C output of GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
GB_unaryop__abs_int16_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_int64
// op(A') function: GB_tran__abs_int16_int64
// C type: int16_t
// A type: int64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
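// For a single entry p, GB_CAST_OP (p,p) expands (roughly) to:
//      int64_t aij = Ax [p] ;
//      int16_t x   = (int16_t) aij ;
//      Cx [p] = GB_IABS (x) ;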
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int16_int64
(
int16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
declare_variant_messages.c
|
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
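// This file exercises the parser/sema diagnostics for malformed
// '#pragma omp declare variant' directives and their 'match' clauses.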
#pragma omp declare // expected-error {{expected an OpenMP directive}}
int foo(void);
#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} expected-error {{expected 'match' clause on}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'arch' 'isa'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device = {kind(score(ibm) }) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<recovery-expr>()'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();
#pragma omp declare variant(foo) match(construct={teams,parallel,for,simd})
#pragma omp declare variant(foo) match(construct={target teams}) // expected-error {{expected ')'}} expected-warning {{expected '}' after the context selectors for the context set "construct"; '}' assumed}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(construct={parallel for}) // expected-error {{expected ')'}} expected-warning {{expected '}' after the context selectors for the context set "construct"; '}' assumed}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(construct={for simd}) // expected-error {{expected ')'}} expected-warning {{expected '}' after the context selectors for the context set "construct"; '}' assumed}} expected-note {{to match this '('}}
int construct(void);
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
int var;
#pragma omp threadprivate(var)
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();
int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);
int after_use_variant(void);
int after_use();
int bar() {
return after_use();
}
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int defined1(void) { return 0; }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined1(void);
int diff_cc_variant(void);
#pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
#pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void diff_ret(void);
void marked(void);
void not_marked(void);
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}}
void marked_variant(void);
#pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void marked(void);
#pragma omp declare variant(foo) match(device = {isa("foo")})
int unknown_isa_trait(void);
#pragma omp declare variant(foo) match(device = {isa(foo)})
int unknown_isa_trait2(void);
#pragma omp declare variant(foo) match(device = {kind(fpga), isa(bar)})
int ignored_isa_trait(void);
void caller() {
unknown_isa_trait(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
unknown_isa_trait2(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
ignored_isa_trait();
}
// Unknown arch
#pragma omp begin declare variant match(device={isa(sse2020)}) // expected-warning {{isa trait 'sse2020' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
#pragma omp end declare variant
// Unknown arch guarded by arch.
#pragma omp begin declare variant match(device={isa(sse2020), arch(ppc)})
#pragma omp end declare variant
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
// FIXME: If the scores are equivalent we should detect that and allow it.
#pragma omp begin declare variant match(implementation = {vendor(score(2) \
: llvm)})
#pragma omp declare variant(foo) match(implementation = {vendor(score(2) \
: llvm)}) // expected-error@-1 {{nested OpenMP context selector contains duplicated trait 'llvm' in selector 'vendor' and set 'implementation' with different score}}
int conflicting_nested_score(void);
#pragma omp end declare variant
// FIXME: We should build the conjunction of different conditions, see also the score FIXME above.
#pragma omp begin declare variant match(user = {condition(1)})
#pragma omp declare variant(foo) match(user = {condition(1)}) // expected-error {{nested user conditions in OpenMP context selector not supported (yet)}}
int conflicting_nested_condition(void);
#pragma omp end declare variant
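// For contrast with the malformed directives exercised above, a minimal
// well-formed 'declare variant' (the function names below are illustrative
// only and add no expected diagnostics):
int correct_variant(void);
#pragma omp declare variant(correct_variant) match(implementation = {vendor(llvm)}, device = {kind(cpu)})
int correct_base(void);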
|
quantize.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize method takes a 24 bit image and reduces
% the number of colors so it can be displayed on a raster device with fewer
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% vertex nearest the origin in RGB space and the vertex farthest from the
% origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal is
% to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
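% (A sketch of this pruning loop, written in C, follows this comment block.)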
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixels' colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
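/*
A sketch, in C, of the Reduction pruning loop described above. It operates on
a flat array of nodes rather than the octree used by this file, and the names
(PruneSketchNode, PruneSketch) are illustrative only, not part of MagickCore;
in the real code pruning also folds n2, Sr, Sg, and Sb into the parent node.

  typedef struct PruneSketchNode
  {
    unsigned long n2;  // pixels uniquely represented by this node
    double E;          // accumulated quantization error for this node
    int pruned;
  } PruneSketchNode;

  static void PruneSketch(PruneSketchNode *nodes,unsigned long count,
    unsigned long max_colors)
  {
    double Ep = 0.0;  // pruning threshold
    for ( ; ; )
    {
      unsigned long i, survivors = 0;
      double next_Ep = -1.0;
      for (i=0; i < count; i++)
        if ((nodes[i].pruned == 0) && (nodes[i].n2 > 0))
          survivors++;
      if (survivors <= max_colors)
        break;
      for (i=0; i < count; i++)
      {
        if (nodes[i].pruned != 0)
          continue;
        if (nodes[i].E <= Ep)
          nodes[i].pruned=1;  // the real code merges statistics upward here
        else
          if ((next_Ep < 0.0) || (nodes[i].E < next_Ep))
            next_Ep=nodes[i].E;  // minimum E among the remaining nodes
      }
      Ep=next_Ep;
    }
  }
*/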
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
MagickRealType
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
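/*
A minimal usage sketch (assuming the caller already holds a valid Image and
ImageInfo); it is illustrative only, and the returned structure must be
released with DestroyQuantizeInfo():

  QuantizeInfo *quantize_info = AcquireQuantizeInfo(image_info);
  quantize_info->number_colors = 256;
  (void) QuantizeImage(quantize_info, image);
  quantize_info = DestroyQuantizeInfo(quantize_info);
*/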
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
if (quantize_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither=image_info->dither;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
MagickRealType
alpha;
alpha_pixel->index=0;
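/*
Opaque pixels, or cubes that do not associate alpha, are copied through
unchanged; otherwise the color components are weighted (premultiplied) by
the normalized alpha so that nearly transparent pixels contribute little
to the color statistics gathered in the tree.
*/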
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->opacity == OpaqueOpacity))
{
alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
return;
}
alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
alpha_pixel->red=alpha*GetPixelRed(pixel);
alpha_pixel->green=alpha*GetPixelGreen(pixel);
alpha_pixel->blue=alpha*GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
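/*
Extract bit 'index' of each channel (after scaling to 8 bits) and pack the
red, green, and blue bits into bits 0-2 of the child id; when alpha is
associated a fourth bit is added, giving up to 16 children per node.
*/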
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
static inline MagickBooleanType IsSameColor(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
if ((GetPixelRed(p) != GetPixelRed(q)) ||
(GetPixelGreen(p) != GetPixelGreen(q)) ||
(GetPixelBlue(p) != GetPixelBlue(q)))
return(MagickFalse);
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
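/*
Collapse a run of identical pixels so the tree walk and closest-color
search below are performed once per run.
*/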
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AssignImageColors)
#endif
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->matte;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
midpoint.index=(MagickRealType) QuantumRange/2.0;
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
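/*
Classify any remaining rows, but descend only to cube_info->depth; once the
number of leaves exceeded maximum_colors the tree was pruned to that depth
above.
*/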
for (y++; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither=quantize_info->dither;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be examined.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
register DoublePixelPacket
*magick_restrict q;
register MagickRealType
alpha,
beta,
distance;
register PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
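/*
Accumulate the squared distance one channel at a time, abandoning this
candidate as soon as the partial sum already exceeds the best distance
found so far.
*/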
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the number
% of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be traversed.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
(void) DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
register MagickRealType
alpha;
register PixelPacket
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
SetPixelOpacity(q,OpaqueOpacity);
}
else
{
MagickRealType
opacity;
opacity=(MagickRealType) (alpha*QuantumRange*
node_info->total_color.opacity);
SetPixelOpacity(q,ClampToQuantum(opacity));
if (q->opacity == OpaqueOpacity)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
}
else
{
double
gamma;
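/*
Undo the alpha premultiplication: divide the accumulated color by the
mean alpha (gamma) before storing the colormap entry.
*/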
gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.blue)));
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
register Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered, otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
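/*
Quantize each channel to its top (8-CacheShift) bits and pack the channels
into a single index into the closest-color cache, so pixels with similar
colors can reuse a previous colormap lookup.
*/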
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
DoublePixelPacket
**pixels;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
exception=(&image->exception);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
size_t
index;
ssize_t
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
register ssize_t
i;
ssize_t
u;
u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(&cube,q+u,&pixel);
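/*
Gather the error diffused from already-processed neighbors using the
Floyd-Steinberg weights: 7/16 from the previous pixel on this row and
1/16, 5/16, 3/16 from the row above; the serpentine scan (u, v) reverses
direction on odd rows.
*/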
if (x > 0)
{
pixel.red+=7*current[u-v].red/16;
pixel.green+=7*current[u-v].green/16;
pixel.blue+=7*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=7*current[u-v].opacity/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=previous[u+v].red/16;
pixel.green+=previous[u+v].green/16;
pixel.blue+=previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=previous[u+v].opacity/16;
}
pixel.red+=5*previous[u].red/16;
pixel.green+=5*previous[u].green/16;
pixel.blue+=5*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=5*previous[u].opacity/16;
if (x > 0)
{
pixel.red+=3*previous[u-v].red/16;
pixel.green+=3*previous[u-v].green/16;
pixel.blue+=3*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=3*previous[u-v].opacity/16;
}
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+u,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q+u,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q+u,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixel(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].opacity=pixel.opacity-color.opacity;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
return(MagickTrue);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
const size_t level,const unsigned int direction)
{
if (level == 1)
switch (direction)
{
case WestGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
break;
}
case EastGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
break;
}
case NorthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
break;
}
case SouthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
break;
}
case EastGravity:
{
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
break;
}
case NorthGravity:
{
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
break;
}
case SouthGravity:
{
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
break;
}
default:
break;
}
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
register CubeInfo
*p;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
ExceptionInfo
*exception;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
i;
/*
Distribute error.
*/
exception=(&image->exception);
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
AssociateAlphaPixel(cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=p->weights[i]*p->error[i].red;
pixel.green+=p->weights[i]*p->error[i].green;
pixel.blue+=p->weights[i]*p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity+=p->weights[i]*p->error[i].opacity;
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) (1*p->cache[i]);
if (image->storage_class == PseudoClass)
*indexes=(IndexPacket) index;
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube_info->associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixel(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
depth;
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
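/*
  Derive the Hilbert curve recursion depth from the larger image dimension so
  that the space-filling curve covers every pixel at least once.
*/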
i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
for (depth=1; i != 0; depth++)
i>>=1;
if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
depth++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,&image->exception);
if (depth > 1)
Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
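/*
  For example, with number_colors=256 the optimal depth described above is
  Log4(256) = 4; QuantizeImage() computes a comparable value automatically
  (see its tree_depth handling) when the requested depth is zero.
*/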
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)*
length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree where the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) ResetMagickMemory(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
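/*
  Illustrative sketch, assuming `image` is a valid PseudoClass image that has
  already been quantized: after calling GetImageQuantizeError(image), the
  metrics described above are available as image->error.mean_error_per_pixel,
  image->error.normalized_mean_error, and image->error.normalized_maximum_error.
*/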
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
*indexes;
MagickRealType
alpha,
area,
beta,
distance,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
size_t
index;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
(void) ResetMagickMemory(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=1UL*GetPixelIndex(indexes+x);
if (image->matte != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
beta=(MagickRealType) (QuantumScale*(QuantumRange-
image->colormap[index].opacity));
}
distance=fabs((double) (alpha*GetPixelRed(p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p++;
}
}
image_view=DestroyCacheView(image_view);
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither=MagickTrue;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
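/*
  Worked example (assuming a Q16 build where QuantumRange is 65535): with
  levels=4, a channel value of 30000 maps to round(3*30000/65535) = 1 and is
  replaced by 65535*1/3 = 21845, one of the four permitted levels 0, 21845,
  43690, and 65535.
*/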
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const MagickBooleanType dither)
{
MagickBooleanType
status;
status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
return(status);
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
proceed=SetImageProgress(image,PosterizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.opacity+=node_info->total_color.opacity;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o image: the image.
%
*/
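/*
  Illustrative usage sketch, assuming `image` is a valid Image pointer:

    QuantizeInfo *quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
    quantize_info->number_colors=64;
    quantize_info->dither_method=FloydSteinbergDitherMethod;
    (void) QuantizeImage(quantize_info,image);
    quantize_info=DestroyQuantizeInfo(quantize_info);
*/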
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->matte == MagickFalse)
{
if (SetImageGray(image,&image->exception) != MagickFalse)
(void) SetGrayscaleImage(image);
}
if ((image->storage_class == PseudoClass) &&
(image->colors <= maximum_colors))
{
if ((quantize_info->colorspace != UndefinedColorspace) &&
(quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,quantize_info->colorspace);
return(MagickTrue);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither != MagickFalse) && (depth > 2))
depth--;
if ((image->matte != MagickFalse) && (depth > 5))
depth--;
if (SetImageGray(image,&image->exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image if it contains more than the
maximum, otherwise we can disable dithering to improve the performance.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
else
cube_info->quantize_info->dither_method=NoDitherMethod;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a 1D array, which is subsequently sorted to accelerate the color
% reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,
MagickRealType *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
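/*
  For example, if maximum_colors is 256 and the tree currently represents 300
  colors, each pass prunes every node whose quantization error E is at or
  below the current threshold Ep, then raises Ep to the smallest E remaining,
  repeating until no more than 256 nodes with n2 > 0 survive.
*/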
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
MagickRealType
*p,
*q;
p=(MagickRealType *) error_p;
q=(MagickRealType *) error_q;
if (*p > *q)
return(1);
if (fabs((double) (*q-*p)) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Assign each pixel in the image the closest color from the reference palette.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Assign each image in the sequence the closest colors from the reference palette.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
PixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(PixelPacket *) x;
color_2=(PixelPacket *) y;
intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2);
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
if (image->storage_class == PseudoClass)
colormap_index=(ssize_t *) AcquireQuantumMemory(image->colors,
sizeof(*colormap_index));
else
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
ExceptionInfo
*exception;
(void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(unsigned short) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
custom_functions.h
|
//
// Project Name: Kratos
// Last Modified by: $Author: G.Casas ([email protected]) $
// Date: $Date: 2011-6-13 08:56:42 $
// Revision: $Revision: 1.5 $
//
//
// README: search for the keyword "VERSION" to find every point that must be changed to switch from a kd-tree to a bin data search structure.
#if !defined(KRATOS_CUSTOM_FUNCTIONS)
#define KRATOS_CUSTOM_FUNCTIONS
// /* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
// System includes
#include <vector>
// Project includes
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_nodal_neighbours_process.h"
//Database includes
#include "custom_utilities/search/discrete_particle_configure.h"
#include "includes/define.h"
#include "../../DEMApplication/custom_elements/discrete_element.h"
#include "custom_elements/swimming_particle.h"
#include "custom_utilities/AuxiliaryFunctions.h"
#include "../../DEMApplication/custom_elements/spheric_particle.h"
#include "../swimming_DEM_application.h"
#include "../../../kratos/utilities/geometry_utilities.h"
namespace Kratos
{
template <std::size_t TDim>
class CustomFunctionsCalculator
{
public:
typedef ModelPart::ElementsContainerType::iterator ElementIterator;
typedef ModelPart::NodesContainerType::iterator NodeIterator;
typedef ModelPart::NodesContainerType NodesArrayType;
KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator);
CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){}
/// Calculator
virtual ~CustomFunctionsCalculator(){}
/// Default calculator
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void CalculatePressureGradient(ModelPart& r_model_part)
{
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
noalias(inode->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3);
}
array_1d <double, 3> grad = ZeroVector(3); // its dimension is always 3
array_1d <double, TDim + 1 > elemental_pressures;
array_1d <double, TDim + 1 > N; // shape functions vector
BoundedMatrix<double, TDim + 1, TDim> DN_DX;
for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
// computing the shape function derivatives
Geometry<Node<3> >& geom = ielem->GetGeometry();
double Volume;
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
// getting the pressure gradients;
for (unsigned int i = 0; i < TDim + 1; ++i){
elemental_pressures[i] = geom[i].FastGetSolutionStepValue(PRESSURE);
}
array_1d <double, TDim> grad_aux = prod(trans(DN_DX), elemental_pressures); // its dimension may be 2
for (unsigned int i = 0; i < TDim; ++i){
grad[i] = grad_aux[i];
}
double nodal_area = Volume / static_cast<double>(TDim + 1);
grad *= nodal_area;
for (unsigned int i = 0; i < TDim + 1; ++i){
geom[i].FastGetSolutionStepValue(PRESSURE_GRADIENT) += grad;
}
}
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
inode->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= inode->FastGetSolutionStepValue(NODAL_AREA);
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// This function assesses the stationarity based on the pressure field variation.
// Its tolerance applies to the non-dimensional pressure variation between consecutive
// measurements.
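// Sketch of the non-dimensional residual that is compared against tol below:
//   residual = (max_i |dp_i| / dt) / ((mean_celerity / L) * dp_range)
// where L = (total domain volume)^(1/3) and dp_range is a characteristic
// spatial variation of the pressure field.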
bool AssessStationarity(ModelPart& r_model_part, const double& tol)
{
if (!mPressuresFilled){
PerformFirstStepComputations(r_model_part);
return(false);
}
else {
double max_pressure_change_rate = 0.0; // measure of stationarity
double mean_celerity = 0.0; // used to adimensionalize the time step
// filling up mPressures and calculating the mean velocities and the maximum nodal pressure change
unsigned int i = 0;
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY);
mean_celerity += SWIMMING_MODULUS_3(velocity);
const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE);
double& old_pressure = mPressures[i];
const double delta_p = std::abs(new_pressure - old_pressure);
max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate);
old_pressure = new_pressure;
++i;
}
mean_celerity /= i;
const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime;
if (delta_t > 0.0){
max_pressure_change_rate /= delta_t;
// calculating coefficients for adimensionalization of the pressure change rate
const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such
const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length;
const double pressure_spatial_variation = GetRangeWithinVector(mPressures);
mLastPressureVariation = pressure_spatial_variation;
const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation);
if (characteristic_pressure_variation == 0.0 || reciprocal_of_characteristic_time == 0.0){ // unlikely
std::cout << "Uniform problem: stationarity check being performed with dimensional values...! " << "\n";
if (max_pressure_change_rate <= tol){ // go with the absolute value
return true;
}
}
max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ;
}
else {
KRATOS_THROW_ERROR(std::runtime_error, "Trying to calculate pressure variations between two coincident time steps! (null time variation since last recorded time)","");
}
std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
std::cout << "The stationarity condition tolerance is " << "\n";
KRATOS_INFO("SwimmingDEM") << tol << std::endl;
std::cout << "The stationarity residual is now " << "\n";
KRATOS_INFO("SwimmingDEM") << max_pressure_change_rate << std::endl;
std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
return max_pressure_change_rate <= tol;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateDomainVolume(ModelPart& r_fluid_model_part)
{
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
double added_volume = 0.0;
#pragma omp parallel for reduction(+ : added_volume)
for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
added_volume += CalculateElementalVolume(it->GetGeometry());
}
}
return added_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
void CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force)
{
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
std::vector<array_1d <double, 3> > added_force_vect;
added_force_vect.resize(OpenMPUtils::GetNumThreads());
for (unsigned int k = 0; k < added_force_vect.size(); ++k){
added_force_vect[k] = ZeroVector(3);
}
#pragma omp parallel for
for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_dem_model_part, k); it != GetElementPartitionEnd(r_dem_model_part, k); ++it){
Geometry< Node<3> >& geom = it->GetGeometry();
array_1d <double, 3> element_force;
if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){
element_force = geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE);
}
else {
element_force = ZeroVector(3);
}
added_force_vect[k] += element_force;
}
}
force = added_force_vect[0];
for (unsigned int k = 1; k < added_force_vect.size(); ++k){
force += added_force_vect[k];
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force)
{
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
std::vector<array_1d <double, 3> > added_force_vect;
added_force_vect.resize(OpenMPUtils::GetNumThreads());
std::vector<array_1d <double, 3> > added_mean_force_vect;
added_mean_force_vect.resize(OpenMPUtils::GetNumThreads());
for (unsigned int k = 0; k < added_force_vect.size(); ++k){
added_force_vect[k] = ZeroVector(3);
added_mean_force_vect[k] = ZeroVector(3);
}
#pragma omp parallel for
for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
Geometry< Node<3> >& geom = it->GetGeometry();
double element_volume;
array_1d <double, 3> element_force;
array_1d <double, 3> element_mean_force;
if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume);
}
else {
element_force = ZeroVector(3);
}
if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume);
}
else {
element_mean_force = ZeroVector(3);
}
added_force_vect[k] += element_force;
added_mean_force_vect[k] += element_mean_force;
}
}
instantaneous_force = added_force_vect[0];
mean_force = added_mean_force_vect[0];
for (unsigned int k = 1; k < added_force_vect.size(); ++k){
instantaneous_force += added_force_vect[k];
mean_force += added_mean_force_vect[k];
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part)
{
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
double added_fluid_volume = 0.0;
#pragma omp parallel for reduction(+ : added_fluid_volume)
for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
Geometry< Node<3> >& geom = it->GetGeometry();
double element_volume;
double element_fluid_volume;
if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
}
else {
element_fluid_volume = CalculateElementalVolume(geom);
}
added_fluid_volume += element_fluid_volume;
}
}
return added_fluid_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
double det = 1.0;
matrix_T mLu(mat_r() );
boost::numeric::ublas::permutation_matrix<std::size_t> pivots(mat_r().size1() );
int is_singular = lu_factorize(mLu, pivots);
if (!is_singular)
{
for (std::size_t i=0; i < pivots.size(); ++i)
{
if (pivots(i) != i)
det *= -1.0;
det *= mLu(i,i);
}
}
else
det = 0.0;
return det;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
const DenseMatrix<double> Inverse(
const DenseMatrix<double>& m)
{
assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices");
switch(m.size1())
{
case 1:
{
assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
assert(m(0,0) != 0.0 && "Cannot take the inverse of matrix [0]");
DenseMatrix<double> n(1,1);
n(0,0) = 1.0 / determinant;
return n;
}
case 2:
{
assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(1,0);
const double d = m(1,1);
DenseMatrix<double> n(2,2);
n(0,0) = d / determinant;
n(0,1) = -b / determinant;
n(1,0) = -c / determinant;
n(1,1) = a / determinant;
return n;
}
case 3:
{
assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(0,2);
const double d = m(1,0);
const double e = m(1,1);
const double f = m(1,2);
const double g = m(2,0);
const double h = m(2,1);
const double k = m(2,2);
DenseMatrix<double> n(3,3);
const double new_a = ((e*k)-(f*h)) / determinant;
const double new_b = -((d*k)-(f*g)) / determinant;
const double new_c = ((d*h)-(e*g)) / determinant;
const double new_d = -((b*k)-(c*h)) / determinant;
const double new_e = ((a*k)-(c*g)) / determinant;
const double new_f = -((a*h)-(b*g)) / determinant;
const double new_g = ((b*f)-(c*e)) / determinant;
const double new_h = -((a*f)-(c*d)) / determinant;
const double new_k = ((a*e)-(b*d)) / determinant;
n(0,0) = new_a;
n(1,0) = new_b;
n(2,0) = new_c;
n(0,1) = new_d;
n(1,1) = new_e;
n(2,1) = new_f;
n(0,2) = new_g;
n(1,2) = new_h;
n(2,2) = new_k;
return n;
}
default:
{
//Use blockwise inversion
//Matrix::Chop returns a std::vector
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
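//The branch below applies the standard 2x2 block (Schur-complement) inverse:
//with S = D - C*A_inv*B,
//  [A B]^-1   [ A_inv + A_inv*B*S^-1*C*A_inv   -A_inv*B*S^-1 ]
//  [C D]    = [          -S^-1*C*A_inv               S^-1    ]
//which is exactly what the terms new_a .. new_d compute.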
const std::vector<DenseMatrix<double> > v = Chop(m);
const DenseMatrix<double>& a = v[0];
assert(a.size1() == a.size2());
const DenseMatrix<double> a_inv = Inverse(a);
const DenseMatrix<double>& b = v[1];
const DenseMatrix<double>& c = v[2];
const DenseMatrix<double>& d = v[3];
const DenseMatrix<double> term
= d
- prod(
DenseMatrix<double>(prod(c,a_inv)),
b
);
const DenseMatrix<double> term_inv = Inverse(term);
const DenseMatrix<double> new_a
= a_inv
+ DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv)),
c)),
a_inv));
const DenseMatrix<double> new_b
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv));
const DenseMatrix<double> new_c
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
term_inv,
c)),
a_inv));
const DenseMatrix<double> new_d = term_inv;
std::vector<DenseMatrix<double> > w;
w.push_back(new_a);
w.push_back(new_b);
w.push_back(new_c);
w.push_back(new_d);
const DenseMatrix<double> result = Unchop(w);
return result;
}
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable)
{
#pragma omp parallel for
for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
Node<3>::Pointer p_node = *(i_particle.base());
double& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
const double& origin_value = p_node->FastGetSolutionStepValue(origin_variable);
destination_value = origin_value;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 3>>& destination_variable)
{
#pragma omp parallel for
for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
Node<3>::Pointer p_node = *(i_particle.base());
array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
const array_1d<double, 3>& origin_value = p_node->FastGetSolutionStepValue(origin_variable);
noalias(destination_value) = origin_value;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void SetValueOfAllNotes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable)
{
#pragma omp parallel for
for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
Node<3>::Pointer p_node = *(i_particle.base());
double& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
destination_value = value;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void SetValueOfAllNotes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable)
{
#pragma omp parallel for
for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
Node<3>::Pointer p_node = *(i_particle.base());
array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
noalias(destination_value) = value;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
private:
bool mPressuresFilled;
bool mFirstGradientRecovery;
bool mFirstLaplacianRecovery;
bool mSomeCloudsDontWork;
bool mCalculatingTheGradient;
bool mCalculatingTheLaplacian;
bool mFirstTimeAppending;
double mLastMeasurementTime;
double mLastPressureVariation;
double mTotalDomainVolume;
std::vector<double> mPressures;
std::vector<DenseVector<double> > mFirstRowsOfB;
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
inline double CalculateArea(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2)
{
const double x10 = x1 - x0;
const double y10 = y1 - y0;
const double x20 = x2 - x0;
const double y20 = y2 - y0;
const double area = 0.5 * std::abs(x10 * y20 - x20 * y10);
return area;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
inline double CalculateVol(const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3)
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 +
y10 * z20 * x30 - y10 * x20 * z30 +
z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666666667;
}
//***************************************************************************************************************
//***************************************************************************************************************
double CalculateElementalVolume(const Geometry<Node <3> >& geom)
{
double vol;
if (TDim == 2){
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
vol = CalculateArea(x0, y0, x1, y1, x2, y2);
}
else {
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
}
if (vol == 0.0){
KRATOS_THROW_ERROR(std::logic_error, "element with zero area found with the current geometry ", geom);
}
return vol;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol)
{
array_1d<double, 4> N;
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double xc = 0.25 * (x0 + x1 + x2 + x3);
double yc = 0.25 * (y0 + y1 + y2 + y3);
double zc = 0.25 * (z0 + z1 + z2 + z3);
vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
if (vol == 0.0){
KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
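// Note: these N[i] are un-normalized shape-function weights (sub-tetrahedron volumes
// that sum to vol), so the weighted sum below directly gives the integral of the
// linearly interpolated field over the element, as the function name indicates.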
double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
for (unsigned int i = 1; i != 4; ++i){
value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var, 0);
}
return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
array_1d<double, 4> N;
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double xc = 0.25 * (x0 + x1 + x2 + x3);
double yc = 0.25 * (y0 + y1 + y2 + y3);
double zc = 0.25 * (z0 + z1 + z2 + z3);
vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
if (vol == 0.0){
KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
for (unsigned int i = 1; i != 4; ++i){
value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
}
return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
array_1d<double, 4> N;
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double xc = 0.25 * (x0 + x1 + x2 + x3);
double yc = 0.25 * (y0 + y1 + y2 + y3);
double zc = 0.25 * (z0 + z1 + z2 + z3);
vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
if (vol == 0.0){
KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);
for (unsigned int i = 1; i != 4; ++i){
value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
}
return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void PerformFirstStepComputations(ModelPart& r_model_part)
{
mTotalDomainVolume = CalculateDomainVolume(r_model_part);
mPressures.resize(r_model_part.Nodes().size());
mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];
unsigned int i = 0;
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode) {
mPressures[i] = inode->FastGetSolutionStepValue(PRESSURE);
++i;
}
mPressuresFilled = true;
mLastPressureVariation = GetRangeWithinVector(mPressures);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
struct IsCloser{
bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
{
return(first_pair.second < second_pair.second || (first_pair.second == second_pair.second && first_pair.first < second_pair.first));
}
};
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
inline int Factorial(const unsigned int n){
if (n == 0){
return 1;
}
unsigned int k = n;
for (unsigned int i = n - 1; i > 0; --i){
k *= i;
}
return k;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateTheMaximumEdgeLength(ModelPart& r_model_part)
{
double max_distance_yet = 0.0;
for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
Geometry<Node<3> >& geom = ielem->GetGeometry();
unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);
for (unsigned int k = 1; k < n_nodes; ++k){ // visit every pair of nodes (every edge)
for (unsigned int i = k; i < n_nodes; ++i){
array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
max_distance_yet = max_distance_yet > distance_2 ? max_distance_yet : distance_2;
}
}
}
return(std::sqrt(max_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateTheMinumumEdgeLength(ModelPart& r_model_part)
{
double min_distance_yet = 0.0;
bool first_node = true;
for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
Geometry<Node<3> >& geom = ielem->GetGeometry();
if (first_node){ // initialize min_distance_yet with the distance (squared) between the first two nodes
array_1d <double, 3> delta = geom[0] - geom[1];
double distance_2 = DEM_INNER_PRODUCT_3(delta, delta);
min_distance_yet = distance_2;
first_node = false;
}
unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);
for (unsigned int k = 1; k < n_nodes; ++k){ // visit every pair of nodes (every edge)
for (unsigned int i = k; i < n_nodes; ++i){
array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
min_distance_yet = min_distance_yet < distance_2 ? min_distance_yet : distance_2;
}
}
}
return(std::sqrt(min_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// The following block of functions is used to calculate explicit matrix inverses and was taken from
// Richel Bilderbeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is
// transcribed here with a very minor modification
double CalcDeterminant(const DenseMatrix<double>& m)
{
assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices");
switch(m.size1())
{
case 1:
{
return m(0,0);
}
case 2:
{
const double a = m(0,0);
const double b = m(0,1);
const double c = m(1,0);
const double d = m(1,1);
const double determinant = (a * d) - (b * c);
return determinant;
}
case 3:
{
assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices");
const double a = m(0,0);
const double b = m(0,1);
const double c = m(0,2);
const double d = m(1,0);
const double e = m(1,1);
const double f = m(1,2);
const double g = m(2,0);
const double h = m(2,1);
const double k = m(2,2);
const double determinant
= (a * ((e*k) - (f*h)))
- (b * ((k*d) - (f*g)))
+ (c * ((d*h) - (e*g)));
return determinant;
}
default:
assert(!"Should not get here: unsupported matrix size");
throw std::runtime_error("Unsupported matrix size");
}
}
///Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
const std::vector<DenseMatrix<double> > Chop(
const DenseMatrix<double>& m)
{
using boost::numeric::ublas::range;
using boost::numeric::ublas::matrix_range;
std::vector<matrix<double> > v;
v.reserve(4);
const int midy = m.size1() / 2;
const int midx = m.size2() / 2;
const matrix_range<const matrix<double> > top_left( m,range(0 ,midy ),range(0 ,midx ));
const matrix_range<const matrix<double> > bottom_left( m,range(midy,m.size1()),range(0 ,midx ));
const matrix_range<const matrix<double> > top_right( m,range(0 ,midy ),range(midx,m.size2()));
const matrix_range<const matrix<double> > bottom_right(m,range(midy,m.size1()),range(midx,m.size2()));
v.push_back(matrix<double>(top_left));
v.push_back(matrix<double>(top_right));
v.push_back(matrix<double>(bottom_left));
v.push_back(matrix<double>(bottom_right));
return v;
}
///Unchop merges the four sub-matrices produced by Chop back into a single matrix
const DenseMatrix<double> Unchop(
const std::vector<DenseMatrix<double> >& v)
{
//Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
using boost::numeric::ublas::range;
using boost::numeric::ublas::matrix_range;
assert(v.size() == 4);
assert(v[0].size1() == v[1].size1());
assert(v[2].size1() == v[3].size1());
assert(v[0].size2() == v[2].size2());
assert(v[1].size2() == v[3].size2());
DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2());
for (int quadrant=0; quadrant!=4; ++quadrant)
{
const DenseMatrix<double>& w = v[quadrant];
const std::size_t n_rows = v[quadrant].size1();
const std::size_t n_cols = v[quadrant].size2();
const int offset_x = quadrant % 2 ? v[0].size2() : 0;
const int offset_y = quadrant / 2 ? v[0].size1() : 0;
for (std::size_t row=0; row!=n_rows; ++row)
{
for (std::size_t col=0; col!=n_cols; ++col)
{
m(offset_y + row, offset_x + col) = w(row,col);
}
}
}
assert(v[0].size1() + v[2].size1() == m.size1());
assert(v[1].size1() + v[3].size1() == m.size1());
assert(v[0].size2() + v[1].size2() == m.size2());
assert(v[2].size2() + v[3].size2() == m.size2());
return m;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
///@}
///@name Member variables
///@{
DenseVector<unsigned int> mElementsPartition;
///@}
///@name Un accessible methods
///@{
double GetRangeWithinVector(const std::vector<double>& vector)
{
double min = vector[0];
double max = vector[0];
for (unsigned int i = 0; i != vector.size(); ++i){
min = std::min(min, vector[i]);
max = std::max(max, vector[i]);
}
return (max - min);
}
DenseVector<unsigned int>& GetElementPartition()
{
return mElementsPartition;
}
ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
{
return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k];
}
ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
{
return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1];
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
}; // Class CustomFunctionsCalculator
} // namespace Kratos.
#endif // KRATOS_CREATE_AND_DESTROY defined
|
model_initializer.h
|
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_MODEL_INITIALIZER_H_
#define CORE_MODEL_INITIALIZER_H_
#include <ctime>
#include <string>
#include <vector>
#include "core/container/math_array.h"
#include "core/diffusion_grid.h"
#include "core/resource_manager.h"
#include "core/simulation.h"
#include "core/util/random.h"
namespace bdm {
struct ModelInitializer {
/// Creates a 3D cubic grid of agents and adds them to the
/// ExecutionContext. Type of the agent is determined by the return
/// type of parameter agent_builder.
///
/// ModelInitializer::Grid3D(8, 10, [](const Double3& pos){
/// return new Cell(pos); });
/// @param agents_per_dim number of agents on each axis.
/// Number of generated agents =
/// `agents_per_dim ^ 3`
/// @param space space between the positions - e.g space = 10:
/// positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
/// 20), ... }`
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void Grid3D(size_t agents_per_dim, double space,
Function agent_builder) {
#pragma omp parallel
{
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
#pragma omp for
for (size_t x = 0; x < agents_per_dim; x++) {
auto x_pos = x * space;
for (size_t y = 0; y < agents_per_dim; y++) {
auto y_pos = y * space;
for (size_t z = 0; z < agents_per_dim; z++) {
auto* new_agent = agent_builder({x_pos, y_pos, z * space});
ctxt->AddAgent(new_agent);
}
}
}
}
}
/// Creates a 3D grid of agents and adds them to the
/// ExecutionContext. Type of the agent is determined by the return
/// type of parameter agent_builder.
///
/// ModelInitializer::Grid3D({8,6,4}, 10, [](const Double3&
/// pos){ return new Cell(pos); });
/// @param agents_per_dim number of agents on each axis.
/// Number of generated agents =
/// `agents_per_dim[0] * agents_per_dim[1] *
/// agents_per_dim[2]`
/// @param space space between the positions - e.g space = 10:
/// positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
/// 20), ... }`
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void Grid3D(const std::array<size_t, 3>& agents_per_dim, double space,
Function agent_builder) {
#pragma omp parallel
{
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
#pragma omp for
for (size_t x = 0; x < agents_per_dim[0]; x++) {
auto x_pos = x * space;
for (size_t y = 0; y < agents_per_dim[1]; y++) {
auto y_pos = y * space;
for (size_t z = 0; z < agents_per_dim[2]; z++) {
auto* new_agent = agent_builder({x_pos, y_pos, z * space});
ctxt->AddAgent(new_agent);
}
}
}
}
}
/// Adds agents to the ExecutionContext. Type of the simulation
/// object is determined by the return type of parameter agent_builder.
///
/// @param positions positions of the agents to be created
/// @param agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void CreateAgents(const std::vector<Double3>& positions,
Function agent_builder) {
#pragma omp parallel
{
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
#pragma omp for
for (size_t i = 0; i < positions.size(); i++) {
auto* new_agent =
agent_builder({positions[i][0], positions[i][1], positions[i][2]});
ctxt->AddAgent(new_agent);
}
}
}
/// Adds agents with random positions to the ExecutionContext.
/// Type of the agent is determined by the return type of
/// parameter agent_builder.
///
/// @param[in] min The minimum position value
/// @param[in] max The maximum position value
/// @param[in] num_agents The number of agents
/// @param[in] agent_builder function containing the logic to instantiate a
/// new agent. Takes `const
/// Double3&` as input parameter
///
template <typename Function>
static void CreateAgentsRandom(double min, double max, uint64_t num_agents,
Function agent_builder) {
#pragma omp parallel
{
auto* sim = Simulation::GetActive();
auto* ctxt = sim->GetExecutionContext();
auto* random = sim->GetRandom();
#pragma omp for
for (uint64_t i = 0; i < num_agents; i++) {
auto* new_agent = agent_builder(random->UniformArray<3>(min, max));
ctxt->AddAgent(new_agent);
}
}
}
/// Allows agents to secrete the specified substance. Diffusion throughout the
/// simulation space is automatically taken care of by the DiffusionGrid class
///
/// @param[in] substance_id The substance identifier
/// @param[in] substance_name The substance name
/// @param[in] diffusion_coeff The diffusion coefficient
/// @param[in] decay_constant The decay constant
/// @param[in] resolution The resolution of the diffusion grid
///
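/// Example (illustrative values): ModelInitializer::DefineSubstance(0, "oxygen", 0.5, 0.1, 20);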
static void DefineSubstance(size_t substance_id, std::string substance_name,
double diffusion_coeff, double decay_constant,
int resolution = 10) {
assert(resolution > 0 && "Resolution needs to be a positive integer value");
auto* sim = Simulation::GetActive();
auto* rm = sim->GetResourceManager();
DiffusionGrid* d_grid =
new DiffusionGrid(substance_id, substance_name, diffusion_coeff,
decay_constant, resolution);
rm->AddDiffusionGrid(d_grid);
}
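/// Registers an initializer on an already defined substance: looks up the
/// DiffusionGrid for `substance_id` in the ResourceManager and forwards
/// `function` to DiffusionGrid::AddInitializer (typically used to set the
/// initial concentration as a function of position).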
template <typename F>
static void InitializeSubstance(size_t substance_id, F function) {
auto* sim = Simulation::GetActive();
auto* rm = sim->GetResourceManager();
auto diffusion_grid = rm->GetDiffusionGrid(substance_id);
diffusion_grid->AddInitializer(function);
}
};
} // namespace bdm
#endif // CORE_MODEL_INITIALIZER_H_
|
GB_unaryop__ainv_uint8_uint8.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint8
// op(A') function: GB_tran__ainv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
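// (for the unsigned type uint8_t, negation wraps modulo 256: cij = (256 - aij) % 256)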
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_uint8
(
uint8_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3.taskloop.c
|
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include "omp.h"
/* Q1: Execute the program several times and make sure you are able */
/* to explain when each thread in the threads team is actually */
/* contributing to the execution of work (tasks) generated in the */
/* taskloop. */
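/* Hint: T1 and T2 are children of the task executing the single region, so */
/* the taskwait below only waits for them. T3, T4 and the taskloop tasks */
/* (created with nogroup, hence no implied wait at the end of the taskloop) */
/* are descendants, and are only guaranteed to have completed at the */
/* implicit barrier at the end of the single/parallel region. */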
void long_running_task(int value) {
printf("Thread %d going to sleep for %d seconds\n", omp_get_thread_num(), value);
sleep(value);
printf("Thread %d weaking up after a %d seconds siesta, willing to work ...\n", omp_get_thread_num(), value);
}
void loop_body(int i, int j) {
printf("Thread %d executing loop body (%d, %d)\n", omp_get_thread_num(), i, j);
sleep(1);
}
int main(int argc, char *argv[]) {
#pragma omp parallel num_threads(4)
#pragma omp single
{
printf("I am thread %d and going to create T1 and T2\n", omp_get_thread_num());
#pragma omp task // Task T1
long_running_task(5);
#pragma omp task // Task T2
{
#pragma omp task // Task T3
long_running_task(10); // can execute concurrently
#pragma omp task // Task T4
{
#pragma omp taskloop grainsize(1) nogroup // Tasks TL
for (long i = 0; i < 10; i++)
for (long j = 0; j < i; j++)
loop_body(i, j);
printf("Thread %d finished the creation of all tasks in taskloop TL\n", omp_get_thread_num());
}
printf("Thread %d finished the execution of task creating T3 and T4\n", omp_get_thread_num());
}
printf("I am still thread %d after creating T1 and T2, ready to enter in the taskwait\n", omp_get_thread_num());
#pragma omp taskwait
printf("I am still thread %d, but now after exiting from the taskwait\n", omp_get_thread_num());
}
return 0;
}
|
shear.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const MagickRealType x_shear,const MagickRealType y_shear,
% const MagickRealType width,const MagickRealType height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const MagickRealType x_shear,const MagickRealType y_shear,
const MagickRealType width,const MagickRealType height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The amount of rotation calculated to deskew the image is saved in the
% artifact "deskew:angle".
%
% If the artifact "deskew:auto-crop" is given the image will be automatically
% cropped of the excess background. The value is the border width of all
% pixels around the edge that will be used to determine an average border
% color for the automatic trim.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
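%
%  A minimal usage sketch (the threshold is given in quantum units; for
%  example, 0.40*QuantumRange corresponds to the command-line option
%  "-deskew 40%"):
%
%    deskew_image=DeskewImage(image,0.40*QuantumRange,exception);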
%
*/
typedef struct _RadonInfo
{
CacheType
type;
size_t
width,
height;
MagickSizeType
length;
MagickBooleanType
mapped;
char
path[MaxTextExtent];
int
file;
unsigned short
*cells;
} RadonInfo;
static RadonInfo *DestroyRadonInfo(RadonInfo *radon_info)
{
assert(radon_info != (RadonInfo *) NULL);
switch (radon_info->type)
{
case MemoryCache:
{
if (radon_info->mapped == MagickFalse)
radon_info->cells=(unsigned short *) RelinquishMagickMemory(
radon_info->cells);
else
radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,
(size_t) radon_info->length);
RelinquishMagickResource(MemoryResource,radon_info->length);
break;
}
case MapCache:
{
radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,(size_t)
radon_info->length);
RelinquishMagickResource(MapResource,radon_info->length);
}
case DiskCache:
{
if (radon_info->file != -1)
(void) close(radon_info->file);
(void) RelinquishUniqueFileResource(radon_info->path);
RelinquishMagickResource(DiskResource,radon_info->length);
break;
}
default:
break;
}
return((RadonInfo *) RelinquishMagickMemory(radon_info));
}
static MagickBooleanType ResetRadonCells(RadonInfo *radon_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned short
value;
if (radon_info->type != DiskCache)
{
(void) ResetMagickMemory(radon_info->cells,0,(size_t) radon_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(radon_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) radon_info->height; y++)
{
for (x=0; x < (ssize_t) radon_info->width; x++)
{
count=write(radon_info->file,&value,sizeof(*radon_info->cells));
if (count != (ssize_t) sizeof(*radon_info->cells))
break;
}
if (x < (ssize_t) radon_info->width)
break;
}
return(y < (ssize_t) radon_info->height ? MagickFalse : MagickTrue);
}
static RadonInfo *AcquireRadonInfo(const Image *image,const size_t width,
const size_t height,ExceptionInfo *exception)
{
MagickBooleanType
status;
RadonInfo
*radon_info;
radon_info=(RadonInfo *) AcquireMagickMemory(sizeof(*radon_info));
if (radon_info == (RadonInfo *) NULL)
return((RadonInfo *) NULL);
(void) ResetMagickMemory(radon_info,0,sizeof(*radon_info));
radon_info->width=width;
radon_info->height=height;
radon_info->length=(MagickSizeType) width*height*sizeof(*radon_info->cells);
radon_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,radon_info->length);
if ((status != MagickFalse) &&
(radon_info->length == (MagickSizeType) ((size_t) radon_info->length)))
{
status=AcquireMagickResource(MemoryResource,radon_info->length);
if (status != MagickFalse)
{
radon_info->mapped=MagickFalse;
radon_info->cells=(unsigned short *) AcquireMagickMemory((size_t)
radon_info->length);
if (radon_info->cells == (unsigned short *) NULL)
{
radon_info->mapped=MagickTrue;
radon_info->cells=(unsigned short *) MapBlob(-1,IOMode,0,(size_t)
radon_info->length);
}
if (radon_info->cells == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,radon_info->length);
}
}
radon_info->file=(-1);
if (radon_info->cells == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,radon_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(DestroyRadonInfo(radon_info));
}
radon_info->type=DiskCache;
(void) AcquireMagickResource(MemoryResource,radon_info->length);
radon_info->file=AcquireUniqueFileResource(radon_info->path);
if (radon_info->file == -1)
return(DestroyRadonInfo(radon_info));
status=AcquireMagickResource(MapResource,radon_info->length);
if (status != MagickFalse)
{
status=ResetRadonCells(radon_info);
if (status != MagickFalse)
{
radon_info->cells=(unsigned short *) MapBlob(radon_info->file,
IOMode,0,(size_t) radon_info->length);
if (radon_info->cells != (unsigned short *) NULL)
radon_info->type=MapCache;
else
RelinquishMagickResource(MapResource,radon_info->length);
}
}
}
return(radon_info);
}
static inline size_t MagickMin(const size_t x,const size_t y)
{
if (x < y)
return(x);
return(y);
}
static inline ssize_t ReadRadonCell(const RadonInfo *radon_info,
const MagickOffsetType offset,const size_t length,unsigned char *buffer)
{
register ssize_t
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PPREAD)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ReadRadonCell)
#endif
{
i=(-1);
if (lseek(radon_info->file,offset,SEEK_SET) >= 0)
{
#endif
count=0;
for (i=0; i < (ssize_t) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PPREAD)
count=read(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pread(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count > 0)
continue;
count=0;
if (errno != EINTR)
{
i=(-1);
break;
}
}
#if !defined(MAGICKCORE_HAVE_PPREAD)
}
}
#endif
return(i);
}
static inline ssize_t WriteRadonCell(const RadonInfo *radon_info,
const MagickOffsetType offset,const size_t length,const unsigned char *buffer)
{
register ssize_t
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_WriteRadonCell)
#endif
{
if (lseek(radon_info->file,offset,SEEK_SET) >= 0)
{
#endif
count=0;
for (i=0; i < (ssize_t) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pwrite(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count > 0)
continue;
count=0;
if (errno != EINTR)
{
i=(-1);
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
}
}
#endif
return(i);
}
static inline unsigned short GetRadonCell(const RadonInfo *radon_info,
const ssize_t x,const ssize_t y)
{
MagickOffsetType
i;
unsigned short
value;
i=(MagickOffsetType) radon_info->height*x+y;
if ((i < 0) ||
((MagickSizeType) (i*sizeof(*radon_info->cells)) >= radon_info->length))
return(0);
if (radon_info->type != DiskCache)
return(radon_info->cells[i]);
value=0;
(void) ReadRadonCell(radon_info,i*sizeof(*radon_info->cells),
sizeof(*radon_info->cells),(unsigned char *) &value);
return(value);
}
static inline MagickBooleanType SetRadonCell(const RadonInfo *radon_info,
const ssize_t x,const ssize_t y,const unsigned short value)
{
MagickOffsetType
i;
ssize_t
count;
i=(MagickOffsetType) radon_info->height*x+y;
if ((i < 0) ||
((MagickSizeType) (i*sizeof(*radon_info->cells)) >= radon_info->length))
return(MagickFalse);
if (radon_info->type != DiskCache)
{
radon_info->cells[i]=value;
return(MagickTrue);
}
count=WriteRadonCell(radon_info,i*sizeof(*radon_info->cells),
sizeof(*radon_info->cells),(const unsigned char *) &value);
if (count != (ssize_t) sizeof(*radon_info->cells))
return(MagickFalse);
return(MagickTrue);
}
static void RadonProjection(const Image *image,RadonInfo *source_cells,
RadonInfo *destination_cells,const ssize_t sign,size_t *projection)
{
RadonInfo
*swap;
register ssize_t
x;
register RadonInfo
*p,
*q;
size_t
step;
p=source_cells;
q=destination_cells;
for (step=1; step < p->width; step*=2)
{
for (x=0; x < (ssize_t) p->width; x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
cell;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (p->height-i-1); y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+(ssize_t)
step,y+i));
(void) SetRadonCell(q,x+2*i+1,y,cell+GetRadonCell(p,x+i+(ssize_t)
step,y+i+1));
}
for ( ; y < (ssize_t) (p->height-i); y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+(ssize_t) step,
y+i));
(void) SetRadonCell(q,x+2*i+1,y,cell);
}
for ( ; y < (ssize_t) p->height; y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell);
(void) SetRadonCell(q,x+2*i+1,y,cell);
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (x=0; x < (ssize_t) p->width; x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (p->height-1); y++)
{
ssize_t
delta;
delta=GetRadonCell(p,x,y)-(ssize_t) GetRadonCell(p,x,y+1);
sum+=delta*delta;
}
projection[p->width+sign*x-1]=sum;
}
}
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
RadonInfo
*destination_cells,
*source_cells;
register ssize_t
i;
size_t
count,
width;
ssize_t
y;
unsigned char
byte;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_cells=AcquireRadonInfo(image,width,image->rows,exception);
destination_cells=AcquireRadonInfo(image,width,image->rows,exception);
if ((source_cells == (RadonInfo *) NULL) ||
(destination_cells == (RadonInfo *) NULL))
{
if (destination_cells != (RadonInfo *) NULL)
destination_cells=DestroyRadonInfo(destination_cells);
if (source_cells != (RadonInfo *) NULL)
source_cells=DestroyRadonInfo(source_cells);
return(MagickFalse);
}
if (ResetRadonCells(source_cells) == MagickFalse)
{
destination_cells=DestroyRadonInfo(destination_cells);
source_cells=DestroyRadonInfo(source_cells);
return(MagickFalse);
}
for (i=0; i < 256; i++)
{
byte=(unsigned char) i;
for (count=0; byte != 0; byte>>=1)
count+=byte & 0x01;
bits[i]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
(void) SetRadonCell(source_cells,--i,y,bits[byte]);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
(void) SetRadonCell(source_cells,--i,y,bits[byte]);
}
}
RadonProjection(image,source_cells,destination_cells,-1,projection);
(void) ResetRadonCells(source_cells);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
(void) SetRadonCell(source_cells,i++,y,bits[byte]);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
(void) SetRadonCell(source_cells,i++,y,bits[byte]);
}
}
RadonProjection(image,source_cells,destination_cells,1,projection);
image_view=DestroyCacheView(image_view);
destination_cells=DestroyRadonInfo(destination_cells);
source_cells=DestroyRadonInfo(source_cells);
return(MagickTrue);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickPixelPacket
background;
MagickRealType
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetMagickPixelPacket(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(p);
background.green+=QuantumScale*GetPixelGreen(p);
background.blue+=QuantumScale*GetPixelBlue(p);
background.opacity+=QuantumScale*GetPixelOpacity(p);
count++;
p++;
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
background.red/count);
image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
background.green/count);
image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
background.blue/count);
image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
background.opacity/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MaxTextExtent];
(void) FormatLocaleString(angle,MaxTextExtent,"%g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (artifact == (const char *) NULL)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
% o exception: return any errors or warnings in this structure.
%
*/
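/*
  Added usage sketch (not part of the original source), kept under '#if 0' so
  it is not compiled: the caller supplies an image and an exception structure
  (e.g. from ReadImage() and AcquireExceptionInfo()) and owns the result.
*/
#if 0
static Image *IntegralRotateExample(const Image *image,
  ExceptionInfo *exception)
{
  /* one quarter turn; pass 2 for 180 degrees or 3 for 270 degrees */
  return(IntegralRotateImage(image,1,exception));
}
#endif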
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
if (rotations == 0)
return(CloneImage(image,0,0,MagickTrue,exception));
if ((rotations == 1) || (rotations == 3))
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
else
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
switch (rotations)
{
case 0:
{
/*
Rotate 0 degrees.
*/
break;
}
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
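/* tiles span the full image width; only the tile height comes from the pixel cache */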
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels-=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*restrict tile_indexes;
tile_indexes=indexes+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes-=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(rotate_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels+=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*restrict tile_indexes;
tile_indexes=indexes+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes+=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
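/*
  Added commentary (not in the original header): each row y of the region is
  shifted horizontally by

      displacement = degrees*(y-height/2.0)

  so rows above and below the vertical center move in opposite directions, and
  the fractional part of the displacement is area-blended with the background
  color to anti-alias the sheared edge.  Note that the callers in this file
  pass a tangent (a slope) in the 'degrees' argument; for example, with
  degrees=0.5 and height=100, row y=0 shifts by -25 pixels and row y=99 by
  +24.5 pixels.
*/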
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
X shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*restrict indexes,
*restrict shear_indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=x_offset;
indexes+=x_offset;
displacement=degrees*(MagickRealType) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width;
indexes+=width;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) width; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (x_offset+width+step-i) >= image->columns)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_XShearImage)
#endif
proceed=SetImageProgress(image,XShearImageTag,progress++,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage() shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
x;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
Y Shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,width,1)
#endif
for (x=0; x < (ssize_t) width; x++)
{
ssize_t
step;
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*restrict indexes,
*restrict shear_indexes;
register ssize_t
i;
register PixelPacket
*restrict p,
*restrict q;
ShearDirection
direction;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=y_offset;
indexes+=y_offset;
displacement=degrees*(MagickRealType) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height;
indexes+=height;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) height; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (y_offset+height+step-i) >= image->rows)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_YShearImage)
#endif
proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a sheared copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
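/*
  Added usage sketch (not part of the original source), kept under '#if 0' so
  it is not compiled: the caller supplies the image and exception and owns the
  returned image.
*/
#if 0
static Image *ShearExample(const Image *image,ExceptionInfo *exception)
{
  /* shear 10 degrees along the X axis and 5 degrees along the Y axis */
  return(ShearImage(image,10.0,5.0,exception));
}
#endif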
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
ssize_t
x_offset,
y_offset;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info;
size_t
y_width;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute image size.
*/
y_width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
x_offset=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
y_offset=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*y_width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) x_offset;
border_info.height=(size_t) y_offset;
shear_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->matte == MagickFalse)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,x_offset,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,y_width,image->rows,(ssize_t)
(shear_image->columns-y_width)/2,y_offset,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
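/*
  Added commentary (not in the original header): after reducing the rotation
  to an integral number of 90 degree turns, the residual angle is applied with
  Paeth's three-shear decomposition,

      rotate(a) = x_shear(-tan(a/2)) * y_shear(sin(a)) * x_shear(-tan(a/2)),

  which is why the body below calls XShearImage(), YShearImage(), and
  XShearImage() in sequence with shear.x=(-tan(a/2)) and shear.y=sin(a).
*/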
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
ssize_t
x_offset,
y_offset;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info;
size_t
height,
rotations,
width,
y_width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
angle=degrees;
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute image size.
*/
width=image->columns;
height=image->rows;
if ((rotations == 1) || (rotations == 3))
{
width=image->rows;
height=image->columns;
}
y_width=width+(ssize_t) floor(fabs(shear.x)*height+0.5);
x_offset=(ssize_t) ceil((double) width+((fabs(shear.y)*height)-width)/2.0-
0.5);
y_offset=(ssize_t) ceil((double) height+((fabs(shear.y)*y_width)-height)/2.0-
0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) x_offset;
border_info.height=(size_t) y_offset;
rotate_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,x_offset,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,y_width,height,(ssize_t)
(rotate_image->columns-y_width)/2,y_offset,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,y_width,rotate_image->rows,(ssize_t)
(rotate_image->columns-y_width)/2,0,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
|
stacks.c
|
// -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2022, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "stacks.h"
//*****************************************************************************
// interface functions
//*****************************************************************************
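// Added overview: the sstack_* routines below use relaxed atomic loads and
// stores with no compare-and-swap, so they appear intended for a stack owned
// by a single thread (or otherwise externally synchronized); the cstack_*
// routines use sequentially consistent CAS loops and support concurrent
// push, pop, and steal from multiple threads.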
#define Ad(q) q.aptr
#define Ap(q) q->aptr
void
sstack_ptr_set
(
s_element_ptr_t *p,
s_element_t *v
)
{
atomic_store_explicit(&Ap(p), v, memory_order_relaxed);
}
s_element_t *
sstack_ptr_get
(
s_element_ptr_t *e
)
{
return (s_element_t *) atomic_load_explicit(&Ap(e), memory_order_relaxed);
}
s_element_t *
sstack_swap
(
s_element_ptr_t *q,
s_element_t *r
)
{
return (s_element_t *) atomic_exchange_explicit(&Ap(q), r, memory_order_relaxed);
}
void
sstack_push
(
s_element_ptr_t *q,
s_element_t *e
)
{
s_element_t *first =
(s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), first, memory_order_relaxed);
atomic_store_explicit(&Ap(q), e, memory_order_relaxed);
}
s_element_t *
sstack_pop
(
s_element_ptr_t *q
)
{
s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
if (e) {
s_element_t *next =
(s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
atomic_store_explicit(&Ap(q), next, memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), 0, memory_order_relaxed);
}
return e;
}
s_element_t *
sstack_steal
(
s_element_ptr_t *q
)
{
s_element_t *e = sstack_swap(q, 0);
return e;
}
void
sstack_reverse
(
s_element_ptr_t *q
)
{
s_element_t *prev = NULL;
s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
while (e) {
s_element_t *next =
(s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), prev, memory_order_relaxed);
prev = e;
e = next;
}
atomic_store_explicit(&Ap(q), prev, memory_order_relaxed);
}
void
sstack_forall
(
s_element_ptr_t *q,
stack_forall_fn_t fn,
void *arg
)
{
s_element_t *current =
(s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
while (current) {
fn(current, arg);
current =
(s_element_t *) atomic_load_explicit(¤t->Ad(next), memory_order_relaxed);
}
}
void
cstack_ptr_set
(
s_element_ptr_t *e,
s_element_t *v
)
{
atomic_init(&Ap(e), (s_element_t *) v);
}
s_element_t *
cstack_ptr_get
(
s_element_ptr_t *e
)
{
return (s_element_t *) atomic_load(&Ap(e));
}
s_element_t *
cstack_swap
(
s_element_ptr_t *q,
s_element_t *r
)
{
s_element_t *e = (s_element_t *) atomic_exchange(&Ap(q), r);
return e;
}
void
cstack_push
(
s_element_ptr_t *q,
s_element_t *e
)
{
s_element_t *head = (s_element_t *) atomic_load(&Ap(q));
s_element_t *new_head = e;
// push a singleton or a chain on the list
for (;;) {
s_element_t *enext = (s_element_t *) atomic_load(&e->Ad(next));
if (enext == 0) break;
e = enext;
}
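// CAS retry loop: link the tail of the pushed chain to the current head and
// retry until the head is unchanged, making new_head the new top of the stack.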
do {
atomic_store(&e->Ad(next), head);
} while (!atomic_compare_exchange_strong(&Ap(q), &head, new_head));
}
s_element_t *
cstack_pop
(
s_element_ptr_t *q
)
{
s_element_t *oldhead = (s_element_t *) atomic_load(&Ap(q));
s_element_t *next = 0;
do {
if (oldhead == 0) return 0;
next = (s_element_t *) atomic_load(&oldhead->Ad(next));
} while (!atomic_compare_exchange_strong(&Ap(q), &oldhead, next));
atomic_store(&oldhead->Ad(next), 0);
return oldhead;
}
s_element_t *
cstack_steal
(
s_element_ptr_t *q
)
{
s_element_t *e = cstack_swap(q, 0);
return e;
}
void
cstack_forall
(
s_element_ptr_t *q,
stack_forall_fn_t fn,
void *arg
)
{
s_element_t *current = (s_element_t *) atomic_load(&Ap(q));
while (current) {
fn(current, arg);
current = (s_element_t *) atomic_load(¤t->Ad(next));
}
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_declare_type(int);
typed_stack_impl(int, cstack);
typed_stack_elem_ptr(int) queue;
void
print(typed_stack_elem(int) *e, void *arg)
{
printf("%d\n", e->value);
}
int main(int argc, char **argv)
{
int i;
for (i = 0; i < 10; i++) {
typed_stack_elem_ptr(int)
item = (typed_stack_elem_ptr(int)) malloc(sizeof(typed_stack_elem(int)));
item->value = i;
typed_stack_elem_ptr_set(int, cstack)(item, 0);
typed_stack_push(int, cstack)(&queue, item);
}
typed_stack_forall(int, cstack)(&queue, print, 0);
}
#endif
#if 0
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_elem_ptr(int) queue;
#define qtype cstack
typed_stack(int, qtype)
typed_stack_elem(int) *
typed_stack_elem_fn(int,new)(int value)
{
typed_stack_elem(int) *e =
(typed_stack_elem(int) *) malloc(sizeof(int_s_element_t));
e->value = value;
typed_stack_elem_ptr_set(int, qtype)(&e->next, 0);
}
void
pop
(
int n
)
{
int i;
for(i = 0; i < n; i++) {
typed_stack_elem(int) *e = typed_stack_pop(int, qtype)(&queue);
if (e == 0) {
printf("%d queue empty\n", omp_get_thread_num());
break;
} else {
printf("%d popping %d\n", omp_get_thread_num(), e->value);
}
}
}
void
push
(
int min,
int n
)
{
int i;
for(i = min; i < min+n; i++) {
printf("%d pushing %d\n", omp_get_thread_num(), i);
typed_stack_push(int, qtype)(&queue, typed_stack_elem_fn(int, new)(i));
}
}
void
dump
(
int_s_element_t *e
)
{
int i;
for(; e;
e = (int_s_element_t *) typed_stack_elem_ptr_get(int,qtype)(&e->next)) {
printf("%d stole %d\n", omp_get_thread_num(), e->value);
}
}
int
main
(
int argc,
char **argv
)
{
typed_stack_elem_ptr_set(int, qtype)(&queue, 0);
#pragma omp parallel
{
push(0, 30);
pop(10);
push(100, 12);
// pop(100);
int_s_element_t *e = typed_stack_steal(int, qtype)(&queue);
dump(e);
push(300, 30);
typed_stack_push(int, qtype)(&queue, e);
pop(100);
}
}
#endif
|
nesting.c
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
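/*
 * Added note: this test exercises nested parallel regions (the parallel
 * regions inside foo() and bar() are reached from within another parallel
 * region).  Whether the inner regions get more than one thread depends on the
 * OpenMP runtime's nesting settings, e.g. OMP_NESTED=true or
 * OMP_MAX_ACTIVE_LEVELS, or a call such as omp_set_max_active_levels(2)
 * before the first parallel region; otherwise the inner regions run with a
 * team of one thread.
 */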
static inline void foo(void)
{
printf("foo called by %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
#pragma omp parallel
{
printf("foo parallel reached by %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
}
return;
}
static inline void bar(void)
{
printf("bar called by %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
#pragma omp parallel
{
printf("bar parallel reached by %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
}
return;
}
int main(int argc, char* argv[])
{
#pragma omp parallel
{
printf("main parallel 1 from %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
foo();
bar();
}
fflush(stdout);
printf("=========================\n");
fflush(stdout);
#pragma omp parallel
{
printf("main parallel 2 from %d of %d\n", omp_get_thread_num(), omp_get_num_threads() );
}
foo();
bar();
fflush(stdout);
return 0;
}
|
dynamic_fmt.c
|
/*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2009-2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* Generic 'scriptable' hash cracker for JtR
*
* Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1
* at the present time. More crypt types 'may' be added later.
* Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types.
 * Whirlpool uses OpenSSL if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise uses the sph_* code.
*
* There used to be a todo list, and other commenting here. It has been
* moved to ./docs/dynamic_history.txt
*
* KNOWN issues, and things to do.
*
* 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and
* MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12]
* These would only be valid for a FIXED length salted format. Then
* we can write the pass right into the buffer, and get_key() would read
* it back from there, either skipping over the salt, or removing the salt
* from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized
* in the way of string loading, and many fewer buffer copies. So dyna_1 could
* be optimized to something like:
// dynamic_1 Joomla md5($p.$s)
static DYNAMIC_primitive_funcp _Funcs_1[] =
{
//Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED
// saltlen=3 (or whatever). This fixed size is 'key'
DynamicFunc__appendsalt_after_pass1,
DynamicFunc__crypt_md5,
NULL
};
* WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT,
* I think I can make that 'work' for variable sized salts. But for the
* MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would
* like to store all PW's at salt_len offset in the buffer, and simply overwrite the
* first part of each buffer with the salt, never moving the password after the first
* time it is written. THEN it is very important this ONLY be allowed when we KNOW
* the salt length ahead of time.
*
* 2. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change
* the fake-salts.c, and options so that 'generic' regen-salts can be done.
*/
#include <string.h>
#include <time.h>
#if AC_BUILT
#include "autoconfig.h"
#endif
#include "arch.h"
#if !FAST_FORMATS_OMP
#ifdef _OPENMP
# define FORCE_THREAD_MD5_body
#endif
#undef _OPENMP
#endif
#ifndef DYNAMIC_DISABLED
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "md5.h"
#include "md4.h"
#include "dynamic.h"
#include "options.h"
#include "config.h"
#include "sha.h"
#include "sha2.h"
#include "gost.h"
#include "sph_haval.h"
#include "sph_ripemd.h"
#include "sph_tiger.h"
#include "sph_md2.h"
#include "sph_panama.h"
#include "sph_skein.h"
#include "sph_whirlpool.h"
#include "memory.h"
#include "unicode.h"
#include "johnswap.h"
#include "crc32.h"
#include "aligned.h"
#include "fake_salts.h"
#include "base64_convert.h"
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#else
// on my 32 bit cygwin builds, this code is about 4x slower than the oSSL code.
#define WHIRLPOOL_CTX sph_whirlpool_context
#define WHIRLPOOL_Init(a) sph_whirlpool_init(a)
#define WHIRLPOOL_Update(a,b,c) sph_whirlpool(a,b,c)
#define WHIRLPOOL_Final(a,b) sph_whirlpool_close(b,a)
#endif
#include "KeccakHash.h"
#define KECCAK_CTX Keccak_HashInstance
#define KECCAK_Update(a,b,c) Keccak_HashUpdate(a,b,(c)*8)
#define KECCAK_Final(a,b) Keccak_HashFinal(b,a)
#define KECCAK_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x01)
#define KECCAK_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x01)
// FIPS 202 compliant
#define SHA3_224_Init(hash) Keccak_HashInitialize(hash, 1152, 448, 224, 0x06)
#define SHA3_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x06)
#define SHA3_384_Init(hash) Keccak_HashInitialize(hash, 832, 768, 384, 0x06)
#define SHA3_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x06)
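// Note (added): the 0x01 suffix above selects the original Keccak padding,
// while 0x06 is the FIPS 202 (SHA-3) domain-separation suffix; the rate and
// capacity arguments are the same for matching digest sizes.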
#ifdef _OPENMP
#include <omp.h>
static unsigned int m_ompt;
#endif
#include "dynamic_types.h"
#include "memdbg.h"
#if (defined (_OPENMP)||defined(FORCE_THREAD_MD5_body)) && defined (_MSC_VER)
unsigned DES_bs_max_kpc, DES_bs_min_kpc, DES_bs_all_p;
#undef MD5_body
extern void MD5_body(MD5_word x[15],MD5_word out[4]);
#endif
#define STRINGIZE2(s) #s
#define STRINGIZE(s) STRINGIZE2(s)
static struct fmt_main fmt_Dynamic;
static struct fmt_main *pFmts;
static int nFmts;
static int nLocalFmts;
static struct fmt_main *pLocalFmts;
static int force_md5_ctx;
static void dynamic_RESET(struct fmt_main *fmt);
#define eLargeOut dyna_eLargeOut
eLargeOut_t *eLargeOut;
#define nLargeOff dyna_nLargeOff
unsigned *nLargeOff;
#if ARCH_LITTLE_ENDIAN
#define MD5_swap(x, y, count)
#define MD5_swap2(a,b,c,d,e)
#else
extern char *MD5_DumpHexStr(void *p);
static void MD5_swap(MD5_word *x, MD5_word *y, int count)
{
do {
*y++ = JOHNSWAP(*x++);
} while (--count);
}
#if MD5_X2
static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count)
{
do {
*y++ = JOHNSWAP(*x++);
*y2++ = JOHNSWAP(*x2++);
} while (--count);
}
#endif
#endif
#define FORMAT_LABEL "dynamic"
#define FORMAT_NAME "Generic MD5"
#ifdef SIMD_COEF_32
# define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) )
# define SHAGETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) ) //for endianity conversion
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_SIZE_SHA 20
#define BINARY_ALIGN MEM_ALIGN_WORD
// Computation for 'salt_size'.  The salt (and salt2) is appended to the end of the hash entry.
// The format of a salted entry is: $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL]
// salt 64 bytes,
// salt2 64 bytes,
// salt signature $ 1 byte
// salt2 signature $$2 3 bytes
// null termination 1 byte.  This allows two 64 byte salts.
// Note, we now have up to 10 of these.
#define SALT_SIZE (64*4+1+3+1)
#define SALT_ALIGN MEM_ALIGN_WORD
// slots to do 24 'tests'.  Note, we copy the same 3 tests over and over
// again, simply to validate that tests use 'multiple' blocks.
static struct fmt_tests dynamic_tests[] = {
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}
};
#ifdef SIMD_COEF_32
// SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used
// with the SSE2, since that final md5 will be over a 64 byte block of data.
static union SIMD_inpup {
ARCH_WORD_32 w[(64*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[64*SIMD_COEF_32];
} *input_buf, *input_buf2;
static union SIMD_crypt {
ARCH_WORD_32 w[(BINARY_SIZE*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[BINARY_SIZE*SIMD_COEF_32];
} *crypt_key, *crypt_key2;
static unsigned int (*total_len)[SIMD_COEF_32];
static unsigned int (*total_len2)[SIMD_COEF_32];
#define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_INP_BUF2_SZ (sizeof(input_buf2[0])*BLOCK_LOOPS)
#define MMX_TOT_LEN_SZ (sizeof(*total_len) *BLOCK_LOOPS)
#define MMX_TOT_LEN2_SZ (sizeof(*total_len2)*BLOCK_LOOPS)
#define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_CRYPT_KEY_SZ (sizeof(crypt_key[0]) *BLOCK_LOOPS+sizeof(crypt_key[0]))
#define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS)
#endif
#define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2))
#define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86))
MD5_OUT *crypt_key_X86;
MD5_OUT *crypt_key2_X86;
MD5_IN *input_buf_X86;
MD5_IN *input_buf2_X86;
unsigned int *total_len_X86;
unsigned int *total_len2_X86;
BIG_HASH_OUT dynamic_BHO[4];
static int keys_dirty;
// We store the salt here
static unsigned char *cursalt;
// length of salt (so we don't have to call strlen() all the time).
static int saltlen;
int get_dynamic_fmt_saltlen() { return saltlen; }
// This array is for the 2nd salt in the hash. I know of no hashes with double salts,
// but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to
// handle double salts.
static unsigned char *cursalt2;
static int saltlen2;
static unsigned char *username;
static int usernamelen;
static unsigned char *flds[10];
static int fld_lens[10];
const char *dynamic_itoa16 = itoa16;
#if !defined (_DEBUG)
#define itoa16_w2 __Dynamic_itoa_w2
#define itoa16_w2_u __Dynamic_itoa_w2_u
#define itoa16_w2_l __Dynamic_itoa_w2_l
#endif
unsigned short itoa16_w2_u[256], itoa16_w2_l[256];
unsigned short *itoa16_w2=itoa16_w2_l;
// array of the keys. Also lengths of the keys. NOTE if store_keys_in_input, then the
// key array will NOT be used (but the length array still is).
#ifndef MAX_KEYS_PER_CRYPT
#define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86
#endif
#ifndef PLAINTEXT_LENGTH
#define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86
#endif
#define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86)
#define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86)
// Used to compute length of each string to clean. This is needed, since we have to clean a little more than
// just the length, IF we are cleaning strings that are in different endianity than native for the CPU.
// This is seen on SHA224 (etc) on Intel, or MD5 on BE systems. We still try to clean 'only' as much as
// we need to, but that is usually MORE than what the length of the stored string is. 8 gives us 7 byte spill
// over, plus 1 byte for the 0x80
#define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8)
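// Example (added): for a 20 byte stored string, COMPUTE_EX_LEN(20) cleans 28
// bytes (the string plus the 7 byte spill-over and the 0x80 byte); when the
// string is within 8 bytes of the end of the buffer, it is capped at the full
// buffer size.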
// this new 'ENCODED_EFFECTIVE_MAX_LENGTH' is needed, since we grab up to 125 bytes of data WHEN in -encode:utf8 mode for a unicode format.
#define ENCODED_EFFECTIVE_MAX_LENGTH (EFFECTIVE_MAX_LENGTH > 125 ? EFFECTIVE_MAX_LENGTH : 125)
static char saved_key[EFFECTIVE_MKPC][ENCODED_EFFECTIVE_MAX_LENGTH + 1];
static int saved_key_len[EFFECTIVE_MKPC];
// this is the max generic location we should target. This keeps us from blowing MD buffers or overwriting memory
// when in utf8->utf16 mode, where we are handling data that is likely larger than we should handle. We have to
// handle this larger data, even though strings that are all 1 byte utf8 would convert to data that would
// blow our buffers, because we want to keep as many as possible of the 2 and 3 byte utf8 strings.
#define MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE (256-17)
// Used in 'get_key' if we are running in store_keys_in_input mode
static char out[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
// This is the GLOBAL count of keys. ALL of the primitives which deal with a count
// will read from this variable.
#if !defined (_DEBUG)
#define m_count m_Dynamic_Count
#endif
unsigned int m_count;
// If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0), then we
// want to 'allow' bare hashes to be 'valid'. This is how we will do this. We have a boolean
// that, if set to true, makes us perform a 1 time check within the valid function. If at
// that time we find out that we are cracking (or showing, etc), then we will accept lines
// that are either in the format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32
int dynamic_allow_rawhash_fixup = 0;
// this one IS in the private_dat, but since it is accessed SO much, we pull it
// out prior to 'internal' processing. The others are accessed right from
// the structure, since they are accessed infrequently enough to not matter.
static int dynamic_use_sse;
// If set to 1, then do unicode conversion in many string setting functions.
static int *md5_unicode_convert;
#if !defined (_DEBUG)
#define curdat Dynamic_curdat
#endif
private_subformat_data curdat;
// Helper function that loads our 256 unsigned short array that does base-16 conversions.
// This function is called at the 'validation' call that loads our preloads (i.e. it is only
// called one time, pre 'run'; it will be called multiple times when benchmarking, but
// will NOT impact benchmark times).  Loading a word at a time (2 bytes) sped up
// the overall run time of dynamic_2 by almost 5%, thus this conversion is MUCH faster than
// the fastest byte-by-byte code I could put together.  I tested several ways to access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).
/*********************************************************************************
*********************************************************************************
* Start of the 'normal' *_fmt code for md5-gen
*********************************************************************************
*********************************************************************************/
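/* Added commentary: RemoveHEX() decodes an embedded "$HEX$<hexdigits>" run in
   a ciphertext line back into raw bytes, e.g. (hypothetical input)
       $dynamic_1$<hash>$HEX$73616c74   ->   $dynamic_1$<hash>$salt
   If it finds a "00" pair (an embedded NUL) or an invalid hex digit, it gives
   up and copies the input through unchanged. */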
char *RemoveHEX(char *output, char *input)
{
char *cpi = input;
char *cpo = output;
char *cpH = strstr(input, "$HEX$");
if (!cpH) {
// should never get here, we have a check performed before this function is called.
strcpy(output, input);
return output;
}
while (cpi < cpH)
*cpo++ = *cpi++;
*cpo++ = *cpi;
cpi += 5;
while (*cpi) {
if (*cpi == '0' && cpi[1] == '0') {
strcpy(output, input);
return output;
}
if (atoi16[ARCH_INDEX(*cpi)] != 0x7f && atoi16[ARCH_INDEX(cpi[1])] != 0x7f) {
*cpo++ = atoi16[ARCH_INDEX(*cpi)]*16 + atoi16[ARCH_INDEX(cpi[1])];
cpi += 2;
} else if (*cpi == '$') {
while (*cpi && strncmp(cpi, "$HEX$", 5)) {
*cpo++ = *cpi++;
}
if (!strncmp(cpi, "$HEX$", 5)) {
*cpo++ = *cpi;
cpi += 5;
}
} else {
strcpy(output, input);
return output;
}
}
*cpo = 0;
return output;
}
/*********************************************************************************
* Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
* takes its detection logic from the provided fmt_main pointer. Within there,
* is a 'private' data pointer. When john first loads the md5-gen, it calls a
* function which builds proper 'private' data for EACH type of md5-gen. Then
* john will call valid on EACH of those formats, asking each one if a string is
* valid. Each format has a 'private' properly setup data object.
*********************************************************************************/
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
unsigned int i, cipherTextLen;
char *cp, fixed_ciphertext[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (!pPriv)
return 0;
if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
return 0;
// this is now simply REMOVED totally, if we detect it. Doing this solves MANY other problems
// of leaving it in there. The ONLY problem we still have is NULL bytes.
if (strstr(ciphertext, "$HEX$")) {
if (strlen(ciphertext) < sizeof(fixed_ciphertext))
ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
}
cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];
if (pPriv->dynamic_base64_inout == 1 || pPriv->dynamic_base64_inout == 3 || pPriv->dynamic_base64_inout == 5)
{
// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
unsigned int len;
len = base64_valid_length(cp, pPriv->dynamic_base64_inout==3?e_b64_mime:e_b64_crypt, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
if (len < 20 || len > pPriv->dynamic_SALT_OFFSET+4) return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[len];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[len] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[len+1]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[len+1]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
return 1;
}
if (pPriv->dynamic_base64_inout == 2)
{
// h3mJrcH0901pqX/m$alex
unsigned int i;
for (i = 0; i < 16; ++i) {
if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
return 0;
}
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[i];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
if (strlen(cp) < 16)
return 0;
return 1;
}
if (strlen(cp) < 32)
return 0;
cipherTextLen = CIPHERTEXT_LENGTH;
if (pPriv->dynamic_40_byte_input) {
cipherTextLen = 40;
} else if (pPriv->dynamic_48_byte_input) {
cipherTextLen = 48;
} else if (pPriv->dynamic_64_byte_input) {
cipherTextLen = 64;
} else if (pPriv->dynamic_56_byte_input) {
cipherTextLen = 56;
} else if (pPriv->dynamic_80_byte_input) {
cipherTextLen = 80;
} else if (pPriv->dynamic_96_byte_input) {
cipherTextLen = 96;
} else if (pPriv->dynamic_128_byte_input) {
cipherTextLen = 128;
}
for (i = 0; i < cipherTextLen; i++) {
if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
return 0;
}
if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
if (!cp[cipherTextLen])
return 1;
return 0;
}
if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
return 0;
// NOTE if looking at this in the future, this was not my fix.
if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
return 0;
// end NOTE.
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
// validate length with this in mind.
if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
len = (len-4)>>1;
if (len != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
} else {
// check if there is a 'salt-2' or 'username', etc. If that is the case, then this is still valid.
if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
return 0;
}
}
else if (!regen_salts_options && pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
char *cpX;
// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
// validate length with this in mind.
if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
len = (len-4)>>1;
if (len > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
} else {
// check if there is a 'salt-2' or 'username', etc. If that is the case, then this is still 'valid'.
cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
if (!strstr(cpX, "$$")) {
MEM_FREE(cpX);
return 0;
}
MEM_FREE(cpX);
}
}
if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
return 0;
if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
return 0;
if (pPriv->FldMask) {
for (i = 0; i < 10; ++i) {
if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
char Fld[5];
sprintf(Fld, "$$F%d", i);
if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
return 0;
}
}
}
return 1;
}
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);
// 'wrapper' functions. These are here so we can call these functions to work on ALL data (not simply within one
// thread, which ONLY wants to work on a subset of the data). These functions should NOT be called by threading
// code, EVER. But these functions KNOW what to do. Some actually spawn threads, others do not need them.
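// Per-call work-unit sizes used when the wrappers below carve m_count into chunks. In SIMD builds the
// MD5/MD4/SHA1 increments are a multiple of the SIMD width times the interleave ('para') factor, so
// each chunk covers whole SIMD blocks.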
#ifdef _OPENMP
#ifndef SIMD_COEF_32
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (MD5_X2+1);
const unsigned int OMP_MD4_INC = (MD5_X2+1);
const unsigned int OMP_SHA1_INC = (MD5_X2+1);
#else
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (SIMD_PARA_MD5*SIMD_COEF_32);
const unsigned int OMP_MD4_INC = (SIMD_PARA_MD4*SIMD_COEF_32);
const unsigned int OMP_SHA1_INC = (SIMD_PARA_SHA1*SIMD_COEF_32);
#endif // SIMD_COEF_32
#endif // _OPENMP
static inline void __nonMP_DynamicFunc__SSEtoX86_switch_output2()
{
#ifdef _OPENMP
DynamicFunc__SSEtoX86_switch_output2(0,m_count,0);
#else
DynamicFunc__SSEtoX86_switch_output2();
#endif
}
static inline void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16()
{
#ifdef _OPENMP
DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0);
#else
DynamicFunc__append_from_last_output2_to_input1_as_base16();
#endif
}
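// The eLargeOut / nLargeOff / md5_unicode_convert settings are kept per OpenMP thread; these
// __nonMP_* helpers set every thread's slot at once, for use outside of threaded code.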
void __nonMP_eLargeOut(eLargeOut_t what)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
eLargeOut[i] = what;
#endif
eLargeOut[0] = what;
}
void __nonMP_nLargeOff(unsigned val)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
nLargeOff[i] = val;
#endif
nLargeOff[0] = val;
}
static inline void md5_unicode_convert_set(int what, int tid)
{
md5_unicode_convert[tid] = what;
}
static inline int md5_unicode_convert_get(int tid)
{
return md5_unicode_convert[tid];
}
void __nonMP_md5_unicode_convert(int what)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
md5_unicode_convert[i] = what;
#endif
md5_unicode_convert[0] = what;
}
#if !defined (_OPENMP)
#define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0)
#define md5_unicode_convert_get(tid) md5_unicode_convert_get(0)
#define eLargeOut_set(what, tid) eLargeOut_set(what, 0)
#define eLargeOut_get(tid) eLargeOut_get(0)
#define nLargeOff_set(val, tid) nLargeOff_set(val, 0)
#define nLargeOff_get(tid) nLargeOff_get(0)
#endif
static inline void __nonMP_DynamicFunc__append_keys2()
{
#ifdef _OPENMP
DynamicFunc__append_keys2(0,m_count,0);
#else
DynamicFunc__append_keys2();
#endif
}
static void __possMP_DynamicFunc__crypt2_md5()
{
#ifdef _OPENMP
int i;
unsigned int inc = OMP_MD5_INC;
// if (dynamic_use_sse!=1)
// inc = OMP_INC;
#pragma omp parallel for
for (i = 0; i < m_count; i += inc)
DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());
#else
DynamicFunc__crypt2_md5();
#endif
}
static void __nonMP_DynamicFunc__clean_input()
{
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(input_buf, 0, MMX_INP_BUF_SZ);
memset(total_len, 0, MMX_TOT_LEN_SZ);
return;
}
#endif
for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
//if (total_len_X86[i]) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
//}
}
return;
}
static void __nonMP_DynamicFunc__clean_input2()
{
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(input_buf2, 0, MMX_INP_BUF2_SZ);
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
return;
}
#endif
if (curdat.using_flat_buffers_sse2_ok) {
memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86);
return;
}
for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
//if (total_len2_X86[i]) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
//}
}
return;
}
static void __nonMP_DynamicFunc__clean_input_full()
{
#ifdef SIMD_COEF_32
memset(input_buf, 0, MMX_INP_BUF_SZ);
memset(total_len, 0, MMX_TOT_LEN_SZ);
#endif
memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
}
static void __nonMP_DynamicFunc__clean_input2_full()
{
#ifdef SIMD_COEF_32
memset(input_buf2, 0, MMX_INP_BUF2_SZ);
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
#endif
memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
}
static void __nonMP_DynamicFunc__clean_input_kwik()
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(total_len, 0, MMX_TOT_LEN_SZ);
return;
}
#endif
memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#ifndef _OPENMP
static void __nonMP_DynamicFunc__clean_input2_kwik()
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
return;
}
#endif
memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#endif
/*********************************************************************************
* init() here does nothing. NOTE many formats LINKING into us will have an init()
* that DOES do something, but ours does nothing.
*********************************************************************************/
static void init(struct fmt_main *pFmt)
{
private_subformat_data *pPriv = pFmt->private.data;
unsigned int i;
//fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG);
/* first off, SAVE the original format structure (owned by JtR). We may need this later */
pPriv->pFmtMain = pFmt;
#ifdef _OPENMP
m_ompt = omp_get_max_threads();
if (!md5_unicode_convert) {
md5_unicode_convert = (int*)mem_calloc(m_ompt, sizeof(int));
eLargeOut = (eLargeOut_t*)mem_calloc(m_ompt, sizeof(eLargeOut_t));
nLargeOff = (unsigned*)mem_calloc(m_ompt, sizeof(unsigned));
for (i = 0; i < m_ompt; ++i) {
eLargeOut[i] = eBase16;
nLargeOff[i] = 0;
}
}
#else
if (!md5_unicode_convert) {
md5_unicode_convert = (int*)mem_calloc(1, sizeof(int));
eLargeOut = (eLargeOut_t*)mem_calloc(1, sizeof(eLargeOut_t));
eLargeOut[0] = eBase16;
nLargeOff = (unsigned*)mem_calloc(1, sizeof(unsigned));
nLargeOff[0] = 0;
}
#endif
#ifdef SIMD_COEF_32
if (!input_buf) {
input_buf = mem_calloc_align(1, MMX_INP_BUF_SZ, MEM_ALIGN_SIMD);
total_len = mem_calloc_align(1, MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD);
total_len2 = mem_calloc_align(1, MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD);
input_buf2 = mem_calloc_align(1, MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(1, MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD);
crypt_key2 = mem_calloc_align(1, MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD);
}
#endif
if (!crypt_key_X86) {
crypt_key_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key_X86));
crypt_key2_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key2_X86));
input_buf_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf_X86));
input_buf2_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf2_X86));
total_len_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86));
total_len2_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86));
}
for (i = 0; i < 4; ++i)
dynamic_BHO[i].dat = mem_calloc_align(BLOCK_LOOPS, sizeof(*(dynamic_BHO[0].dat)), MEM_ALIGN_SIMD);
gost_init_table();
if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG)))
return;
__nonMP_DynamicFunc__clean_input_full();
__nonMP_DynamicFunc__clean_input2_full();
// Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply
// call them here.
__nonMP_DynamicFunc__clean_input_kwik();
dynamic_RESET(pFmt);
if (!pPriv)
return;
pPriv->init = 1;
memcpy(&curdat, pPriv, sizeof(private_subformat_data));
dynamic_use_sse = curdat.dynamic_use_sse;
force_md5_ctx = curdat.force_md5_ctx;
fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt;
fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
fmt_Dynamic.params.flags = pFmt->params.flags;
fmt_Dynamic.params.format_name = pFmt->params.format_name;
fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name;
fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment;
fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length;
// we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars.
if ( (pFmt->params.flags&FMT_UNICODE) && options.target_enc == UTF_8 ) {
//printf ("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
}
else
fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
fmt_Dynamic.params.flags = pFmt->params.flags;
fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
fmt_Dynamic.methods.salt = pFmt->methods.salt;
fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
fmt_Dynamic.methods.split = pFmt->methods.split;
fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
{
fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
}
#if !MD5_IMM
{
extern void MD5_std_init(struct fmt_main *pFmt);
MD5_std_init(pFmt);
}
#endif
if (curdat.input2_set_len32) {
for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
total_len2_X86[i] = 32;
#ifdef SIMD_COEF_32
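// Pre-set each SIMD lane for a fixed 32-byte input: 0x80 at offset 32 is the MD5 padding byte,
// and byte 57 of the length field holds 0x01 because 32 bytes is 0x100 bits.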
for (i = 0; i < BLOCK_LOOPS; ++i) {
unsigned int j;
for (j = 0; j < SIMD_COEF_32; j++) {
input_buf2[i].c[GETPOS(32, j)] = 0x80;
input_buf2[i].c[GETPOS(57, j)] = 0x1;
total_len2[i][j] = 0x20;
}
}
#endif
}
}
static void done(void)
{
int i;
MEM_FREE(total_len2_X86);
MEM_FREE(total_len_X86);
MEM_FREE(input_buf2_X86);
MEM_FREE(input_buf_X86);
MEM_FREE(crypt_key2_X86);
MEM_FREE(crypt_key_X86);
#ifdef SIMD_COEF_32
MEM_FREE(crypt_key2);
MEM_FREE(crypt_key);
MEM_FREE(input_buf2);
MEM_FREE(total_len2);
MEM_FREE(total_len);
MEM_FREE(input_buf);
#endif
MEM_FREE(nLargeOff);
MEM_FREE(eLargeOut);
MEM_FREE(md5_unicode_convert);
for (i = 0; i < 4; ++i)
MEM_FREE(dynamic_BHO[i].dat);
}
/*********************************************************************************
* This function will add a $dynamic_#$ IF there is not one, and if we have a specific
* format requested. Also, it will add things like UserID, Domain, Fld3, Fld4,
* Fld5, etc.
*********************************************************************************/
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
private_subformat_data *pPriv = pFmt->private.data;
char Tmp[80];
int i;
int trim_u=0;
char *cpBuilding=split_fields[1];
if (!pPriv)
return split_fields[1];
// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
// the rest of this function makes this assumption.
if (!cpBuilding || strlen(cpBuilding) > 490)
return cpBuilding;
// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
if (pPriv->dynamic_base64_inout == 3 && !strncmp(cpBuilding, "$dynamic_", 9))
{
static char ct[496];
int len;
char *cp = strchr(&cpBuilding[9], '$'), *cp2;
if (!cp) return cpBuilding;
++cp;
len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
if (len && cp[len-1] == '=') {
strnzcpy(ct, cpBuilding, cp-cpBuilding+len+1);
cp2 = &ct[strlen(ct)-1];
while (*cp2 == '=')
*cp2-- = 0;
if (cp[len])
strcat(cp2, &cp[len]);
cpBuilding = ct;
}
}
if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
return split_fields[1];
}
// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
// $dynamic_x$ will be written out (into .pot, output lines, etc).
if (!strncmp(cpBuilding, "md5_gen(", 8))
{
static char ct[496];
char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")];
while (*cp >= '0' && *cp <= '9')
*cpo++ = *cp++;
*cpo++ = '$';
++cp;
strcpy(cpo, cp);
cpBuilding = ct;
}
// At this point, max length of cpBuilding is 491 (if it was a md5_gen signature)
// allow a raw hash, if there is a $u but no salt
if (pPriv->nUserName && strlen(split_fields[0]) && !strchr(cpBuilding, '$') && strcmp(split_fields[0], "?")) {
static char ct[496];
strcpy(ct, cpBuilding);
strcat(ct, "$$U");
cpBuilding = ct;
trim_u=1;
}
cpBuilding = FixupIfNeeded(cpBuilding, pPriv);
if (trim_u)
cpBuilding[strlen(cpBuilding)-3] = 0;
// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
if (strncmp(cpBuilding, "$dynamic_", 9)) {
// ok, here we add the 'generic' regen salt code
if (options.regen_lost_salts && !strchr(cpBuilding, '$')) {
char *cp = load_regen_lost_salt_Prepare(cpBuilding);
if (cp)
return cp;
}
return split_fields[1];
}
if ( (pPriv->pSetup->flags&MGF_SALTED) == 0)
return cpBuilding;
/* at this point, we want to convert ANY and all $HEX$hex into values */
/* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */
/* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */
/* alone, and let the later calls in dynamic.c handle them. */
if (strstr(cpBuilding, "$HEX$")) {
char *cp, *cpo;
int bGood=1;
static char ct[512];
strcpy(ct, cpBuilding);
cp = strstr(ct, "$HEX$");
cpo = cp;
*cpo++ = *cp;
cp += 5;
while (*cp && bGood) {
if (*cp == '0' && cp[1] == '0') {
bGood = 0;
break;
}
if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) {
*cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])];
*cpo = 0;
cp += 2;
} else if (*cp == '$') {
while (*cp && strncmp(cp, "$HEX$", 5)) {
*cpo++ = *cp++;
}
*cpo = 0;
if (!strncmp(cp, "$HEX$", 5)) {
*cpo++ = *cp;
cp += 5;
}
} else {
return split_fields[1];
}
}
if (bGood)
cpBuilding = ct;
// if we came into $HEX$ removal, then cpBuilding will always be shorter
}
// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) {
if (split_fields[0] && strlen(split_fields[0]) && strcmp(split_fields[0], "?")) {
char *userName=split_fields[0], *cp;
static char ct[1024];
// assume field[0] is in format: username OR DOMAIN\\username. If we find a '\\', then use the username 'following' it.
cp = strchr(split_fields[0], '\\');
if (cp)
userName = &cp[1];
userName = HandleCase(userName, pPriv->nUserName);
snprintf (ct, sizeof(ct), "%s$$U%s", cpBuilding, userName);
cpBuilding = ct;
}
}
if (pPriv->FldMask) {
for (i = 0; i < 10; ++i) {
if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) {
sprintf(Tmp, "$$F%d", i);
if (split_fields[i] && strlen(split_fields[i]) && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) {
static char ct[1024];
char ct2[1024];
snprintf (ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]);
strcpy(ct, ct2);
cpBuilding = ct;
}
}
}
}
return cpBuilding;
}
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (strlen(ciphertext) > 950)
return ciphertext;
// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
if (pPriv->dynamic_base64_inout == 3 && !strncmp(ciphertext, "$dynamic_", 9))
{
static char ct[496];
unsigned int len;
char *cp = strchr(&ciphertext[9], '$'), *cp2;
if (cp) {
++cp;
len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
if (len && cp[len-1] == '=') {
strnzcpy(ct, ciphertext, cp-ciphertext+len+1);
cp2 = &ct[strlen(ct)-1];
while (*cp2 == '=')
*cp2-- = 0;
if (cp[len])
strcat(cp2, &cp[len]);
ciphertext = ct;
}
}
}
if (!strncmp(ciphertext, "$dynamic", 8)) {
if (strstr(ciphertext, "$HEX$"))
return RemoveHEX(out, ciphertext);
return ciphertext;
}
if (!strncmp(ciphertext, "md5_gen(", 8)) {
ciphertext += 8;
do ++ciphertext; while (*ciphertext != ')') ;
++ciphertext;
}
if (strstr(ciphertext, "$HEX$")) {
char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
RemoveHEX(cp, ciphertext);
} else
snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
return out;
}
// This split unifies case.
static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (!strncmp(ciphertext, "$dynamic", 8)) {
if (strstr(ciphertext, "$HEX$"))
RemoveHEX(out, ciphertext);
else
strcpy(out, ciphertext);
} else {
if (!strncmp(ciphertext, "md5_gen(", 8)) {
ciphertext += 8;
do ++ciphertext; while (*ciphertext != ')') ;
++ciphertext;
}
if (strstr(ciphertext, "$HEX$")) {
char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
RemoveHEX(cp, ciphertext);
} else
sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
}
ciphertext = strchr(&out[8], '$')+1;
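// Lower-case only the hash portion (up to the next '$'); any salt or extra fields that follow keep their case.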
while (*ciphertext && *ciphertext != '$') {
if (*ciphertext >= 'A' && *ciphertext <= 'Z')
*ciphertext += 0x20; // ASCII specific, but I really do not care.
++ciphertext;
}
// printf("%s\n", out);
return out;
}
/*********************************************************************************
* Stores the new salt provided into our 'working' salt
*********************************************************************************/
static void set_salt(void *salt)
{
unsigned char *cpsalt;
unsigned int todo_bits=0, i, bit;
if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
saltlen = 0;
return;
}
cpsalt = *((unsigned char**)salt);
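// The first two bytes are base-8 digits of the salt length, e.g. '1','4' decodes to (1<<3)+4 = 12.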
saltlen = *cpsalt++ - '0';
saltlen <<= 3;
saltlen += *cpsalt++ - '0';
#if ARCH_ALLOWS_UNALIGNED
if (*((ARCH_WORD_32*)cpsalt) != 0x30303030)
#else
if (memcmp(cpsalt, "0000", 4))
#endif
{
// this is why we used base-8. It takes an extra byte, but there is NO conditional
// logic building this number, and no multiplication. We HAVE added one conditional
// check to see if we can skip the entire load when it is 0000.
todo_bits = *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
}
else
cpsalt += 4;
cursalt = cpsalt;
if (!todo_bits) return;
cpsalt += saltlen;
if (todo_bits & 1) {
todo_bits ^= 1; // clear that bit.
saltlen2 = *cpsalt++;
cursalt2 = cpsalt;
if (todo_bits == 0) return;
cpsalt += saltlen2;
}
if (todo_bits & 2) {
todo_bits ^= 2; // clear that bit.
usernamelen = *cpsalt++;
username = cpsalt;
if (todo_bits == 0) return;
cpsalt += usernamelen;
}
bit = 4;
for (i = 0; i < 10; ++i, bit<<=1) {
if (todo_bits & bit) {
todo_bits ^= bit; // clear that bit.
fld_lens[i] = *cpsalt++;
flds[i] = cpsalt;
if (todo_bits == 0) return;
cpsalt += fld_lens[i];
}
}
}
/*********************************************************************************
* Sets this key. It will either be dropped DIRECTLY into the input buffer
* number 1, or put into an array of keys. Which one happens depends upon
* HOW the generic functions were laid out for this type. Not all types can
load into the input. If not, they MUST use the key array. Using the input
* buffer is faster, when it can be safely done.
*********************************************************************************/
static void set_key(char *key, int index)
{
unsigned int len;
//printf("idx=%d key=%s\n", index, key);
#ifdef SIMD_COEF_32
if (curdat.store_keys_in_input==2)
dynamic_use_sse = 3;
else if (curdat.md5_startup_in_x86)
dynamic_use_sse = 2;
else if (dynamic_use_sse==2)
dynamic_use_sse = 1;
#endif
if (curdat.nPassCase>1)
key = HandleCase(key, curdat.nPassCase);
// Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it.
if (curdat.store_keys_in_input)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
// code derived from rawMD5_fmt_plug.c code from magnum
#if ARCH_ALLOWS_UNALIGNED
const ARCH_WORD_32 *key32 = (ARCH_WORD_32*)key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const ARCH_WORD_32 *key32 = is_aligned(key, sizeof(uint32_t)) ?
(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
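// Copy the key one 32-bit word at a time; when the terminating NUL is found within a word we also
// drop in the 0x80 MD5 padding byte, then the key_cleaning loop zeroes out what is left of any longer
// previous key. Element 14 of the SIMD block later receives the length in bits.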
while((temp = *key32++) & 0xff) {
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xff) | (0x80 << 8);
++len;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xffff) | (0x80 << 16);
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = temp | (0x80U << 24);
len+=3;
goto key_cleaning;
}
*keybuf_word = temp;
len += 4;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
keybuffer[14*SIMD_COEF_32] = len << 3;
return;
}
#endif
len = strlen(key);
if (len > 110) // we never do UTF-8 -> UTF-16 in this mode
len = 110;
// if(index==0) {
// we 'have' to use full clean here. NOT 100% sure why, but 10 formats fail if we do not.
// __nonMP_DynamicFunc__clean_input_full();
// }
#if MD5_X2
if (index & 1)
memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len);
else
#endif
memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len);
saved_key_len[index] = total_len_X86[index] = len;
}
else
{
len = strlen(key);
if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE))
len = 110;
// if(index==0) {
// __nonMP_DynamicFunc__clean_input_full();
// }
keys_dirty = 1;
memcpy(((char*)(saved_key[index])), key, len);
saved_key_len[index] = len;
}
}
static void clear_keys(void)
{
#ifdef SIMD_COEF_32
if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) {
__nonMP_DynamicFunc__clean_input_full();
return;
}
if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3)
return;
if (curdat.md5_startup_in_x86)
__nonMP_DynamicFunc__clean_input_full();
// This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring.
// once commented out, dyna fully passes. I see no reason to keep this here at all.
// else
// __nonMP_DynamicFunc__clean_input_kwik();
#else
__nonMP_DynamicFunc__clean_input_full();
#endif
}
/*********************************************************************************
* Returns the key. NOTE how it gets it depends upon if we are storing
* into the array of keys (there we simply return it), or if we are
* loading into input buffer #1. If in input buffer, we have to re-create
* the key, prior to returning it.
*********************************************************************************/
static char *get_key(int index)
{
if (curdat.store_keys_in_input)
{
unsigned int i;
unsigned char *cp;
#ifdef SIMD_COEF_32
//if (dynamic_use_sse==1) {
// Note, if we are not in
if (dynamic_use_sse && !curdat.md5_startup_in_x86) {
unsigned int s;
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
//if (curdat.store_keys_in_input && dynamic_use_sse==1)
// s = saved_key_len[index]; // NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer.
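// The key length is recovered from the stored bit count (element 14 of the SIMD block), then the
// bytes are gathered back out of the interleaved SIMD input buffer.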
ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
s = keybuffer[14*SIMD_COEF_32] >> 3;
for(i=0;i<s;i++)
out[i] = input_buf[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
out[i] = 0;
return (char*)out;
}
#endif
#if MD5_X2
if (index & 1)
cp = input_buf_X86[index>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[index>>MD5_X2].x1.B;
for(i=0;i<saved_key_len[index];++i)
out[i] = cp[i];
out[i] = 0;
return (char*)out;
}
else
{
saved_key[index][saved_key_len[index]] = '\0';
return saved_key[index];
}
}
/*********************************************************************************
* Looks for ANY key that was cracked.
*********************************************************************************/
static int cmp_all(void *binary, int count)
{
unsigned int i;
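// Only the first 32 bits of each computed hash are compared here; cmp_one()/cmp_exact() confirm a full match.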
#ifdef SIMD_COEF_32
unsigned int j;
if (dynamic_use_sse&1) {
unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
for (i = 0; i < cnt; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
if( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[j])
return 1;
}
return 0;
}
#endif
for (i = 0; i < count; i++) {
#if MD5_X2
if (i&1) {
if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x2.w2[0]))
return 1;
}
else
#endif
if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x1.w[0]))
return 1;
}
return 0;
}
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
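// The 64x4x6 variants carry only 24 significant bits per 32-bit word, so comparisons mask off the
// unused byte (the top byte of each word on little-endian builds, the bottom byte on big-endian builds).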
static int cmp_all_64_4x6(void *binary, int count)
{
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int j;
if (dynamic_use_sse==1) {
unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
for (i = 0; i < cnt; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
if( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[j] & MASK_4x6))
return 1;
}
return 0;
}
#endif
for (i = 0; i < count; i++) {
#if MD5_X2
if (i&1) {
if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6)))
return 1;
}
else
#endif
if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6)))
return 1;
}
return 0;
}
/*********************************************************************************
* In this code, we always do exact compare, so if this function is called, it
* simply returns true.
*********************************************************************************/
static int cmp_exact(char *binary, int index)
{
return 1;
}
/*********************************************************************************
* There was 'something' that was possibly hit. Now john will ask us to check
* each one of the data items, for an 'exact' match.
*********************************************************************************/
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
if( (((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[1] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[2] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[3] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]))
return 1;
return 0;
}
#endif
#if MD5_X2
if (index & 1) {
if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
return 1;
return 0;
}
#endif
if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
return 1;
return 0;
}
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
if( (((ARCH_WORD_32 *)binary)[0] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[1] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[2] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[3] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)))
return 1;
return 0;
}
#endif
#if MD5_X2
if (index & 1) {
if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) )
return 1;
return 0;
}
#endif
if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) )
return 1;
return 0;
}
/*********************************************************************************
*********************************************************************************
* This is the real 'engine'. It simply calls functions one
* at a time from the array of functions.
*********************************************************************************
*********************************************************************************/
static int crypt_all(int *pcount, struct db_salt *salt)
{
// set m_count. This is our GLOBAL value, used by ALL of the script functions to know how
// many keys are loaded, and how much work we do.
m_count = *pcount;
__nonMP_eLargeOut(eBase16);
__nonMP_nLargeOff(0);
#ifdef SIMD_COEF_32
// If this format is MMX built, but is supposed to start in X86 (but be switchable), then we
// set that value here.
if (curdat.store_keys_in_input==2)
dynamic_use_sse = 3;
else if (curdat.md5_startup_in_x86)
dynamic_use_sse = 2;
else if (dynamic_use_sse==2)
dynamic_use_sse = 1;
#endif
__nonMP_md5_unicode_convert(0);
if (curdat.dynamic_base16_upcase) {
dynamic_itoa16 = itoa16u;
itoa16_w2 = itoa16_w2_u;
}
else {
dynamic_itoa16 = itoa16;
itoa16_w2 = itoa16_w2_l;
}
// There may have to be some 'prelim' work done with the keys. This is so that if we 'know' that keys were
// loaded into the keys[] array, but that we should do something like md5 and base-16 put them into an
// input slot, then we do that FIRST, prior to calling the script functions. Thus for a format such as
// md5(md5($p).$s) we could md5 the pass, and base-16 put it into an input buffer. Then when john sets salt
// and calls crypt_all, the crypt script would simply set the input len to 32, append the salt and call a
// single crypt. That eliminates almost 1/2 of the calls to md5_crypt() for the format shown in this example.
if (keys_dirty)
{
if (curdat.store_keys_normal_but_precompute_hash_to_output2)
{
keys_dirty = 0;
if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED2)
__nonMP_DynamicFunc__clean_input2_full();
else
__nonMP_DynamicFunc__clean_input2();
if (curdat.store_keys_in_input_unicode_convert)
__nonMP_md5_unicode_convert(1);
__nonMP_DynamicFunc__append_keys2();
__nonMP_md5_unicode_convert(0);
//if (curdat.using_flat_buffers_sse2_ok) {
if (curdat.dynamic_use_sse == 0) {
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) {
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(); break
#endif
switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type)
{
CASE(MD5);
CASE(MD4);
CASE(SHA1);
CASE(SHA224);
CASE(SHA256);
CASE(SHA384);
CASE(SHA512);
CASE(GOST);
CASE(WHIRLPOOL);
CASE(Tiger);
CASE(RIPEMD128);
CASE(RIPEMD160);
CASE(RIPEMD256);
CASE(RIPEMD320);
CASE(HAVAL128_3);
CASE(HAVAL128_4);
CASE(HAVAL128_5);
CASE(HAVAL160_3);
CASE(HAVAL160_4);
CASE(HAVAL160_5);
CASE(HAVAL192_3);
CASE(HAVAL192_4);
CASE(HAVAL192_5);
CASE(HAVAL224_3);
CASE(HAVAL224_4);
CASE(HAVAL224_5);
CASE(HAVAL256_3);
CASE(HAVAL256_4);
CASE(HAVAL256_5);
CASE(MD2);
CASE(PANAMA);
CASE(SKEIN224);
CASE(SKEIN256);
CASE(SKEIN384);
CASE(SKEIN512);
CASE(SHA3_224);
CASE(SHA3_256);
CASE(SHA3_384);
CASE(SHA3_512);
CASE(KECCAK_256);
CASE(KECCAK_512);
// LARGE_HASH_EDIT_POINT
}
} else if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) {
unsigned int i;
for (i = 0; i < m_count; ++i)
total_len_X86[i] = curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX;
#undef CASE
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(); break
#endif
switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) {
CASE(MD5);
CASE(MD4);
CASE(SHA1);
CASE(SHA224);
CASE(SHA256);
CASE(SHA384);
CASE(SHA512);
CASE(GOST);
CASE(WHIRLPOOL);
CASE(Tiger);
CASE(RIPEMD128);
CASE(RIPEMD160);
CASE(RIPEMD256);
CASE(RIPEMD320);
CASE(HAVAL128_3);
CASE(HAVAL128_4);
CASE(HAVAL128_5);
CASE(HAVAL160_3);
CASE(HAVAL160_4);
CASE(HAVAL160_5);
CASE(HAVAL192_3);
CASE(HAVAL192_4);
CASE(HAVAL192_5);
CASE(HAVAL224_3);
CASE(HAVAL224_4);
CASE(HAVAL224_5);
CASE(HAVAL256_3);
CASE(HAVAL256_4);
CASE(HAVAL256_5);
CASE(MD2);
CASE(PANAMA);
CASE(SKEIN224);
CASE(SKEIN256);
CASE(SKEIN384);
CASE(SKEIN512);
CASE(SHA3_224);
CASE(SHA3_256);
CASE(SHA3_384);
CASE(SHA3_512);
CASE(KECCAK_256);
CASE(KECCAK_512);
// LARGE_HASH_EDIT_POINT
}
} else {
// calls 'old' code (ossl, sorry :( ). We should FIND and remove any format
// written this way, if it is at all possible.
__possMP_DynamicFunc__crypt2_md5();
}
} else {
__possMP_DynamicFunc__crypt2_md5();
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
{
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==2)
__nonMP_DynamicFunc__SSEtoX86_switch_output2();
__nonMP_DynamicFunc__clean_input();
__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
}
}
}
}
// Ok, now we 'run' the script. We simply call 1 function right after the other.
// ALL functions are void f(void). They use the globals:
// input_buf1[] input_buf2[] (requires thread safety)
// total_len1[] total_len2[] (requires thread safety)
// crypt1[] crypt2[] (requires thread safety)
// md5_unicode_convert (requires thread safety, had to change to array)
// saved_key[] (const?)
// saved_key_len[] (const)
// cursalt, cursalt2 (const)
// saltlen, saltlen2 (const)
// m_count (const)
// nConsts (const)
// Consts[], ConstsLen[] (const)
// Since this array is in a structure, we assign a simple pointer to it
// before walking. Trivial improvement, but every cycle counts :)
{
#ifdef _OPENMP
if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) {
int j;
unsigned int inc = (m_count+m_ompt-1) / m_ompt;
//printf ("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity);
inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;
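// Illustrative numbers: with m_count=100, 8 threads and omp_granularity=16, inc starts at
// ceil(100/8)=13 and is rounded up to 16, so threads take chunks [0,16), [16,32), ... (the last
// chunk is clamped below so it never runs past the key buffers).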
#pragma omp parallel for shared(curdat, inc, m_count)
for (j = 0; j < m_count; j += inc) {
unsigned int i;
unsigned int top=j+inc;
/* The last block may 'appear' to have more keys than we have in the
entire buffer space. This is due to the granularity. If so,
reduce that last one to stop at end of our buffers. NOT doing
this causes a huge buffer overflow. */
if (top > curdat.pFmtMain->params.max_keys_per_crypt)
top = curdat.pFmtMain->params.max_keys_per_crypt;
// we now run a full script in this thread, using only a subset of
// the data, from [j,top) The next thread will run from [top,top+inc)
// each thread will take the next inc values, until we get to m_count
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());
}
} else {
unsigned int i;
// same code (almost), but without the threads.
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
(*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0);
}
#else
unsigned int i;
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) {
(*(curdat.dynamic_FUNCTIONS[i]))();
#if 0
// Dump state (for debugging help)
if (i==0) printf("\npassword=%.*s\n", saved_key_len[0], saved_key[0]);
printf ("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i]));
// dump input 1
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0);
dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1);
dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2);
dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3);
#endif
printf ("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b);
printf ("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b);
printf ("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b);
printf ("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b);
// dump crypt 1
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0);
dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1);
dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2);
dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3);
#endif
dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16);
// dump input 2
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0);
dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1);
dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2);
dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3);
#endif
printf ("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b);
printf ("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b);
printf ("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b);
printf ("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b);
// dump crypt 2
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0);
dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1);
dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2);
dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3);
#endif
dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16);
#endif
}
#endif
}
return m_count;
}
/*********************************************************************************
* 'normal' hashing functions
*********************************************************************************/
extern char *MD5_DumpHexStr(void *p);
#if !ARCH_LITTLE_ENDIAN
// the lower 8 bits are zero in the binary (but filled in on the hash), so we shift off the low 8 bits.
static int binary_hash_0_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_0; }
static int binary_hash_1_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_1; }
static int binary_hash_2_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_2; }
static int binary_hash_3_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_3; }
static int binary_hash_4_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_4; }
static int binary_hash_5_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_5; }
static int get_hash_0_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_0;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_0;}
static int get_hash_1_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_1;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_1;}
static int get_hash_2_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_2;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_2;}
static int get_hash_3_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_3;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_3;}
static int get_hash_4_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_4;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_4;}
static int get_hash_5_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_5;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_5;}
#endif
static int get_hash_0(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_0;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_0;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_1;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_1;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_2;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_2;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_3;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_3;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_4;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_4;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_5;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_5;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_6;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_6;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_6;
}
/************************************************************************
* We now fully handle all hashing of salts, here in the format. We
* return a pointer to an allocated salt record. Thus, we search all
* of the salt records, looking for the same salt. If we find it, we
* want to return THAT pointer, and not allocate a new pointer.
* This works great, but forces us to do salt comparison here.
***********************************************************************/
#define DYNA_SALT_HASH_BITS SALT_HASH_LOG
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD (DYNA_SALT_HASH_SIZE-1)
typedef struct dyna_salt_list_entry {
struct dyna_salt_list_entry *next;
unsigned len;
unsigned char *salt;
} dyna_salt_list_entry;
typedef struct {
dyna_salt_list_entry *head, *tail;
int count;
} dyna_salt_list_main;
typedef struct {
dyna_salt_list_main List;
} SaltHashTab_t;
static SaltHashTab_t *SaltHashTab=NULL;
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
static int dyna_salt_list_count=0;
static unsigned char *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
static int nSaltDataBuf=0;
static unsigned char *AddSaltHash(unsigned char *salt, unsigned int len, unsigned int idx)
{
unsigned char *pRet;
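// Bump allocation: list entries come from a 25000-entry slab and raw salt bytes from a 0x60000-byte
// buffer; when either slab runs out a fresh one is grabbed (the mem_*_tiny blocks belong to JtR's
// tiny allocator, so they are never individually freed).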
if (dyna_salt_list_count == 0) {
pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
dyna_salt_list_count = 25000;
}
if (nSaltDataBuf < len) {
pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
nSaltDataBuf = 0x60000;
}
pRet = pNextSaltDataBuf;
pSaltHashDataNext->salt = pNextSaltDataBuf;
memcpy(pSaltHashDataNext->salt, salt, len);
pSaltHashDataNext->len = len;
pNextSaltDataBuf += len;
nSaltDataBuf -= len;
if (SaltHashTab[idx].List.count == 0)
SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
else {
SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
SaltHashTab[idx].List.tail = pSaltHashDataNext;
}
++SaltHashTab[idx].List.count;
++pSaltHashDataNext;
--dyna_salt_list_count;
return pRet;
}
static unsigned char *FindSaltHash(unsigned char *salt, unsigned int len, CRC32_t crc)
{
unsigned int idx = crc & DYNA_SALT_HASH_MOD;
dyna_salt_list_entry *p;
if (!SaltHashTab)
SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);
if (!SaltHashTab[idx].List.count) {
return AddSaltHash(salt, len, idx);
}
// Ok, we have some salts in this hash list. Now walk the list, searching for an EQUAL salt.
p = SaltHashTab[idx].List.head;
while (p) {
if (len == p->len && !memcmp((char*)salt, (char*)p->salt, len)) {
return p->salt; // found it! return this one, so we do not allocate another.
}
p = p->next;
}
return AddSaltHash(salt, len, idx);
}
static unsigned char *HashSalt(unsigned char *salt, unsigned int len)
{
CRC32_t crc = 0xffffffff, i;
unsigned char *ret_hash;
// compute the hash.
for (i = 0; i < len; ++i)
crc = jtr_crc32(crc,salt[i]);
crc = ~crc;
ret_hash = FindSaltHash(salt, len, crc);
return ret_hash;
}
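// If the buffer starts with "HEX$", convert the hex digits in place and return the new length
// (e.g. "HEX$6162" becomes the two raw bytes "ab" and 2 is returned); otherwise len is returned unchanged.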
static int ConvertFromHex(unsigned char *p, int len)
{
unsigned char *cp;
unsigned int i, x;
if (!p || memcmp(p, "HEX$", 4))
return len;
// Ok, do a convert, and return 'new' len.
len -= 4;
len >>= 1;
cp = p;
x = len;
for (i=4; x; --x, i+= 2) {
*cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])];
}
*cp = 0;
return len;
}
static unsigned int salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer)
{
// Ok, we get this: extern_salt = salt_data$$2salt2$$Uuser ... where anything can be missing or in any order
// the 'any order' rule has 1 exception: salt_data MUST come first. So if the string starts with $$2salt2, then we know there is no salt-1 value.
unsigned char *salt2=0, *userid=0, *Flds[10];
int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0};
unsigned int len = strlen((char*)extern_salt), bit;
unsigned int bit_array=0;
unsigned int the_real_len = 6; // 2 bytes base-8 length, and 4 bytes base-8 bitmap.
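// Internal layout built below: 2 base-8 digits for the salt-1 length, 4 base-8 digits for the bitmap
// of extra values, the raw salt-1 bytes, then one pascal-style (length byte + data) string for each
// extra value the bitmap flags (salt2, username, fields 0-9, in that order).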
// work from back of string to front, looking for the $$X signatures.
for (i = len-3; i >= 0; --i) {
if (extern_salt[i] == '$' && extern_salt[i+1] == '$') {
// a 'likely' extra salt value.
switch(extern_salt[i+2]) {
case '2':
if (curdat.b2Salts) {
salt2 = &extern_salt[i+3];
nsalt2 = strlen((char*)salt2);
nsalt2 = ConvertFromHex(salt2, nsalt2);
extern_salt[i] = 0;
bit_array |= 1;
the_real_len += (nsalt2+1);
}
break;
case 'U':
if (curdat.nUserName) {
userid = &extern_salt[i+3];
nuserid = strlen((char*)userid);
nuserid = ConvertFromHex(userid, nuserid);
extern_salt[i] = 0;
bit_array |= 2;
the_real_len += (nuserid+1);
}
break;
case 'F': {
if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') {
if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) {
Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4];
nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0']));
nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']);
extern_salt[i] = 0;
bit_array |= (1<<(2+extern_salt[i+3]-'0'));
the_real_len += (nFlds[extern_salt[i+3]-'0']+1);
}
break;
}
}
}
}
}
// We have now ripped the data apart. Now put it into Buffer, in proper ORDER
// Length of salt (salt1). These 2 digits are stored as base-8 numbers.
len = strlen((char*)extern_salt);
len = ConvertFromHex(extern_salt, len);
the_real_len += len;
*Buffer++ = (len>>3) + '0';
*Buffer++ = (len&7) + '0';
// bit array
*Buffer++ = (bit_array>>9) + '0';
*Buffer++ = ((bit_array>>6)&7) + '0';
*Buffer++ = ((bit_array>>3)&7) + '0';
*Buffer++ = (bit_array&7) + '0';
memcpy((char*)Buffer, (char*)extern_salt, len);
Buffer += len;
if (!bit_array)
return the_real_len;
if (nsalt2) {
*Buffer++ = nsalt2;
memcpy((char*)Buffer, (char*)salt2, nsalt2);
Buffer += nsalt2;
bit_array &= ~1;
if (!bit_array)
return the_real_len;
}
if (nuserid) {
*Buffer++ = nuserid;
memcpy((char*)Buffer, (char*)userid, nuserid);
if (curdat.nUserName==2) {
Buffer[nuserid] = 0;
strupr((char*)Buffer);
} else if (curdat.nUserName==3) {
Buffer[nuserid] = 0;
strlwr((char*)Buffer);
}
Buffer += nuserid;
bit_array &= ~2;
if (!bit_array)
return the_real_len;
}
bit = 4;
for (i = 0; i < 10; ++i, bit<<=1) {
if (nFlds[i]) {
*Buffer++ = nFlds[i];
memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]);
Buffer += nFlds[i];
bit_array &= ~bit;
if (!bit_array)
return the_real_len;
}
}
return the_real_len;
}
/*********************************************************************************
* This salt function has been TOTALLY re-written. Now, we do these things:
* 1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc.) into
* our internal format. Our internal format is 2 base-8 numbers (2 digit and 4
* digit), followed by the 'raw' salt bytes, followed by pascal strings of any
* other special salt values (salt2, user, fields 0 to 9). The first 2 digit
* base 8 number is the length of the binary bytes of the 'real' salt. The
* 2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are
* contained.
* 2. We allocate and 'own' the salt buffers here, so that:
* 3. We detect duplicate salts. NOTE, we have normalized the salts, so 2 salts that
* appear different (external format), appear exactly the same on internal format.
* Thus, we dupe remove them here.
* 4. We allocate storage for the salts. The ONLY thing we return to john is
* a 4 (or 8 byte in 64 bit builds) pointer to the salt. Thus, when we find
* a dupe, we do not have to allocate ANY memory, and simply return the pointer
* to the original salt (which is the same as the one we are working on now).
*
* this is much more complex, however, it allows us to use much less memory, to
* have the set_salt function operate VERY quickly (all processing is done here).
* It also allows john's load time to be FASTER (yes faster) than it was, due to the
* smaller memory footprint, and john's external salt collision handling has
* less work to do. The memory footprint was also reduced, because now we store
* JUST the required memory, and a pointer. Before, we often stored a LOT of memory
* for many format types. For a few types, we do use more memory with this method
* than before, but for most the memory usage is way down.
*********************************************************************************/
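/* Illustrative example (assuming a format with the username flag set): the external salt
 * "abcd$$Ubob" is converted to the internal bytes '0','4' (salt length 4 in base-8),
 * '0','0','0','2' (bitmap with only the username bit set), 'a','b','c','d', then the
 * pascal string 0x03,'b','o','b', for a total internal length of 14 bytes. */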
static void *get_salt(char *ciphertext)
{
char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1];
int off, possible_neg_one=0;
unsigned char *saltp;
unsigned int the_real_len;
static union x {
unsigned char salt_p[sizeof(unsigned char*)];
ARCH_WORD p[1];
} union_x;
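// John only ever sees a pointer-sized 'salt': union_x.salt_p carries the address of the
// deduplicated internal salt record that HashSalt() returns below.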
if ( (curdat.pSetup->flags&MGF_SALTED) == 0) {
memset(union_x.salt_p, 0, sizeof(union_x.salt_p));
return union_x.salt_p;
}
memset(Salt, 0, SALT_SIZE+1);
// Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type).
if (!strncmp(ciphertext, "$dynamic_", 9)) {
char *cp1 = &ciphertext[9];
char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9];
while (*cp2 && *cp2 == *cp1) {
++cp1; ++cp2;
}
if (*cp2) {
char subformat[17];
struct fmt_main *pFmtLocal;
int nFmtNum;
memcpy(subformat, ciphertext, 16);
subformat[16] = 0;
cp2 = &subformat[9];
while (*cp2 && *cp2 != '$')
++cp2;
*cp2 = 0;
nFmtNum = -1;
sscanf(subformat, "$dynamic_%d", &nFmtNum);
if (nFmtNum==-1)
return union_x.salt_p;
pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data));
}
}
if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask)
return union_x.salt_p;
if (!strncmp(ciphertext, "$dynamic_", 9))
off=curdat.dynamic_SALT_OFFSET;
else
off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG);
if (ciphertext[off] == '$') {
if (ciphertext[off+1]=='U' && curdat.nUserName)
possible_neg_one = -1;
else if (ciphertext[off+1]=='2' && curdat.b2Salts)
possible_neg_one = -1;
else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) {
if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0')))
possible_neg_one = -1;
}
}
strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE);
if (curdat.dynamic_salt_as_hex)
{
unsigned char Buf[128];
unsigned int slen=strlen(Salt);
switch (curdat.dynamic_salt_as_hex_format_type) {
// TODO: Come up with some way to put these into a CASE(HASH) #define
#define SPH_CASE(H,F,S) case MGF__##H: {sph_##F##_context c;sph_##F##_init(&c);sph_##F(&c,(const unsigned char*)Salt,slen);sph_##F##_close(&c,Buf); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define OSSL_CASE(H,C,S) case MGF__##H: {C##_CTX c;H##_Init(&c);H##_Update(&c,Salt,slen);H##_Final(Buf,&c); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define KECCAK_CASE(H,S) case MGF__##H: {KECCAK_CTX c;H##_Init(&c);KECCAK_Update(&c,(BitSequence*)Salt,slen);KECCAK_Final(Buf,&c); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
case MGF__MD5:
{
			// Do not 'worry' about SSE/MMX; only do 'generic' MD5 here. This is ONLY done
			// at load time, so we will NEVER see this code run once john has started cracking.
MD5_CTX ctx;
int i;
char *cpo;
MD5_Init(&ctx);
if (curdat.dynamic_salt_as_hex & 0x100)
{
char *s2 = mem_alloc(slen*2+1);
for (i = 0; i < slen; ++i)
{
s2[i<<1] = Salt[i];
s2[(i<<1)+1] = 0;
}
MD5_Update(&ctx, s2, slen*2);
MEM_FREE(s2);
}
else
MD5_Update(&ctx, Salt, slen);
MD5_Final(Buf, &ctx);
if ( (curdat.dynamic_salt_as_hex&3) == 2) {
strcat(Salt, "$$2");
cpo = &Salt[slen+3];
}
else {
cpo = Salt;
memset(Salt, 0, SALT_SIZE+1);
}
base64_convert(Buf, e_b64_raw, 16, cpo, e_b64_hex, SALT_SIZE, 0, 0);
break;
}
OSSL_CASE(MD4,MD4,16)
OSSL_CASE(SHA1,SHA,20)
OSSL_CASE(SHA224,SHA256,28)
OSSL_CASE(SHA256,SHA256,32)
OSSL_CASE(SHA384,SHA512,48)
OSSL_CASE(SHA512,SHA512,64)
OSSL_CASE(WHIRLPOOL,WHIRLPOOL,64)
case MGF__GOST:
{
gost_ctx ctx;
john_gost_init(&ctx);
john_gost_update(&ctx, (const unsigned char*)Salt, slen);
john_gost_final(&ctx, (unsigned char*)Buf);
memset(Salt, 0, SALT_SIZE+1);
base64_convert(Buf, e_b64_raw, 32, Salt, e_b64_hex, SALT_SIZE, 0, 0);
break;
}
SPH_CASE(Tiger,tiger,24)
SPH_CASE(RIPEMD128,ripemd128,16)
SPH_CASE(RIPEMD160,ripemd160,20)
SPH_CASE(RIPEMD256,ripemd256,32)
SPH_CASE(RIPEMD320,ripemd320,40)
SPH_CASE(HAVAL128_3,haval128_3,16)
SPH_CASE(HAVAL128_4,haval128_4,16)
SPH_CASE(HAVAL128_5,haval128_5,16)
SPH_CASE(HAVAL160_3,haval160_3,20)
SPH_CASE(HAVAL160_4,haval160_4,20)
SPH_CASE(HAVAL160_5,haval160_5,20)
SPH_CASE(HAVAL192_3,haval192_3,24)
SPH_CASE(HAVAL192_4,haval192_4,24)
SPH_CASE(HAVAL192_5,haval192_5,24)
SPH_CASE(HAVAL224_3,haval224_3,28)
SPH_CASE(HAVAL224_4,haval224_4,28)
SPH_CASE(HAVAL224_5,haval224_5,28)
SPH_CASE(HAVAL256_3,haval256_3,32)
SPH_CASE(HAVAL256_4,haval256_4,32)
SPH_CASE(HAVAL256_5,haval256_5,32)
SPH_CASE(MD2,md2,16)
SPH_CASE(PANAMA,panama,32)
SPH_CASE(SKEIN224,skein224,28)
SPH_CASE(SKEIN256,skein256,32)
SPH_CASE(SKEIN384,skein384,48)
SPH_CASE(SKEIN512,skein512,64)
KECCAK_CASE(SHA3_224,28)
KECCAK_CASE(SHA3_256,32)
KECCAK_CASE(SHA3_384,48)
KECCAK_CASE(SHA3_512,64)
KECCAK_CASE(KECCAK_256,32)
KECCAK_CASE(KECCAK_512,64)
// LARGE_HASH_EDIT_POINT
default:
{
error_msg("Invalid dynamic flags seen. Data type not yet defined\n");
}
}
}
the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf);
// Now convert this into a stored salt, or find the 'already' stored same salt.
saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len);
memcpy(union_x.salt_p, &saltp, sizeof(saltp));
return union_x.salt_p;
}
/*********************************************************************************
 * Now our salt is returned only as a pointer, so we simply hash the pointer value itself.
*********************************************************************************/
static int salt_hash(void *salt)
{
unsigned long H;
if (!salt) return 0;
if ( (curdat.pSetup->flags&MGF_SALTED) == 0)
return 0;
	// salt is now a pointer, but WORD aligned. We remove that word alignment, and simply use the next bits
H = *((unsigned long*)salt);
	// Mix up the pointer value (H^(H>>9)) so that, even with a fixed-size allocator,
	// things get 'stirred' up better.
return ( (H^(H>>9)) & (SALT_HASH_SIZE-1) );
}
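/*
 * For reference (a reading of the parsing code below, not an authoritative
 * spec): the internal salt string that the stored salt pointer refers to is
 * laid out as
 *   bytes 0-1  length of the main salt, as two base-8 ASCII digits
 *   bytes 2-5  four base-8 ASCII digits forming a bit mask of the optional
 *              salt blocks (second salt, user name, extra fields) that
 *              follow; "0000" means only the main salt is present
 *   bytes 6..  the main salt data
 *   then, for each bit set in the mask, one raw length byte followed by
 *   that many bytes of that optional block.
 * This is also why salt_compare() below starts its memcmp() at offset 6.
 */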
static unsigned dynamic_this_salt_length(const void *v) {
const unsigned char *s = (unsigned char*)v;
unsigned l = *s++ - '0';
unsigned bits;
l <<= 3;
l += *s++ - '0';
#if ARCH_ALLOWS_UNALIGNED
if (*((ARCH_WORD_32*)s) == 0x30303030)
#else
if (!memcmp(s, "0000", 4))
#endif
return l;
bits = *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
s += l;
while(bits) {
if (bits & 1) {
l += *s;
s += *s;
++s;
}
bits >>= 1;
}
return l;
}
/*
 * dyna salt_compare() is required so that the shortest salt
 * strings come first, then the next longer, and so on up to
 * the longest. Without this ordering, many dyna formats would
 * miss hashes, because stale salt data left over in the buffers
 * would corrupt later runs. Many formats try not to clear
 * buffers when they do not need to, BUT that only works if
 * salts are taken shortest to longest. This sort builds the
 * list of salts in exactly that order.
 */
static int salt_compare(const void *x, const void *y)
{
/* this is all that is needed in dyna salt_compare().
Dyna is a pointer to a string, NOT the actual string.
The first 2 bytes of string are length (base 8 ascii) */
const char *X = *((const char**)x);
const char *Y = *((const char**)y);
int l1, l2, l;
if (*X<*Y) return -1;
if (*X>*Y) return 1;
if (X[1]<Y[1]) return -1;
if (X[1]>Y[1]) return 1;
	// we had to make the salt order 100% deterministic, so that intersalt-restore works reliably across runs
l = l1 = dynamic_this_salt_length(X);
l2 = dynamic_this_salt_length(Y);
if (l2 < l) l = l2;
l = memcmp(&X[6], &Y[6], l);
if (l) return l;
if (l1==l2) return 0;
if (l1 > l2) return 1;
return -1;
}
void dynamic_salt_md5(struct db_salt *s) {
MD5_CTX ctx;
int len;
const char *S = *((const char**)s->salt);
MD5_Init(&ctx);
len = dynamic_this_salt_length(S);
MD5_Update(&ctx, S + 6, len);
MD5_Final((unsigned char*)(s->salt_md5), &ctx);
}
/*********************************************************************************
* Gets the binary value from a base-16 hash.
*********************************************************************************/
static void *get_binary(char *_ciphertext)
{
static char *realcipher;
unsigned int i;
char *ciphertext = _ciphertext;
if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD);
if (!strncmp(_ciphertext, "$dynamic_", 9)) {
ciphertext += 9;
while (*ciphertext++ != '$')
;
}
for(i=0;i<BINARY_SIZE;i++)
{
realcipher[i] =
atoi16[ARCH_INDEX(ciphertext[i*2])]*16 +
atoi16[ARCH_INDEX(ciphertext[i*2+1])];
}
return (void *)realcipher;
}
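// Note: realcipher is a single static buffer reused on every call, so its
// contents are overwritten by the next get_binary() call.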
// NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!!
static char *source(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 16; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_20_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 20; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_28_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 28; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_32_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 32; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_40_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 40; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_48_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 48; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_64_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 64; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
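// source() and the source_NN_hex() variants above differ only in how many
// binary bytes they expand back to hex (16, 20, 28, 32, 40, 48 or 64).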
/*********************************************************************************
* Gets the binary value from a base-64 hash
*********************************************************************************/
static void * binary_b64m(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_mime, 0, 0);
base64_convert(pos, e_b64_mime, i, b, e_b64_raw, 64+3, 0, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
static void * binary_b64(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_crypt, 0, 0);
base64_convert(pos, e_b64_cryptBS, i, b, e_b64_raw, 64+3, 0, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
static void * binary_b64b(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_crypt, 0, 0);
base64_convert(pos, e_b64_crypt, i, b, e_b64_raw, 64+3, 0, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
#define TO_BINARY(b1, b2, b3) \
value = \
(MD5_word)atoi64[ARCH_INDEX(pos[0])] | \
((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
b[b1] = value >> 16; \
b[b2] = value >> 8; \
b[b3] = value;
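/*
 * Illustration of the TO_BINARY macro above (a reading of the code, not a
 * new definition): four base-64 characters are packed least-significant first,
 *   value = c0 | (c1 << 6) | (c2 << 12) | (c3 << 18)
 * and the three decoded bytes are scattered to the given output positions,
 *   b[b1] = bits 16..23,  b[b2] = bits 8..15,  b[b3] = bits 0..7
 * which matches the crypt(3)-style MD5 output ordering that binary_b64a()
 * below decodes.
 */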
static void * binary_b64a(char *ciphertext)
{
static unsigned char *b;
char *pos;
MD5_word value;
if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
TO_BINARY(0, 6, 12);
TO_BINARY(1, 7, 13);
TO_BINARY(2, 8, 14);
TO_BINARY(3, 9, 15);
TO_BINARY(4, 10, 5);
b[11] =
(MD5_word)atoi64[ARCH_INDEX(pos[0])] |
((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6);
MD5_swap((MD5_word*)b,(MD5_word*)b, 4);
return b;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash (such as cisco PIX)
*********************************************************************************/
static void * binary_b64_4x6(char *ciphertext)
{
static ARCH_WORD_32 *b;
unsigned int i;
char *pos;
if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
for(i = 0; i < 4; i++) {
b[i] =
atoi64[ARCH_INDEX(pos[i*4 + 0])] +
(atoi64[ARCH_INDEX(pos[i*4 + 1])] << 6) +
(atoi64[ARCH_INDEX(pos[i*4 + 2])] << 12) +
(atoi64[ARCH_INDEX(pos[i*4 + 3])] << 18);
}
MD5_swap(b,b, 4);
return (void *)b;
}
/*********************************************************************************
* Here is the main mdg_generic fmt_main. NOTE in its default settings, it is
* ready to handle base-16 hashes.
*********************************************************************************/
static struct fmt_main fmt_Dynamic =
{
{
FORMAT_LABEL,
FORMAT_NAME,
#ifdef SIMD_COEF_32
ALGORITHM_NAME,
#else
ALGORITHM_NAME_X86,
#endif
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
#ifdef SIMD_COEF_32
PLAINTEXT_LENGTH,
#else
PLAINTEXT_LENGTH_X86,
#endif
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
#ifdef SIMD_COEF_32
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#else
MIN_KEYS_PER_CRYPT_X86,
MAX_KEYS_PER_CRYPT_X86,
#endif
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT,
{ NULL },
{ NULL },
dynamic_tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
salt_compare,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
 * These are the md5 'primitive' functions used by the
 * built-in expressions and by the expression generator.
 * They load passwords, salts, and user ids, perform crypts,
 * convert crypts into base-16, etc. They are fairly
 * comprehensive, and can handle most anything built from a
 * standard 'base-16' md5 hash, salted or unsalted, that
 * fits a 'simple' php-style expression.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
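/*
 * Illustrative sketch only: a simple salted expression such as md5($p.$s)
 * is built by chaining these primitives roughly as
 *     DynamicFunc__clean_input(...);
 *     DynamicFunc__append_keys(...);   // $p
 *     DynamicFunc__append_salt(...);   // $s
 *     ...followed by one of the crypt primitives.
 * The actual function chains come from the format's setup data, not from
 * here; this only shows how the helpers below are meant to be chained.
 */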
static void Dynamic_Load_itoa16_w2()
{
char buf[3];
unsigned int i;
for (i = 0; i < 256; ++i)
{
sprintf(buf, "%X%X", i>>4, i&0xF);
memcpy(&(itoa16_w2_u[i]), buf, 2);
sprintf(buf, "%x%x", i>>4, i&0xF);
memcpy(&(itoa16_w2_l[i]), buf, 2);
}
}
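/*
 * After the loader above runs, itoa16_w2_u[b] / itoa16_w2_l[b] hold the two
 * upper-case / lower-case hex digits of byte b packed into a single 16-bit
 * slot, which is what lets the SSE helpers below emit a whole hex pair with
 * one 16-bit store (the itoa16_w2[*CRY++] pattern).
 */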
#ifdef SIMD_COEF_32
/**************************************************************
**************************************************************
 * Here are some 'helpers' to our helpers for loading data
 * into the mmx/sse buffers. We have several of these common
 * helper functions and use them in 'most' of the helper
 * primitives, instead of inlining the same code in each.
**************************************************************
*************************************************************/
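/*
 * A note on the layout these helpers assume: the SIMD input buffers are
 * interleaved SIMD_COEF_32 lanes wide, so two consecutive 32-bit words that
 * belong to the SAME candidate sit SIMD_COEF_32 dwords apart in memory.
 * That is why the pointer arithmetic below steps by SIMD_COEF_32 (or an
 * 'inc' derived from it) between words, and why GETPOS(offset, lane) is
 * used to locate a single byte for a given lane (GETPOS is defined elsewhere).
 */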
static void __SSE_append_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #3
// 5955K (core2, $dynamic_2$)
// 1565K (core2, $dynamic_1006$)
// 3381K (ath64, $dynamic_2$)
// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
unsigned short *IPBw = (unsigned short*)IPBdw;
IPBw += (idx_mod<<1);
CRY += (idx_mod<<2);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw = 0x80;
#undef inc
}
static void __SSE_overwrite_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #3
// 5955K (core2, $dynamic_2$)
// 1565K (core2, $dynamic_1006$)
// 3381K (ath64, $dynamic_2$)
// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
unsigned short *IPBw = (unsigned short *)IPBdw;
IPBw += (idx_mod<<1);
CRY += (idx_mod<<2);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
#undef inc
}
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #1
// 9586k/4740k (core2, $dynamic_9$)
// 5113k/4382k (core2,$dynamic_10$)
// (ath64, $dynamic_9$)
// (ath64, $dynamic_10$)
# define inc SIMD_COEF_32
# define incCRY ((SIMD_COEF_32 - 1) * 4)
// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
// in the middle of the last one).
// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
IPBdw += idx_mod;
IPBdw += (ip>>2)*SIMD_COEF_32;
CRY += (idx_mod<<2);
// first byte handled here.
*IPBdw &= 0xFFFF;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
// Add the 0x80 at the proper location (offset 0x21)
*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #2
// 6083k (core2, $dynamic_2$)
// 1590K (core2, $dynamic_1006$)
// 3537K (ath64, $dynamic_2$)
// 890.3K (ath64, $dynamic_1006$)
#undef inc
#define inc SIMD_COEF_32
#define incCRY (4*SIMD_COEF_32-2)
// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
IPBdw += idx_mod;
IPBdw += (ip>>2)*SIMD_COEF_32;
CRY += (idx_mod<<2);
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
// Add the 0x80 at the proper location (offset 0x21)
IPBdw += inc;
*IPBdw = 0x80;
#undef inc
#undef incCRY
}
static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
unsigned char *cpO;
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>1&&!(bf_ptr&1))
{
unsigned int w32_cnt;
if(bf_ptr&2) {
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
bf_ptr += 2;
*cpO = *cp++;
cpO[1] = 0;
--len;
}
w32_cnt = len>>1;
if (w32_cnt)
{
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<1);
bf_ptr += (w32_cnt<<2);
do
{
ARCH_WORD_32 x = 0;
x = cp[1];
x <<= 16;
x += cp[0];
*wpO = x;
cp += 2;
wpO += SIMD_COEF_32;
}
while (--w32_cnt);
}
}
#endif
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
*cpO++ = 0;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
static void __SSE_append_string_to_input(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
unsigned char *cpO;
	// if our insertion point is on an 'even' DWORD boundary, then we use DWORD-at-a-time copying as long as we can.
	// This provides quite a nice speedup.
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>3&&(bf_ptr&3)) {
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0) {
if (!len) {
if (bUpdate0x80)
*cpO = 0x80;
return;
}
break;
}
}
}
if (len>3&&!(bf_ptr&3))
{
unsigned int w32_cnt = len>>2;
if (w32_cnt)
{
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<2);
bf_ptr += (w32_cnt<<2);
do
{
*wpO = *((ARCH_WORD_32*)cp);
cp += 4;
wpO += SIMD_COEF_32;
}
while (--w32_cnt);
}
if (!len) {
if (bUpdate0x80)
IPB[GETPOS(bf_ptr, idx_mod)] = 0x80;
return;
}
}
#endif
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
#endif // #ifdef SIMD_COEF_32 from way above.
static inline void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
unsigned int j;
unsigned int til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (!utf16) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += len;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so we cap it there for now
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += outlen;
					// note we use the 'non' unicode variant, since the data is already UTF-16 and its length is already computed
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += len << 1;
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len_X86[j] += outlen;
}
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = Str;
if (total_len_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (len<<1);
}
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), Str, len);
total_len_X86[j] += len;
}
}
}
static inline void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
unsigned int j;
unsigned int til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (!utf16) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += len;
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so we cap it there for now
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += outlen;
					// note we use the 'non' unicode variant of __SSE_append_string_to_input(), since the data is already UTF-16 and its length is already correct
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += len << 1;
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len2_X86[j] += outlen;
}
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = Str;
if (total_len2_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (len<<1);
}
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len);
total_len2_X86[j] += len;
}
}
}
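// The two helpers above mirror each other: __append_string() targets
// input_buf/total_len (and their X86 counterparts) while __append2_string()
// targets input_buf2/total_len2.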
void DynamicFunc__setmode_unicodeBE(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(2,tid);
}
void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(1,tid);
}
void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(0,tid);
}
/**************************************************************
* DYNAMIC primitive helper function
* Clears the input variable, and input 'lengths'
*************************************************************/
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input();
#else
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2();
#else
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_full();
#else
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_full();
#else
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef SIMD_COEF_32
unsigned int i;
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y)
memset(total_len[x++], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
return;
}
#else
unsigned int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef SIMD_COEF_32
unsigned int i;
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y)
memset(total_len2[x++], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
return;
}
#else
unsigned int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
total_len2_X86[i] = 0;
}
#endif
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so we cap it there for now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len[idx][idx_mod] += outlen;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len[idx][idx_mod] += (saved_key_len[j] << 1);
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
// only copy data if it will NOT trash the buffer
if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE)
{
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len_X86[j] += outlen;
}
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
if (total_len_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (saved_key_len[j]<<1);
}
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
total_len_X86[j] += saved_key_len[j];
}
}
}
// DynamicFunc__append_keys_pad16
//    append the array of keys to the array input1[], padding with nulls to 16 bytes if the input is shorter.
// Needed for net-md5 and net-sha1 formats.
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 16) {
char buf[24];
strncpy(buf, saved_key[j], 18);
total_len[idx][idx_mod] += 16;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1);
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17);
total_len_X86[j] += 16;
}
}
void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 20) {
char buf[28];
strncpy(buf, saved_key[j], 22);
total_len[idx][idx_mod] += 20;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1);
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21);
total_len_X86[j] += 20;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_keys2(DYNA_OMP_PARAMS)
{
unsigned int j, til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so we cap it there for now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len2[idx][idx_mod] += outlen;
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len2[idx][idx_mod] += (saved_key_len[j] << 1);
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len2[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
// only copy data if it will NOT trash the buffer
				if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len2_X86[j] += outlen;
}
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
if (total_len2_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (saved_key_len[j]<<1);
}
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
total_len2_X86[j] += saved_key_len[j];
}
}
}
void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len[j][k];
if (this_item_len < 16)
input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 16;
}
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 16;
}
}
void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len2[j][k];
if (this_item_len < 16)
input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf2[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 16;
}
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 16;
}
}
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len[j][k];
if (this_item_len < 20)
input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 20;
}
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 20;
}
}
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len2[j][k];
if (this_item_len < 20)
input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf2[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 20;
}
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 20;
}
}
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len_X86[j] = 32;
}
void DynamicFunc__set_input_len_32_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 32;
}
}
return;
}
#endif
for (; j < til; ++j) {
total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len2_X86[j] = 32;
}
void DynamicFunc__set_input2_len_32_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf2[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 32;
}
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len_X86[j] = 40;
}
void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len2_X86[j] = 40;
}
void DynamicFunc__set_input2_len_40_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf2[j].c[GETPOS(40, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 40;
}
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
}
else
#endif
{
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16);
}
#endif
}
}
void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 64;
}
void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 64;
}
void DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j) {
unsigned char *cp;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
while (*cp)
*cp++ = 0;
total_len_X86[j] = 100;
}
}
void DynamicFunc__set_input_len_24(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_24 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 24;
}
void DynamicFunc__set_input_len_28(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_28 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 28;
}
void DynamicFunc__set_input_len_48(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_48 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 48;
}
void DynamicFunc__set_input_len_56(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_56 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 56;
}
void DynamicFunc__set_input_len_80(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_80 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 80;
}
void DynamicFunc__set_input_len_96(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_96 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 96;
}
void DynamicFunc__set_input_len_112(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_112 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 112;
}
void DynamicFunc__set_input_len_128(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_128 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 128;
}
void DynamicFunc__set_input_len_160(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_160 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 160;
}
void DynamicFunc__set_input_len_192(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_192 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 192;
}
void DynamicFunc__set_input_len_256(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_256 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 256;
}
void DynamicFunc__set_input2_len_24(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_24 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 24;
}
void DynamicFunc__set_input2_len_28(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_28 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 28;
}
void DynamicFunc__set_input2_len_48(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_48 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 48;
}
void DynamicFunc__set_input2_len_56(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_56 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 56;
}
void DynamicFunc__set_input2_len_80(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_80 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 80;
}
void DynamicFunc__set_input2_len_96(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_96 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 96;
}
void DynamicFunc__set_input2_len_112(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_112 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 112;
}
void DynamicFunc__set_input2_len_128(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_128 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 128;
}
void DynamicFunc__set_input2_len_160(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_160 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 160;
}
void DynamicFunc__set_input2_len_192(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_192 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 192;
}
void DynamicFunc__set_input2_len_256(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_256 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 256;
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
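/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the contents of the 2nd input buffer to the end of
 * the 1st input buffer, and updates the 1st buffer's lengths.
 *************************************************************/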
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len[i][j];
unsigned int len1 = total_len2[i][j];
for (k = 0; k < len1; ++k)
input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
input_buf[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len[i][j] += len1;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
else
#endif
memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
total_len_X86[i] += total_len2_X86[i];
}
}
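/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the contents of the 1st input buffer to the end of
 * the 2nd input buffer, and updates the 2nd buffer's lengths.
 *************************************************************/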
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len2[i][j];
unsigned int len1 = total_len[i][j];
for (k = 0; k < len1; ++k)
input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len2[i][j] += len1;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
else
#endif
memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
total_len2_X86[i] += total_len_X86[i];
}
}
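/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 1st input buffer to itself (doubling its
 * contents), and doubles the stored length.
 *************************************************************/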
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len[i][j];
for (k = 0; k < start_len; ++k)
input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;
total_len[i][j] += start_len;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
else
#endif
memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
total_len_X86[i] <<= 1;
}
}
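/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 2nd input buffer to itself (doubling its
 * contents), and doubles the stored length.
 *************************************************************/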
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len2[i][j];
for (k = 0; k < start_len; ++k)
input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;
total_len2[i][j] += start_len;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
else
#endif
memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
total_len2_X86[i] <<= 1;
}
}
#ifdef SIMD_PARA_MD5
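// Stores each lane's bit length (len << 3) into word 14 of that lane's
// 64-byte MD5 block, for input_buf (side 0) or input_buf2 (side 1),
// across all SIMD_PARA_MD5 interleaved blocks starting at index i.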
static void SSE_Intrinsics_LoadLens_md5(int side, int i)
{
ARCH_WORD_32 *p;
unsigned int j, k;
if (side == 0)
{
for (j = 0; j < SIMD_PARA_MD5; j++)
{
p = input_buf[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
}
}
else
{
for (j = 0; j < SIMD_PARA_MD5; j++)
{
p = input_buf2[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
}
}
}
#endif
#ifdef SIMD_PARA_MD4
static void SSE_Intrinsics_LoadLens_md4(int side, int i)
{
ARCH_WORD_32 *p;
unsigned int j, k;
if (side == 0)
{
for (j = 0; j < SIMD_PARA_MD4; j++)
{
p = input_buf[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
}
}
else
{
for (j = 0; j < SIMD_PARA_MD4; j++)
{
p = input_buf2[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
}
}
}
#endif
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the first input field. The data is
* still in the binary encrypted format, in the crypt_key.
 * We do not yet convert to base-16. This is so we can output
 * as base-16 now or, if we later add base-64, output in that
 * format instead.
*************************************************************/
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD5) {
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(0, i);
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
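/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the first input field with MD4. As with
 * MD5, the result is left in binary form in crypt_key.
 *************************************************************/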
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD4) {
SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(0, i);
SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
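/**************************************************************
 * DYNAMIC primitive helper function
 * Specialized 'PO' crypt: builds salt(32) . 'Y' . key . 0xf7 .
 * salt(32) directly in input 1 and MD5s it into crypt_key.
 * Equivalent to the inlined sequence of primitives listed in
 * the comments below.
 *************************************************************/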
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
unsigned int i, j;
unsigned int til, len;
unsigned char *pBuf;
#if MD5_X2
unsigned char *pBuf2;
unsigned int lens[2];
#endif
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
//DynamicFunc__clean_input_kwik();
//DynamicFunc__append_salt,
//DynamicFunc__append_input1_from_CONST1,
//DynamicFunc__append_keys,
//DynamicFunc__append_input1_from_CONST2,
//DynamicFunc__append_salt,
//DynamicFunc__crypt_md5,
pBuf = input_buf_X86[i>>MD5_X2].x1.B;
#if MD5_X2
pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
memcpy(pBuf2, cursalt, 32);
pBuf2[32] = 'Y';
#endif
memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
memcpy(pBuf, cursalt, 32);
pBuf[32] = 'Y';
for (j = i; j < til; ++j) {
len = saved_key_len[j];
memcpy(&pBuf[33], saved_key[j], len);
pBuf[33+len] = 0xf7;
memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
lens[0] = len+66; // len from the 'first'
++j;
if (j < m_count) {
len = saved_key_len[j];
memcpy(&pBuf2[33], saved_key[j], len);
pBuf2[33+len] = 0xf7;
memcpy(&pBuf2[34+len], cursalt, 32);
lens[1] = len+66;
} else {
lens[1] = 0;
}
DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys2.
*************************************************************/
void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(1, i);
SIMDmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i < m_count)
len[1] = total_len2_X86[i];
else
len[1] = 0;
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(1, i);
SIMDmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
/**************************************************************
* DYNAMIC primitive helper function
 * Encrypts the data in the 1st input field into crypt_keys2.
*************************************************************/
void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD5) {
SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(0, i);
SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD4) {
SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(0, i);
SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys.
*************************************************************/
void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
SSE_Intrinsics_LoadLens_md5(1, i);
SIMDmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
//dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD4)
{
SSE_Intrinsics_LoadLens_md4(1, i);
SIMDmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
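/**************************************************************
 * DYNAMIC primitive helper function
 * MD5s input 1 and writes the 16 raw (binary) result bytes back
 * into input 1, setting every length to 16.
 *************************************************************/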
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j, k;
SSE_Intrinsics_LoadLens_md5(0, i);
// NOTE: since each crypt_key lane is 16 bytes and each input_buf lane is
// 64 bytes, and we process SIMD_PARA_MD5 blocks at a time, we can NOT
// write directly to the input buffer; we use the crypt_key buffer and
// then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
{
memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
for (k = 0; k < SIMD_COEF_32; k++)
total_len[i+j][k] = 16;
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i];
total_len_X86[i++] = 0x10;
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
total_len_X86[i] = 0x10;
}
}
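// Like DynamicFunc__crypt_md5_to_input_raw, but the stored lengths are
// NOT updated; in SIMD mode the current lengths are loaded into the SSE
// blocks before hashing, and the 16-byte raw result overwrites input 1.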
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j;
SSE_Intrinsics_LoadLens_md5(0, i);
// NOTE: since each crypt_key lane is 16 bytes and each input_buf lane is
// 64 bytes, and we process SIMD_PARA_MD5 blocks at a time, we can NOT
// write directly to the input buffer; we use the crypt_key buffer and
// then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
}
}
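// Like the function above, but the length words already present in the
// SIMD blocks are used as-is (no SSE_Intrinsics_LoadLens_md5 call); the
// flat path uses DoMD5o, which does not change the stored length.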
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j;
// NOTE: since each crypt_key lane is 16 bytes and each input_buf lane is
// 64 bytes, and we process SIMD_PARA_MD5 blocks at a time, we can NOT
// write directly to the input buffer; we use the crypt_key buffer and
// then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
// we call DoMD5o so as to 'not' change the length (it was already set)
DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
}
}
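/**************************************************************
 * DYNAMIC primitive helper function
 * Overwrites the start of input 1 with the salt (converted to
 * UTF-16 when unicode conversion is active), without adjusting
 * the stored lengths. The _input2_ variant below does the same
 * for the 2nd input buffer.
 *************************************************************/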
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[27+1]; // 27 chars is the 'max' that fits in SSE without overflow, so that is where we limit it for now
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
}
} else {
for (; j < til; ++j)
__SSE_append_string_to_input_unicode(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
}
return;
}
for (; j < til; ++j)
__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = input_buf_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[j>>MD5_X2].x1.B;
for (z = 0; z < outlen; ++z)
*cp++ = *cpi++;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
if (j&1)
cp = input_buf_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[j>>MD5_X2].x1.B;
for (z = 0; z < saltlen; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
}
}
return;
}
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
else
#endif
memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
}
}
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[27+1]; // 27 chars is the 'max' that fits in SSE without overflow, so that is where we limit it for now
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
}
} else {
for (; j < til; ++j)
__SSE_append_string_to_input_unicode(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
}
return;
}
for (; j < til; ++j)
__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
return;
}
#endif
if (utf16) {
if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
int outlen;
if (utf16 == 1)
outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
else
outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = input_buf2_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf2_X86[j>>MD5_X2].x1.B;
for (z = 0; z < outlen; ++z)
*cp++ = *cpi++;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
if (j&1)
cp = input_buf2_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf2_X86[j>>MD5_X2].x1.B;
for (z = 0; z < saltlen; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
}
}
return;
}
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
else
#endif
memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output2 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
idx = ( ((unsigned int)j)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
unsigned int i;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output1 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
idx = ( ((unsigned int)j)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
unsigned int i;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys (the encrypted
* 'first' key variable), and use a base-16 text formatting, and
* append this to the first input buffer (adjusting the lengths)
*************************************************************/
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
unsigned int ip;
idx = ( ((unsigned int)j)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][j & (SIMD_COEF_32 - 1)];
total_len[idx][j & (SIMD_COEF_32 - 1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
unsigned int k;
for (k = 0; k < 16; ++k)
{
unsigned char v = crypt_key[idx].c[GETPOS(k, j&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+(k<<1), j&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf[idx].c[GETPOS(ip+32, j&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cp, *cpi;
unsigned int i;
#if MD5_X2
if (j&1)
{cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; }
for (i = 0; i < 16; ++i)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len_X86[j] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys2 (the encrypted
* 'second' key variable), and base-16 appends to the 2nd input
*************************************************************/
void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
unsigned int ip, j;
idx = ( ((unsigned int)i)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][i&(SIMD_COEF_32-1)];
total_len2[idx][i&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (j = 0; j < 16; ++j)
{
unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+(j<<1), i&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf2[idx].c[GETPOS(ip+32, i&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len2_X86[i] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input2 from the output1 data using base-16
 * This is an optimization for when the same thing is done over and
 * over again, such as md5(md5(md5(md5($p)))). There, we would only
 * call the copy-and-set-length variant once, then simply call this copy.
*************************************************************/
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til,j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j = i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
void DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til,j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j = i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf2_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf2_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input2 from the output2 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til,j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j=i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys1 (the encrypted
* 'first' key variable), and base-16 appends to the 2nd input
*************************************************************/
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
total_len2[idx][index&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (i = 0; i < 16; ++i)
{
unsigned char v = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf2[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
else
#endif
{cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len2_X86[i] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in the crypt_keys2 (the encrypted
* 'second' key variable), and base-16 appends to the 1st input
*************************************************************/
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
total_len[idx][index&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (i = 0; i < 16; ++i)
{
unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len_X86[i] += 32;
}
}
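/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 16 raw (binary) bytes of crypt_key2 to input 1,
 * and updates the lengths. The append2_* and *output1* variants
 * below do the same for the other input/output combinations.
 *************************************************************/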
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index, idx;
for (index = i; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index, idx;
for (index = i; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 1
*************************************************************/
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 2
*************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 1
*************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 2
*************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append2_fld4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
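/**************************************************************
 * DYNAMIC primitive helper functions
 * The SSEtoX86/X86toSSE functions below copy the input or
 * output buffers between the interleaved SIMD layout and the
 * flat X86 layout, and flip dynamic_use_sse accordingly
 * (1 == SIMD layout active, 2 == flat layout active).
 *************************************************************/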
void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = input_buf_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = input_buf_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = input_buf_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = input_buf[idx].w;
max = total_len_X86[j] = (total_len[idx][0]);
for (i = 1; i < SIMD_COEF_32; i++)
if (max < (total_len_X86[j+i] = total_len[idx][i]))
max = total_len_X86[j+i];
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
input_buf_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0;
input_buf_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
input_buf_X86[j+i].x1.b[total_len_X86[j+i]] = 0;
#endif
}
#endif
}
void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = input_buf2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = input_buf2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = input_buf2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = input_buf2[idx].w;
max = total_len2_X86[j] = (total_len2[idx][0]);
for (i = 1; i < SIMD_COEF_32; i++)
if (max < (total_len2_X86[j+i] = total_len2[idx][i]))
max = total_len2_X86[j+i];
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
// get rid of the 0x80
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
input_buf2_X86[(j>>1)+(i>>1)].x1.b[total_len2_X86[j+i]] = 0;
input_buf2_X86[(j>>1)+(i>>1)].x2.b2[total_len2_X86[j+i+1]] = 0;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
input_buf2_X86[j+i].x1.b[total_len2_X86[j+i]] = 0;
#endif
}
#endif
}
void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if MD5_X2
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
idx = j/SIMD_COEF_32;
cpi = (void*)crypt_key[idx].c;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
}
#endif
}
void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = crypt_key2[idx].w;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
}
#endif
}
void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input();
for (j = 0; j < m_count; ++j) {
idx = j/SIMD_COEF_32;
idx_mod = j&(SIMD_COEF_32-1);
total_len[idx][idx_mod] += total_len_X86[j];
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1);
}
#endif
}
void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input2();
for (j = 0; j < m_count; ++j) {
idx = j/SIMD_COEF_32;
idx_mod = j&(SIMD_COEF_32-1);
total_len2[idx][idx_mod] += total_len2_X86[j];
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1);
}
#endif
}
void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = (void*)crypt_key[idx].c;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpi++ = *cpo[i]++;
}
}
#endif
}
void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = crypt_key2[idx].w;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpi++ = *cpo[i]++;
}
}
#endif
}
// This function simply 'switches' back to SSE. It does NOT copy any data from X86 to SSE.
void DynamicFunc__ToSSE(DYNA_OMP_PARAMS)
{
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
}
// This function simply 'switches' to X86. It does NOT copy any data from SSE to X86.
void DynamicFunc__ToX86(DYNA_OMP_PARAMS)
{
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
}
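// Select lower-case or upper-case hex digits for all subsequent base-16
// conversions (swaps the dynamic_itoa16 / itoa16_w2 tables).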
void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS)
{
dynamic_itoa16 = itoa16;
itoa16_w2=itoa16_w2_l;
}
void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS)
{
dynamic_itoa16 = itoa16u;
itoa16_w2=itoa16_w2_u;
}
/**************************************************************
 * DEPRECATED functions. These are the older pseudo functions
 * which we now have flags for. We keep them so that we can
 * add the proper flags even if the user is running an older
 * script.
*************************************************************/
void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {}
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* DYNAMIC primitive helper function
* This is the END of the primitives.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
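// Expands (or drops) a primitive when building the list of functions
// actually executed: the deprecated no-op loaders are ignored and, in
// non-SIMD builds, the SSE<->X86 switch functions are removed; every
// other primitive passes through unchanged.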
static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, unsigned int *count)
{
static DYNAMIC_primitive_funcp fncs[20];
*count = 0;
if (p==DynamicFunc__InitialLoadKeysToInput ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
return fncs; // ignore these
#ifndef SIMD_COEF_32
if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
return fncs; // we ignore these functions 100% in x86 mode.
#endif
// if (p==DynamicFunc__append_input2_from_CONST1) {
// fncs[0] = DynamicFunc__set_input2;
// fncs[1] = DynamicFunc__set_CONST1;
// fncs[2] = DynamicFunc__append_CONST;
// *count = 3;
// }
/* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code.
But I am not sure just WTF this is supposed to do anyway, since not LE should be using CTX only??? */
#if !ARCH_LITTLE_ENDIAN
if (/*p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_append_input2 ||
/*p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_append_input1 ||
/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 ||
/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 ||
/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 ||
/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 ||
p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL ||
p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL)
curdat.force_md5_ctx = 0;
#endif
*count = 1;
fncs[0] = p;
return fncs;
}
#ifdef _OPENMP
static int isBadOMPFunc(DYNAMIC_primitive_funcp p)
{
// If ANY of these functions are seen, we can NOT use OMP for this single format.
#if SIMD_COEF_32
if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
return 1;
#endif
if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase)
return 1;
return 0;
}
#endif
#define RETURN_TRUE_IF_BIG_FUNC(H) if(p==DynamicFunc__##H##_crypt_input1_append_input2 || \
p==DynamicFunc__##H##_crypt_input2_append_input1 || \
p==DynamicFunc__##H##_crypt_input1_overwrite_input1 || \
p==DynamicFunc__##H##_crypt_input2_overwrite_input2 || \
p==DynamicFunc__##H##_crypt_input1_overwrite_input2 || \
p==DynamicFunc__##H##_crypt_input2_overwrite_input1 || \
p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL || \
p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL) \
return 1
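// Illustrative expansion only: RETURN_TRUE_IF_BIG_FUNC(MD4) becomes a single
// if() comparing p against the eight flat-mode DynamicFunc__MD4_crypt_*
// primitives (input1_append_input2 ... input2_to_output1_FINAL) and returning
// 1 on a match, so each isXXXFunc() helper below stays one line per hash.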
static int isMD4Func(DYNAMIC_primitive_funcp p)
{
// handle flats
RETURN_TRUE_IF_BIG_FUNC(MD4);
// handle older mmx_coef variants
if (p==DynamicFunc__crypt_md4 || p==DynamicFunc__crypt_md4_in1_to_out2 ||
p==DynamicFunc__crypt2_md4 || p==DynamicFunc__crypt_md4_in2_to_out1)
return 1;
return 0;
}
#ifdef _OPENMP
// Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings.
#ifdef SIMD_COEF_32
// otherwise unused
static int isMD5Func(DYNAMIC_primitive_funcp p)
{
// handle flats
RETURN_TRUE_IF_BIG_FUNC(MD5);
// handle older mmx_coef variants
if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen ||
p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE ||
p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
return 1;
return 0;
}
#endif
#endif
static int isSHA1Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA1);
return 0;
}
static int isSHA2_256Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA224); RETURN_TRUE_IF_BIG_FUNC(SHA256);
return 0;
}
static int isSHA2_512Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA384); RETURN_TRUE_IF_BIG_FUNC(SHA512);
return 0;
}
static int isGOSTFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(GOST);
return 0;
}
static int isTigerFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(Tiger);
return 0;
}
static int isWHIRLFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(WHIRLPOOL);
return 0;
}
static int isRIPEMDFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(RIPEMD128); RETURN_TRUE_IF_BIG_FUNC(RIPEMD160);
RETURN_TRUE_IF_BIG_FUNC(RIPEMD256); RETURN_TRUE_IF_BIG_FUNC(RIPEMD320);
return 0;
}
static int isHAVALFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(HAVAL128_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL160_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL192_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL224_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL256_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_5);
return 0;
}
static int isMD2Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(MD2);
return 0;
}
static int isPANAMAFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(PANAMA);
return 0;
}
static int isSKEINFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SKEIN224); RETURN_TRUE_IF_BIG_FUNC(SKEIN256);
RETURN_TRUE_IF_BIG_FUNC(SKEIN384); RETURN_TRUE_IF_BIG_FUNC(SKEIN512);
return 0;
}
static int isKECCAKFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA3_224); RETURN_TRUE_IF_BIG_FUNC(SHA3_256); RETURN_TRUE_IF_BIG_FUNC(SHA3_384);
RETURN_TRUE_IF_BIG_FUNC(SHA3_512); RETURN_TRUE_IF_BIG_FUNC(KECCAK_256); RETURN_TRUE_IF_BIG_FUNC(KECCAK_512);
return 0;
}
// LARGE_HASH_EDIT_POINT (Add a new IsXXXFunc() type function)
static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p)
{
#undef IF
#define IF(H) p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL||p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL
if (IF(SHA1)||IF(SHA224)||IF(SHA256)||IF(SHA384)||IF(SHA512)||IF(GOST)||IF(WHIRLPOOL)||IF(Tiger)||IF(RIPEMD128)||
IF(RIPEMD160)||IF(RIPEMD256)||IF(RIPEMD320)||
IF(HAVAL128_3)||IF(HAVAL128_4)||IF(HAVAL128_5)||IF(HAVAL160_3)||IF(HAVAL160_4)||IF(HAVAL160_5)||
IF(HAVAL192_3)||IF(HAVAL192_4)||IF(HAVAL192_5)||IF(HAVAL224_3)||IF(HAVAL224_4)||IF(HAVAL224_5)||
IF(HAVAL256_3)||IF(HAVAL256_4)||IF(HAVAL256_5)||IF(MD2)||IF(PANAMA)||IF(SKEIN224)||IF(SKEIN256)||
IF(SKEIN384)||IF(SKEIN512)||IF(SHA3_224)||IF(SHA3_256)||IF(SHA3_384)||IF(SHA3_512)||
IF(KECCAK_256)||IF(KECCAK_512))
// LARGE_HASH_EDIT_POINT
return 1;
return 0;
}
#ifdef _OPENMP
#ifdef SIMD_COEF_32
// Simple Euclidean algorithm for GCD
static int GCD (int a, int b)
{
while (b) {
int t = b;
b = a % b;
a = t;
}
return a;
}
// simple algorithm for LCM is (a*b)/GCD(a,b); computed as (a/GCD(a,b))*b to limit overflow
static int LCM(int a, int b)
{
a/=GCD(a,b);
return a*b;
}
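// Worked example (PARA/COEF values are illustrative only): with
// SIMD_COEF_32 == 4, SIMD_PARA_MD5 == 3 and SIMD_PARA_SHA1 == 2, an MD5 step
// handles keys in blocks of 12 and a SHA1 step in blocks of 8, so the OMP
// granularity must be LCM(12, 8) == 24 for every thread's chunk to be a whole
// number of SIMD blocks for both hashes.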
#endif
static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
unsigned int i;
#ifndef SIMD_COEF_32
curdat.omp_granularity=OMP_INC;
#else
if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe)
curdat.omp_granularity=OMP_INC;
else {
curdat.omp_granularity = 1;
for (i=0; Setup->pFuncs[i]; ++i) {
if (isMD5Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD5*SIMD_COEF_32);
else if (isMD4Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD4*SIMD_COEF_32);
else if (isSHA1Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA1*SIMD_COEF_32);
else if (isSHA2_256Func(Setup->pFuncs[i]))
#if SIMD_COEF_32
#if SIMD_PARA_SHA256
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA256*SIMD_COEF_32);
#else
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_32);
#endif
#else
curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
else if (isSHA2_512Func(Setup->pFuncs[i]))
#if SIMD_COEF_64
#if SIMD_PARA_SHA512
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA512*SIMD_COEF_64);
#else
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_64);
#endif
#else
curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
}
}
#endif
for (i=0; Setup->pFuncs[i]; ++i) {
if (isBadOMPFunc(Setup->pFuncs[i]))
pFmt->params.flags &= (~(FMT_OMP|FMT_OMP_BAD));
}
if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP)
pFmt->params.flags |= FMT_OMP_BAD;
}
#endif
int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
unsigned int i, j, cnt, cnt2, x;
DYNAMIC_primitive_funcp *pFuncs;
if (Setup->flags & MGF_ColonNOTValid)
{
extern struct options_main options;
if (options.loader.field_sep_char == ':')
{
return 0;
}
}
// Deal with deprecated 1st functions. Convert them to proper 'flags'
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput)
Setup->startFlags |= MGF_KEYS_INPUT;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2)
Setup->startFlags |= MGF_KEYS_CRYPT_IN2;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1)
Setup->startFlags |= MGF_KEYS_BASE16_IN1;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32;
curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0;
curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0;
curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0;
curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0;
curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0;
curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0;
curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0;
curdat.FldMask = 0;
curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0;
curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? MGF_FLD7 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0;
curdat.dynamic_base64_inout = 0;
curdat.dynamic_salt_as_hex = 0;
curdat.dynamic_salt_as_hex_format_type = 0;
curdat.force_md5_ctx = 0;
curdat.nUserName = 0;
curdat.nPassCase = 1;
curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2
curdat.init = 0;
curdat.pSetup = Setup;
pFmt->methods.binary = get_binary;
pFmt->methods.cmp_all=cmp_all;
pFmt->methods.cmp_one=cmp_one;
pFmt->methods.source=fmt_default_source;
pFmt->methods.salt = get_salt;
pFmt->methods.done = done;
pFmt->methods.set_salt = set_salt;
pFmt->methods.salt_hash = salt_hash;
//pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME);
pFmt->params.format_name = "";
pFmt->params.benchmark_length = 0; // NOTE 0 'assumes' salted. If unsalted, we set back to -1
pFmt->params.salt_size = 0;
curdat.using_flat_buffers_sse2_ok = 0; // used to distinguish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS
if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS)
curdat.using_flat_buffers_sse2_ok = 1;
#ifdef SIMD_COEF_32
curdat.dynamic_use_sse = 1; // if 1, then we are in SSE2 mode (but can switch out)
if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) {
curdat.dynamic_use_sse = 0; // Do not use SSE code at all.
} else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) {
curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them).
curdat.using_flat_buffers_sse2_ok = 1;
} else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) {
curdat.dynamic_use_sse = 2; // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2).
curdat.md5_startup_in_x86 = 1;
}
if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT;
pFmt->params.algorithm_name = ALGORITHM_NAME;
} else {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
}
#else
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
#endif
pFmt->params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
dynamic_use_sse = curdat.dynamic_use_sse;
// Ok, set the new 'constants' data
memset(curdat.Consts, 0, sizeof(curdat.Consts));
memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen));
for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts)
{
if (Setup->pConstants[curdat.nConsts].Const == NULL)
break;
//curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const);
//curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const);
// we really do not 'have' to null terminate, but do just to be on the 'safe' side.
curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE);
memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len);
curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0;
curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len;
}
if ( (Setup->flags & MGF_INPBASE64) == MGF_INPBASE64)
{
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64;
}
if ( (Setup->flags & MGF_INPBASE64m) == MGF_INPBASE64m)
{
curdat.dynamic_base64_inout = 3;
pFmt->methods.binary = binary_b64m;
}
if ( (Setup->flags & MGF_INPBASE64b) == MGF_INPBASE64b)
{
curdat.dynamic_base64_inout = 5;
pFmt->methods.binary = binary_b64b;
}
if ( (Setup->flags & MGF_INPBASE64_4x6) == MGF_INPBASE64_4x6)
{
curdat.dynamic_base64_inout = 2;
pFmt->methods.binary = binary_b64_4x6;
pFmt->methods.cmp_all = cmp_all_64_4x6;
pFmt->methods.cmp_one = cmp_one_64_4x6;
#if !ARCH_LITTLE_ENDIAN
pFmt->methods.binary_hash[0] = binary_hash_0_64x4;
pFmt->methods.binary_hash[1] = binary_hash_1_64x4;
pFmt->methods.binary_hash[2] = binary_hash_2_64x4;
pFmt->methods.binary_hash[3] = binary_hash_3_64x4;
pFmt->methods.binary_hash[4] = binary_hash_4_64x4;
pFmt->methods.binary_hash[5] = binary_hash_5_64x4;
pFmt->methods.get_hash[0] = get_hash_0_64x4;
pFmt->methods.get_hash[1] = get_hash_1_64x4;
pFmt->methods.get_hash[2] = get_hash_2_64x4;
pFmt->methods.get_hash[3] = get_hash_3_64x4;
pFmt->methods.get_hash[4] = get_hash_4_64x4;
pFmt->methods.get_hash[5] = get_hash_5_64x4;
#endif
// Not enough bits in a single WORD to do the 7th one.
pFmt->methods.binary_hash[6] = NULL;
pFmt->methods.get_hash[6] = NULL;
}
// printf ("%.13s",Setup->szFORMAT_NAME);
if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a|MGF_INPBASE64m|MGF_INPBASE64b)) == 0) {
pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE;
// printf (" Setting FMT_SPLIT_UNIFIES_CASE");
if (pFmt->methods.split == split) {
pFmt->methods.split = split_UC;
// printf (" split set to split_UC()\n");
}
}
// else printf (" split set to split()\n");
if (Setup->flags & MGF_UTF8)
pFmt->params.flags |= FMT_UTF8;
if (Setup->flags & MGF_INPBASE64a) {
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64a;
}
if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME)
curdat.nUserName = 1;
if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE)
curdat.nUserName = 2;
if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE)
curdat.nUserName = 3;
// Ok, what 'flag' in the format struct, do we clear???
if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) {
curdat.nPassCase = 2;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) {
curdat.nPassCase = 3;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) {
curdat.dynamic_salt_as_hex = 1;
curdat.dynamic_salt_as_hex_format_type = Setup->flags >> 56;
}
if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) {
curdat.dynamic_salt_as_hex = 2;
if (curdat.b2Salts)
return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME);
curdat.b2Salts = 2;
}
if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex)
curdat.dynamic_salt_as_hex |= 0x100;
if ( (Setup->flags & MGF_SALTED) == 0)
{
curdat.dynamic_FIXED_SALT_SIZE = 0;
pFmt->params.benchmark_length = -1;
pFmt->params.salt_size = 0;
}
else
{
pFmt->params.salt_size = sizeof(void *);
if (Setup->SaltLen > 0)
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
else
{
// says we have a salt, but NOT a fixed sized one that we 'know' about.
// if the SaltLen is -1, then there is NO constraints. If the SaltLen
// is -12 (or any other neg number other than -1), then there is no
// fixed salt length, but the 'max' salt size is -SaltLen. So, -12
// means any salt from 1 to 12 is 'valid'.
if (Setup->SaltLen > -2)
curdat.dynamic_FIXED_SALT_SIZE = -1;
else {
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
#if !defined (SIMD_COEF_32)
// for non-SSE, we limit ourselves to 110 bytes, not 55. The value here is negative, so subtracting 55 widens the allowed maximum by 55.
curdat.dynamic_FIXED_SALT_SIZE -= 55;
#endif
}
}
}
if (Setup->MaxInputLen)
pFmt->params.plaintext_length = Setup->MaxInputLen;
else {
if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) {
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
} else {
pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 1) {
pFmt->params.plaintext_length = 1;
fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME);
}
}
}
#ifndef SIMD_COEF_32
if (Setup->MaxInputLenX86) {
pFmt->params.plaintext_length = Setup->MaxInputLenX86;
} else {
if (Setup->SaltLenX86)
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86);
else
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
}
#endif
curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT );
curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32);
if (Setup->startFlags&MGF_SOURCE) {
if (Setup->startFlags&MGF_INPUT_20_BYTE) pFmt->methods.source = source_20_hex;
else if (Setup->startFlags&MGF_INPUT_28_BYTE) pFmt->methods.source = source_28_hex;
else if (Setup->startFlags&MGF_INPUT_32_BYTE) pFmt->methods.source = source_32_hex;
else if (Setup->startFlags&MGF_INPUT_40_BYTE) pFmt->methods.source = source_40_hex;
else if (Setup->startFlags&MGF_INPUT_48_BYTE) pFmt->methods.source = source_48_hex;
else if (Setup->startFlags&MGF_INPUT_64_BYTE) pFmt->methods.source = source_64_hex;
else pFmt->methods.source = source;
}
if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE)
curdat.store_keys_in_input = 3;
curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT);
if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input)
return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME);
curdat.store_keys_normal_but_precompute_hash_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2);
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1);
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
#define IF_CDOFF32(F,L) if (!curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) \
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = \
(!!((Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset_TYPE)==MGF_KEYS_BASE16_IN1_Offset_ ## F))*L
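// Illustrative expansion only: IF_CDOFF32(SHA1,40) sets ..._offsetX to 40 (the
// hex length of a SHA-1 digest) when the start flags carry
// MGF_KEYS_BASE16_IN1_Offset_SHA1, and never overwrites a value already chosen
// by an earlier line in the chain below.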
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = 0;
IF_CDOFF32(MD5,32); IF_CDOFF32(MD4,32); IF_CDOFF32(SHA1,40); IF_CDOFF32(SHA224,56);
IF_CDOFF32(SHA256,64); IF_CDOFF32(SHA384,96); IF_CDOFF32(SHA512,128); IF_CDOFF32(GOST,64);
IF_CDOFF32(WHIRLPOOL,128); IF_CDOFF32(Tiger,48); IF_CDOFF32(RIPEMD128,32); IF_CDOFF32(RIPEMD160,40);
IF_CDOFF32(RIPEMD256,64); IF_CDOFF32(RIPEMD320,80); IF_CDOFF32(MD2,32); IF_CDOFF32(PANAMA,64);
IF_CDOFF32(HAVAL128_3,32); IF_CDOFF32(HAVAL160_3,40); IF_CDOFF32(HAVAL192_3,48); IF_CDOFF32(HAVAL224_3,56); IF_CDOFF32(HAVAL256_3,64);
IF_CDOFF32(HAVAL128_4,32); IF_CDOFF32(HAVAL160_4,40); IF_CDOFF32(HAVAL192_4,48); IF_CDOFF32(HAVAL224_4,56); IF_CDOFF32(HAVAL256_4,64);
IF_CDOFF32(HAVAL128_5,32); IF_CDOFF32(HAVAL160_5,40); IF_CDOFF32(HAVAL192_5,48); IF_CDOFF32(HAVAL224_5,56); IF_CDOFF32(HAVAL256_5,64);
IF_CDOFF32(SKEIN224,56); IF_CDOFF32(SKEIN256,64); IF_CDOFF32(SKEIN384,96); IF_CDOFF32(SKEIN512,128);
IF_CDOFF32(SHA3_224,56); IF_CDOFF32(SHA3_256,64); IF_CDOFF32(SHA3_384,96); IF_CDOFF32(SHA3_512,128);
IF_CDOFF32(KECCAK_256,64); IF_CDOFF32(KECCAK_512,128);
// LARGE_HASH_EDIT_POINT
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX)
{
curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
}
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type = Setup->startFlags>>56;
if ((Setup->startFlags) == 0)
{
// Ok, if we do not have some 'special' loader function, we MUST first clean some
// input. If that is not done, there is NO WAY this is a valid format. This is
// NOT an intelligent check, but more like the dummy lights on newer automobiles.
// You know it will not work, but do not know 'why', nor should you care.
if (Setup->pFuncs[0] != DynamicFunc__clean_input &&
Setup->pFuncs[0] != DynamicFunc__clean_input2 &&
Setup->pFuncs[0] != DynamicFunc__clean_input_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input_full)
return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME);
}
if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX)
{
// if the user wants salt_as_hex, then there can NOT be 2 salts.
return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME);
}
if (Setup->pFuncs && Setup->pFuncs[0])
{
unsigned int z;
for (z = 0; Setup->pFuncs[z]; ++z)
;
z += 50;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
j = 0;
#if !ARCH_LITTLE_ENDIAN
// for bigendian, we do NOT store into keys, since we byte swap them.
if (curdat.store_keys_in_input==1) {
// this is only a minor speed hit, so simply fix by doing this. There is an
// extra memcpy, that is it.
curdat.store_keys_in_input = 0;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
}
// NOTE NOTE NOTE, FIXME. These are 'hacks' which slow stuff way down. We should look at
// building preloads that CAN do this. Store key input to input 1, but then do not use
// input 1. Put a copy to input 2, then append, etc. In that way, we cut the number of
// MD5's down by at least 1.
//
// But for now, just get it working. Get it working faster later.
// NOTE, these are commented out now. I am not sure why they were there
// I think the thought was for SIMD, BUT SIMD is not used on Sparc
// I am leaving this code for now, BUT I think it should NOT be here.
// I was getting failures on the 16 byte sph formats for any
// hash(hash($p).$s) such as md2(md2($p).$s). However, the modification
// where curdat.store_keys_in_input==1 is absolutely needed, or we get
// get_key() failures all over the place.
// note, with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we only will handle type 6 and 7
// for now we have this 'turned' off. It is fixed for type 6, 7 and 14. It is left on for the
// john.ini stuff. Thus, if someone builds the intel version type 6, it will work (but slower).
// if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) {
// curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = 0;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
// Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16;
// }
#endif
for (i=0; Setup->pFuncs[i]; ++i)
{
if (j > z-10)
{
// grow the function array and copy the entries loaded so far from the old
// buffer; copying the freshly allocated array onto itself would lose them.
unsigned int k;
DYNAMIC_primitive_funcp *pOldFuncs = curdat.dynamic_FUNCTIONS;
z += 100;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
for (k = 0; k < j; ++k)
curdat.dynamic_FUNCTIONS[k] = pOldFuncs[k];
}
if (curdat.store_keys_in_input)
{
if (Setup->pFuncs[i] == DynamicFunc__append_keys)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_keys2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__clean_input)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_64 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__overwrite_salt_to_input1_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__append_input_from_input2 called and that is invalid\n", Setup->szFORMAT_NAME);
}
// Ok, if constant-copy functions are used, make SURE we have that many constants.
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0)
return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2)
return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3)
return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4)
return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5)
return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6)
return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7)
return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8)
return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0)
return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have the MGF_SALTED2 flag set\n", Setup->szFORMAT_NAME);
// Ok, if we have made it here, the function is 'currently' still valid. Load this pointer into our array of pointers.
pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2);
#define IS_FUNC_NAME(H,N) if(is##H##Func(pFuncs[x])){ if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME)) pFmt->params.algorithm_name = ALGORITHM_NAME_##N; \
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86)) pFmt->params.algorithm_name = ALGORITHM_NAME_X86_##N; }
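// Illustrative expansion only: IS_FUNC_NAME(SHA1,S) tests isSHA1Func(pFuncs[x])
// and, if the algorithm name is still the generic ALGORITHM_NAME (or the X86
// variant), swaps it for ALGORITHM_NAME_S (or ALGORITHM_NAME_X86_S) so the
// reported algorithm name reflects the hash family the script really uses.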
for (x = 0; x < cnt2; ++x) {
curdat.dynamic_FUNCTIONS[j++] = pFuncs[x];
if (pFuncs[x] == DynamicFunc__setmode_unicode || pFuncs[x] == DynamicFunc__setmode_unicodeBE)
pFmt->params.flags |= FMT_UNICODE;
IS_FUNC_NAME(SHA1,S)
if (isSHA2_256Func(pFuncs[x])) {
#ifdef SIMD_COEF_32
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256;
}
if (isSHA2_512Func(pFuncs[x])) {
#ifdef SIMD_COEF_64
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512;
}
IS_FUNC_NAME(MD4,4)
IS_FUNC_NAME(WHIRL,WP2)
IS_FUNC_NAME(GOST,GST2)
IS_FUNC_NAME(Tiger,TGR)
IS_FUNC_NAME(RIPEMD,RIPEMD)
IS_FUNC_NAME(HAVAL,HAVAL)
IS_FUNC_NAME(MD2,MD2)
IS_FUNC_NAME(PANAMA,PANAMA)
IS_FUNC_NAME(SKEIN,SKEIN)
// Note, until we add SIMD keccak, one algorithm is all we 'need'
IS_FUNC_NAME(KECCAK,KECCAK)
// IS_FUNC_NAME(KECCAK,SHA3_256)
// IS_FUNC_NAME(KECCAK,SHA3_384)
// IS_FUNC_NAME(KECCAK,SHA3_512)
// IS_FUNC_NAME(KECCAK,KECCAK_256)
// IS_FUNC_NAME(KECCAK,KECCAK_512)
// LARGE_HASH_EDIT_POINT (MUST match the just added a new IsXXXFunc() type function)
}
if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1]))
{
if (Setup->pFuncs[i+1])
return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME);
}
}
curdat.dynamic_FUNCTIONS[j] = NULL;
}
if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL)
{
return !fprintf(stderr, "Error invalid format %s: no validation hash(es) for this format\n", Setup->szFORMAT_NAME);
}
cnt = 0;
#ifdef _OPENMP
dyna_setupOMP(Setup, pFmt);
#endif
{
struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD);
memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests));
for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i)
{
if (Setup->pPreloads[i].ciphertext == NULL) {
i = 0;
}
if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (options.target_enc != UTF_8)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else
pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext);
pfx[cnt].plaintext = str_alloc_copy(Setup->pPreloads[i].plaintext);
pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0] ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : "";
pfx[cnt].fields[1] = pfx[cnt].ciphertext;
for (j = 2; j < 10; ++j)
pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j] ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : "";
++cnt;
}
pfx[cnt].ciphertext = NULL;
pfx[cnt].plaintext = NULL;
pFmt->params.tests = pfx;
}
if (curdat.dynamic_base16_upcase)
dynamic_itoa16 = itoa16u;
else
dynamic_itoa16 = itoa16;
{
char s[512], *cp;
cp = Setup->szFORMAT_NAME;
cp = strchr(Setup->szFORMAT_NAME, ' ');
++cp;
sprintf(s, "%s %s", cp, pFmt->params.algorithm_name);
pFmt->params.algorithm_name = str_alloc_copy(s);
}
if ((Setup->flags & MGF_SALTED) && !Setup->SaltLen)
return !fprintf(stderr, "Error invalid format %s\n\tIt is required to add SaltLen= to the script, for this format\n", Setup->szFORMAT_NAME);
return 1;
}
static int LoadOneFormat(int idx, struct fmt_main *pFmt)
{
extern struct options_main options;
char label[16] = { 0 }, label_id[16] = { 0 }, *cp = NULL;
memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main));
// TODO:
// NOTE, this was commented out, because the late binding @dynamic=expr@
// hashes were wiping out possibly pre-setup input buffers. NOTE that
// things worked fine after this, all self tests do pass, and I am 99%
// sure that all of this 'required' cleaning happens in init(). But I am
// putting this comment in here, so that if at a later time problems are
// tracked down to this, we will know why.
// dynamic_RESET(pFmt);
// Ok we need to list this as a dynamic format (even for the 'thin' formats)
pFmt->params.flags |= FMT_DYNAMIC;
if (idx < 1000) {
if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1)
return 0;
}
else {
if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1)
return 0;
}
/* we 'have' to take the sig from the test array. If we do not have */
/* our preload array 'solid', then the idx will not be the proper */
/* number. So we simply grab the label from the test ciphertext string */
strncpy(label, pFmt->params.tests[0].ciphertext, 15);
cp = strchr(&label[1], '$');
if (NULL != cp) cp[1] = 0;
strcpy(label_id, &label[1]);
cp = strchr(label_id, '$');
if (NULL != cp) *cp = 0;
// if (!options.format || strncmp(options.format, "dynamic_", 8))
// pFmt->params.label = str_alloc_copy("dynamic");
// else
pFmt->params.label = str_alloc_copy(label_id);
strcpy(curdat.dynamic_WHICH_TYPE_SIG, label);
curdat.dynamic_HASH_OFFSET = strlen(label);
if (curdat.dynamic_base64_inout == 1 || curdat.dynamic_base64_inout == 3) {
// we have to compute 'proper' offset
const char *cp = pFmt->params.tests[0].ciphertext;
size_t len = base64_valid_length(&cp[curdat.dynamic_HASH_OFFSET], curdat.dynamic_base64_inout == 1 ? e_b64_crypt : e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + len + 1;
}
else if (curdat.dynamic_base64_inout == 2)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1;
else if (curdat.dynamic_40_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1;
else if (curdat.dynamic_48_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1;
else if (curdat.dynamic_64_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1;
else if (curdat.dynamic_56_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1;
else if (curdat.dynamic_80_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1;
else if (curdat.dynamic_96_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1;
else if (curdat.dynamic_128_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1;
else
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1;
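// Worked example (hypothetical hash line): for "$dynamic_1$<32 hex>$salt",
// dynamic_HASH_OFFSET is strlen("$dynamic_1$") and the default branch above
// places dynamic_SALT_OFFSET 33 bytes further on (32 hex digits plus the '$'
// separator), i.e. at the first byte of the salt.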
pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data));
if (strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG)))
{
fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n",
curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext);
return 0;
}
return 1;
}
struct fmt_main *dynamic_Register_local_format(int *type) {
int num=nLocalFmts++;
private_subformat_data keep;
if (!pLocalFmts)
pLocalFmts = mem_calloc_tiny(1000*sizeof(struct fmt_main), 16);
/* since these are loaded LATE in the process, init() has been called
* and we HAVE to preserve the already loaded setup. This will happen
* if we run a crack, but do not specify a specific dyna format
*/
memcpy(&keep, &curdat, sizeof(private_subformat_data));
LoadOneFormat(num+6000, &(pLocalFmts[num]));
memcpy(&curdat, &keep, sizeof(private_subformat_data));
dynamic_use_sse = curdat.dynamic_use_sse;
force_md5_ctx = curdat.force_md5_ctx;
*type = num+6000;
return &(pLocalFmts[num]);
}
int dynamic_Register_formats(struct fmt_main **ptr)
{
int count, i, idx, single=-1, wildcard = 0, pop[5000];
extern struct options_main options;
if (options.format && strstr(options.format, "*"))
wildcard = 1;
Dynamic_Load_itoa16_w2();
if (!wildcard && options.format &&
!strncmp(options.format, "dynamic_", 8))
sscanf(options.format, "dynamic_%d", &single);
if (options.format && options.subformat && !strcmp(options.format, "dynamic") && !strncmp(options.subformat, "dynamic_", 8))
sscanf(options.subformat, "dynamic_%d", &single);
if (options.dynamic_bare_hashes_always_valid == 'Y')
dynamic_allow_rawhash_fixup = 1;
else if (options.dynamic_bare_hashes_always_valid != 'N' && cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1))
dynamic_allow_rawhash_fixup = 1;
if (single != -1) {
// user wanted only a 'specific' format. Simply load that one.
dynamic_allow_rawhash_fixup = 1;
if (dynamic_IS_VALID(single, 1) == 0)
return 0;
pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD);
if (!LoadOneFormat(single, pFmts))
return 0;
*ptr = pFmts;
return (nFmts = 1);
}
for (count = i = 0; i < 5000; ++i) {
if ((pop[i] = (dynamic_IS_VALID(i, 0) == 1)))
++count;
}
// Ok, now we know how many formats we have. Load them
pFmts = mem_alloc_tiny(sizeof(pFmts[0])*count, MEM_ALIGN_WORD);
for (idx = i = 0; i < 5000; ++i) {
if (pop[i]) {
if (LoadOneFormat(i, &pFmts[idx]) == 0)
--count;
else
++idx;
}
}
*ptr = pFmts;
return (nFmts = count);
}
/*
* finds the 'proper' sub format from the allocated formats, IFF that format 'exists'
*/
static struct fmt_main *dynamic_Get_fmt_main(int which)
{
char label[40];
int i;
sprintf(label, "$dynamic_%d$", which);
for (i = 0; i < nFmts; ++i) {
private_subformat_data *pPriv = pFmts[i].private.data;
if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label))
return &pFmts[i];
}
for (i = 0; i < nLocalFmts; ++i) {
private_subformat_data *pPriv = pLocalFmts[i].private.data;
if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label))
return &pLocalFmts[i];
}
return NULL;
}
/*
* This function will 'forget' which md5-gen subtype we are working with. It will allow
* a different type to be used. Very useful for things like -test (benchmarking).
*/
static void dynamic_RESET(struct fmt_main *fmt)
{
memset(&curdat, 0, sizeof(curdat));
m_count = 0;
keys_dirty = 0;
cursalt=cursalt2=username=0;
saltlen=saltlen2=usernamelen=0;
// make 'sure' we startout with blank inputs.
m_count = 0;
#ifdef SIMD_COEF_32
if (input_buf) {
#else
if (input_buf_X86) {
#endif
__nonMP_DynamicFunc__clean_input_full();
__nonMP_DynamicFunc__clean_input2_full();
}
}
/*
* This will LINK our functions into some other fmt_main structure. That way
* that structure can use our code. The other *_fmt.c file will need to
* 'override' the valid, the binary and the salt functions, and make changes
* to the hash, BEFORE calling into the dynamic valid/binary/salt functions.
* Other than those functions (and calling into this linkage function at init time)
* that is about all that needs to be in that 'other' *_fmt.c file, as long as the
* format is part of the md5-generic 'class' of functions.
*/
struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso)
{
int i, valid, nFmtNum;
struct fmt_main *pFmtLocal;
static char subformat[17], *cp;
dynamic_allow_rawhash_fixup = 0;
strncpy(subformat, ciphertext, 16);
subformat[16] = 0;
cp = strchr(&subformat[9], '$');
if (cp)
cp[1] = 0;
nFmtNum = -1;
sscanf(subformat, "$dynamic_%d", &nFmtNum);
if (nFmtNum == -1)
error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
if (pFmtLocal == NULL)
error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal);
if (!valid)
error_msg("Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext);
pFmt->params.algorithm_name = pFmtLocal->params.algorithm_name;
if (pFmt->params.plaintext_length == 0 ||
pFmt->params.plaintext_length > pFmtLocal->params.plaintext_length) {
pFmt->params.plaintext_length = pFmtLocal->params.plaintext_length;
pFmt->params.plaintext_min_length = pFmtLocal->params.plaintext_min_length;
}
pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
pFmt->params.min_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
pFmt->params.flags = pFmtLocal->params.flags;
if (pFmtLocal->params.salt_size)
pFmt->params.salt_size = sizeof(void*);
else
pFmt->params.salt_size = 0;
pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all;
pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one;
pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact;
for (i = 0; i < FMT_TUNABLE_COSTS; ++i) {
pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i];
pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i];
}
pFmt->methods.source = pFmtLocal->methods.source;
pFmt->methods.set_salt = pFmtLocal->methods.set_salt;
pFmt->methods.salt = pFmtLocal->methods.salt;
pFmt->methods.done = pFmtLocal->methods.done;
pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash;
pFmt->methods.split = pFmtLocal->methods.split;
pFmt->methods.set_key = pFmtLocal->methods.set_key;
pFmt->methods.get_key = pFmtLocal->methods.get_key;
pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys;
pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all;
pFmt->methods.prepare = pFmtLocal->methods.prepare;
pFmt->methods.salt_compare = pFmtLocal->methods.salt_compare;
for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
{
pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i];
pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i];
}
if (bInitAlso)
{
//fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat);
init(pFmtLocal);
}
pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data));
return pFmtLocal;
}
// We ONLY deal with hex hashes at this time. If we later have to deal with
// base-64, this will become harder. Before this function we had bugs where
// many things were loaded as 'being' valid, even if not.
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv)
{
int i, cipherTextLen = CIPHERTEXT_LENGTH;
if (pPriv->dynamic_40_byte_input) {
cipherTextLen = 40;
} else if (pPriv->dynamic_48_byte_input) {
cipherTextLen = 48;
} else if (pPriv->dynamic_64_byte_input) {
cipherTextLen = 64;
} else if (pPriv->dynamic_56_byte_input) {
cipherTextLen = 56;
} else if (pPriv->dynamic_80_byte_input) {
cipherTextLen = 80;
} else if (pPriv->dynamic_96_byte_input) {
cipherTextLen = 96;
} else if (pPriv->dynamic_128_byte_input) {
cipherTextLen = 128;
}
for (i = 0; i < cipherTextLen; i++) {
if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7f)
return 0;
}
if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
if (!ciphertext[cipherTextLen])
return 1;
return 0;
}
return ciphertext[cipherTextLen] == '$';
}
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
return ciphertext;
if (dynamic_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
{
static char __ciphertext[512+24];
if (pPriv->pSetup->flags & MGF_SALTED) {
if (!strchr(ciphertext, '$'))
return ciphertext;
}
if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
if (!strstr(ciphertext, "$$2"))
return ciphertext;
}
if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
if (!strstr(ciphertext, "$$U"))
return ciphertext;
}
if (pPriv->FldMask) {
int i;
for (i = 0; i < 10; ++i) {
if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
char Fld[5];
sprintf(Fld, "$$F%d", i);
if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
return ciphertext;
}
}
}
strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
return __ciphertext;
}
return ciphertext;
}
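// Example (raw-MD5 style subformat, illustrative only): with rawhash fixup
// enabled, a bare 32-hex line such as "5f4dcc3b5aa765d61d8327deb882cf99"
// passes looks_like_raw_hash() and is returned from FixupIfNeeded() as
// "$dynamic_0$5f4dcc3b5aa765d61d8327deb882cf99", ready for the normal
// signature-prefixed code paths.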
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
private_subformat_data *pPriv;
if (!pFmt) return 0;
/* NOTE, it 'is' possible to get called here, without the private stuff being setup
properly (in valid, etc). So, we simply grab the static private stuff each time */
pPriv = pFmt->private.data;
if (!ciphertext || !pPriv) return 0;
return !strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG));
}
// if caseType == 1, return cp
// if caseType == 2, return upcase(cp)
// if caseType == 3, return locase(cp)
// if caseType == 4, return upcaseFirstChar(locase(cp))
static char *HandleCase(char *cp, int caseType)
{
static UTF8 dest[256];
switch(caseType) {
case 1:
return cp;
case 2:
enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
if (!strcmp((char*)dest, cp))
return cp;
break;
case 3:
case 4:
enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
if (caseType == 4)
dest[0] = low2up_ansi(dest[0]);
if (!strcmp((char*)dest, cp))
return cp;
break;
default:
return cp;
}
return (char*)dest;
}
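// Example: HandleCase("pAsSwOrD", 4) lowercases to "password", upcases the
// first character and returns "Password"; HandleCase("Password", 2) returns
// "PASSWORD"; caseType 1 always hands the input pointer back unchanged.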
int dynamic_real_salt_length(struct fmt_main *pFmt)
{
if (pFmt->params.flags & FMT_DYNAMIC) {
private_subformat_data *pPriv = pFmt->private.data;
if (pPriv == NULL || pPriv->pSetup == NULL)
return -1; // not a dynamic format, or called before we have loaded them!!
return abs(pPriv->pSetup->SaltLen);
}
// NOT a dynamic format
return -1;
}
#else
#warning Notice: Dynamic format disabled from build.
#endif /* DYNAMIC_DISABLED */
|
udr-3.c
|
/* { dg-do compile } */
/* { dg-options "-fopenmp -std=gnu89" } */
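/* Exercises '#pragma omp declare reduction' in C: struct and integer
reductions, initializer clauses, redeclaration diagnostics, and the
file/block-scope restriction (see the dg-error annotations below).  */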
struct S { int s; };
struct T { int t; };
struct U { int u; };
#pragma omp declare reduction (+: struct S: omp_out.s += omp_in.s)
#pragma omp declare reduction (*: struct S: omp_out.s *= omp_in.s) \
initializer (omp_priv = {1})
#pragma omp declare reduction (foo: struct S: omp_out.s += omp_in.s)
void
f1 ()
{
struct S s, s2;
struct T t;
#pragma omp declare reduction (+: struct T: omp_out.t += omp_in.t)
#pragma omp parallel reduction (+: t) reduction (foo: s) reduction (*: s2)
s.s = 1, t.t = 1, s2.s = 2;
#pragma omp parallel reduction (+: s)
s.s = 1;
}
void bar (struct S *);
void
f2 ()
{
#pragma omp declare reduction (foo: struct S: omp_out.s += omp_in.s) initializer (bar (&omp_priv))
#pragma omp declare reduction (bar: struct S: omp_out.s += omp_in.s) initializer (bar (&omp_orig)) /* { dg-error "one of the initializer call arguments should be" } */
}
#pragma omp declare reduction (+: struct U: omp_out.u *= omp_in.u) /* { dg-error "previous" } */
#pragma omp declare reduction (+: struct U: omp_out.u += omp_in.u) /* { dg-error "redeclaration of" } */
void
f3 ()
{
#pragma omp declare reduction (f3: struct U: omp_out.u *= omp_in.u) /* { dg-error "previous" } */
#pragma omp declare reduction (f3: struct U: omp_out.u += omp_in.u) /* { dg-error "redeclaration of" } */
}
struct V
{
#pragma omp declare reduction (bar: struct S: omp_out.s *= omp_in.s) /* { dg-error "not at file or block scope" } */
#pragma omp declare reduction (bar: struct S: omp_out.s += omp_in.s) /* { dg-error "not at file or block scope" } */
};
#pragma omp declare reduction (n3: long: omp_out += omp_in) /* { dg-error "previous" } */
#pragma omp declare reduction (n3: long int: omp_out += omp_in) /* { dg-error "redeclaration of" } */
#pragma omp declare reduction (n3: short unsigned: omp_out += omp_in)
#pragma omp declare reduction (n3: short int: omp_out += omp_in)
void
f4 (void)
{
#pragma omp declare reduction (f4: long: omp_out += omp_in) /* { dg-error "previous" } */
#pragma omp declare reduction (f4: long int: omp_out += omp_in) /* { dg-error "redeclaration of" } */
#pragma omp declare reduction (f4: short unsigned: omp_out += omp_in)
#pragma omp declare reduction (f4: short int: omp_out += omp_in)
}
void
f5 (void)
{
#pragma omp declare reduction (+: struct S: omp_out.s += omp_in.s) initializer (omp_priv) /* { dg-error "expected" } */
#pragma omp declare reduction (+: struct T: omp_out.t += omp_in.t) initializer (omp_priv ()) /* { dg-error "expected" } */
}
void
f6 (a, b)
#pragma omp declare reduction (bar: struct S: omp_out.s *= omp_in.s) /* { dg-error "expected declaration specifiers before" } */
int a;
int b;
{
}
|
GB_binop__first_int16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_int16
// A.*B function (eWiseMult): GB_AemultB__first_int16
// A*D function (colscale): GB_AxD__first_int16
// D*A function (rowscale): GB_DxB__first_int16
// C+=B function (dense accum): GB_Cdense_accumB__first_int16
// C+=b function (dense accum): GB_Cdense_accumb__first_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int16
// C=scalar+B GB_bind1st__first_int16
// C=scalar+B' GB_bind1st_tran__first_int16
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = x ;
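// FIRST keeps the A-side value: GB_BINOP(cij, aij, bij, i, j) expands to
// "cij = aij ;", so the y, i and j arguments are ignored for this operator.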
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT16 || GxB_NO_FIRST_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__first_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__first_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__first_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__first_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__first_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__first_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__first_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__first_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB_bind1st_tran__first_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
partition.h
|
//===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <[email protected]>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_PARTITION_H
#define RIPPLES_PARTITION_H
#include <algorithm>
#include <iostream>
#include "ripples/utility.h"
namespace ripples {
//! Sequential swap ranges.
//!
//! \tparam ItrTy1 The iterator type of the first sequence.
//! \tparam ItrTy2 The iterator type of the second sequence.
//!
//! \param B The begin of the first sequence.
//! \param E The end of the first sequence.
//! \param O The begin of the second sequence.
//! \return The iterator to the one-past last element swapped.
template <typename ItrTy1, typename ItrTy2>
ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, sequential_tag) {
return std::swap_ranges(B, E, O);
}
template <typename ItrTy1, typename ItrTy2>
ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, size_t num_threads) {
size_t toBeSwaped = std::distance(B, E);
#pragma omp parallel for num_threads(num_threads)
for (size_t i = 0; i < toBeSwaped; ++i) {
std::iter_swap(B + i, O + i);
}
return O + toBeSwaped;
}
//! Parallel swap ranges.
//!
//! \tparam ItrTy1 The iterator type of the first sequence.
//! \tparam ItrTy2 The iterator type of the second sequence.
//!
//! \param B The begin of the first sequence.
//! \param E The end of the first sequence.
//! \param O The begin of the second sequence.
//! \return The iterator to the one-past last element swapped.
template <typename ItrTy1, typename ItrTy2>
ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, omp_parallel_tag) {
size_t num_threads(0);
#pragma omp single
{ num_threads = omp_get_max_threads(); }
return swap_ranges(B, E, O, num_threads);
}
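// A short usage sketch (illustrative only, not part of the library), assuming
// omp_parallel_tag is default-constructible:
//
//   std::vector<int> a(1000, 1), b(1000, 2);
//   ripples::swap_ranges(a.begin(), a.end(), b.begin(),
//                        ripples::omp_parallel_tag{});
//   // a now holds 2s and b holds 1s.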
namespace {
template <typename ItrTy, typename ex_tag = omp_parallel_tag>
struct PartitionIndices {
ItrTy begin;
ItrTy end;
ItrTy pivot;
PartitionIndices() : begin(), end(), pivot() {}
PartitionIndices(PartitionIndices &&O)
: begin{std::move(O.begin)},
end{std::move(O.end)},
pivot{std::move(O.pivot)} {}
PartitionIndices &operator=(PartitionIndices &&O) {
this->begin = std::move(O.begin);
this->end = std::move(O.end);
this->pivot = std::move(O.pivot);
return *this;
}
PartitionIndices(const PartitionIndices &O)
: begin{O.begin}, end{O.end}, pivot{O.pivot} {}
PartitionIndices &operator=(const PartitionIndices &O) {
this->begin = O.begin;
this->end = O.end;
this->pivot = O.pivot;
return *this;
}
PartitionIndices(ItrTy B, ItrTy E, ItrTy P) : begin{B}, end{E}, pivot{P} {}
PartitionIndices(ItrTy B, ItrTy E) : PartitionIndices(B, E, E) {}
bool operator==(const PartitionIndices &O) const {
return this->begin == O.begin && this->end == O.end &&
this->pivot == O.pivot;
}
PartitionIndices mergeBlocks(const PartitionIndices &O, size_t num_threads) {
PartitionIndices result(*this);
if (this->pivot == this->begin && O.pivot == O.begin) {
result.end = O.end;
return result;
} else if (this->pivot == this->end) {
result.end = O.end;
result.pivot = O.pivot;
return result;
}
if (std::distance(this->pivot, this->end) <
std::distance(O.begin, O.pivot)) {
size_t toBeMoved = std::distance(this->pivot, this->end);
swap_ranges(this->pivot, this->end, std::prev(O.pivot, toBeMoved),
num_threads);
result.pivot = std::prev(O.pivot, toBeMoved);
} else {
result.pivot = swap_ranges(O.begin, O.pivot, this->pivot, num_threads);
}
result.end = O.end;
return result;
}
// PartitionIndices operator+(const PartitionIndices &O) {
// PartitionIndices result(*this);
// if (this->pivot == this->begin && O.pivot == O.begin) {
// result.end = O.end;
// return result;
// } else if (this->pivot == this->end) {
// result.end = O.end;
// result.pivot = O.pivot;
// return result;
// }
// if (std::distance(this->pivot, this->end) <
// std::distance(O.begin, O.pivot)) {
// size_t toBeMoved = std::distance(this->pivot, this->end);
// swap_ranges(this->pivot, this->end, std::prev(O.pivot, toBeMoved),
// ex_tag{});
// result.pivot = std::prev(O.pivot, toBeMoved);
// } else {
// result.pivot = swap_ranges(O.begin, O.pivot, this->pivot, ex_tag{});
// }
// result.end = O.end;
// return result;
// }
};
} // namespace
template <typename ItrTy, typename UnaryPredicate>
ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, size_t num_threads) {
std::vector<PartitionIndices<ItrTy>> indices(num_threads,
PartitionIndices<ItrTy>(B, E));
#pragma omp parallel num_threads(num_threads)
{
size_t num_elements = std::distance(B, E);
size_t threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads();
size_t low = num_elements * threadnum / numthreads,
high = num_elements * (threadnum + 1) / numthreads;
indices[threadnum].begin = B + low;
indices[threadnum].end = std::min(E, B + high);
indices[threadnum].pivot =
std::partition(indices[threadnum].begin, indices[threadnum].end, P);
}
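  // Merge the per-thread partitions pairwise, tree style: pass j combines
  // blocks that are j threads wide into blocks 2*j wide, so after
  // ceil(log2(num_threads)) passes indices[0] spans the whole range and its
  // pivot is the global partition point.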
for (size_t j = 1; j < num_threads; j <<= 1) {
#pragma omp parallel num_threads(num_threads >> j)
{
#pragma omp for schedule(dynamic)
for (size_t i = 0; i < (num_threads - j); i += j * 2) {
indices[i] = indices[i].mergeBlocks(indices[i + j],
std::min(2 * j, num_threads));
}
}
}
return indices[0].pivot;
}
//! Reorder a sequence in such a way that all the elements for which a predicate
//! is true precede those for which the predicate is false.
//! \tparam ItrTy The type of the iterator of the input sequence.
//! \tparam UnaryPredicate The type of a unary predicate object.
//!
//! \param B The start of the sequence to be partitioned.
//! \param E The end of the sequence to be partitioned.
//! \param P A C++ callable object implementing the predicate.
//! \return An iterator to the first element for which the predicate is false.
template <typename ItrTy, typename UnaryPredicate>
ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, omp_parallel_tag) {
size_t num_threads(1);
#pragma omp single
{ num_threads = omp_get_max_threads(); }
return partition(B, E, P, num_threads);
}
//! Reorder a sequence in such a way that all the elements for which a predicate
//! is true precede those for which the predicate is false.
//!
//! \tparam ItrTy The type of the iterator of the input sequence.
//! \tparam UnaryPredicate The type of a unary predicate object.
//!
//! \param B The start of the sequence to be partitioned.
//! \param E The end of the sequence to be partitioned.
//! \param P A C++ callable object implementing the predicate.
//! \return An iterator to the first element for which the predicate is false.
template <typename ItrTy, typename UnaryPredicate>
ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, sequential_tag) {
return std::partition(B, E, P);
}
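// A minimal usage sketch (illustrative only, not part of the library): move the
// even numbers of a vector to the front using the OpenMP-parallel overload,
// again assuming the tag type can be constructed as omp_parallel_tag{}:
//
//   #include <vector>
//   #include "ripples/partition.h"
//
//   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8};
//   auto pivot = ripples::partition(v.begin(), v.end(),
//                                   [](int x) { return x % 2 == 0; },
//                                   ripples::omp_parallel_tag{});
//   // [v.begin(), pivot) now holds the even elements,
//   // [pivot, v.end()) the odd ones.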
} // namespace ripples
#endif /* RIPPLES_PARTITION_H */
|
GB_binop__plus_uint64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint64)
// A*D function (colscale): GB (_AxD__plus_uint64)
// D*A function (rowscale): GB (_DxB__plus_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint64)
// C=scalar+B GB (_bind1st__plus_uint64)
// C=scalar+B' GB (_bind1st_tran__plus_uint64)
// C=A+scalar GB (_bind2nd__plus_uint64)
// C=A'+scalar GB (_bind2nd_tran__plus_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wem.c
|
#include "seismic.h"
#include "wem.h"
void wem(float **d, float **m, float **wav,
int nt, float ot, float dt,
int nmx,float omx, float dmx,
int nmy,float omy, float dmy,
float sx,float sy,
int nz, float oz, float dz, float gz, float sz,
float **vel, int nref, float fmin, float fmax,
int padt, int padx,
bool adj, bool pspi, bool verbose)
/*< wave equation depth migration operator. Can specify different velocities for src and rec side wavefields. >*/
{
int iz,ix,imx,imy,igx,igy,ik,iw,it,nw,nkx,nky,ntfft;
float dw,dkx,dky,w;
int ifmin,ifmax;
float *d_t;
complex *d_w,**d_g_wx,**d_s_wx;
fftwf_complex *a,*b;
int *n;
fftwf_plan p1,p2;
float *po,**pd;
float progress;
int ithread,nthread;
float **m_threads;
/* decompose slowness into layer average, and layer perturbation */
po = alloc1float(nz);
pd = alloc2float(nz,nmx*nmy);
for (iz=0;iz<nz;iz++){
po[iz] = 0.;
for (ix=0;ix<nmx*nmy;ix++) po[iz] += vel[ix][iz];
po[iz] /= (float) nmx*nmy;
po[iz] = 1./po[iz];
for (ix=0;ix<nmx*nmy;ix++) pd[ix][iz] = 1.0/vel[ix][iz] - po[iz];
}
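/* the split-step operator (ssop) applies the phase shift for the laterally
   averaged slowness po[iz] in the wavenumber domain and the residual
   pd[ix][iz] as a space-domain phase correction */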
/**/ printf("break 1.0 wem.c\n");
/****************************************************************************************/
float **vref,vmin,vmax,v;
int **iref1,**iref2;
int iref;
/* generate reference velocities for each depth step */
vref = alloc2float(nz,nref); /* reference velocities for each layer */
iref1 = alloc2int(nz,nmx*nmy); /* index of nearest lower reference velocity for each subsurface point */
iref2 = alloc2int(nz,nmx*nmy); /* index of nearest upper reference velocity for each subsurface point */
for (iz=0;iz<nz;iz++){
vmin=vel[0][iz];
for (ix=0;ix<nmx*nmy;ix++) if (vel[ix][iz] < vmin) vmin = vel[ix][iz];
vmax=vel[nmx-1][iz];
for (ix=0;ix<nmx*nmy;ix++) if (vel[ix][iz] > vmax) vmax = vel[ix][iz];
for (iref=0;iref<nref;iref++) vref[iref][iz] = vmin + (float) iref*(vmax-vmin)/((float) nref-1);
for (ix=0;ix<nmx*nmy;ix++){
v = vel[ix][iz];
if (vmax>vmin+10){
iref = (int) (nref-1)*(v-vmin)/(vmax-vmin);
iref1[ix][iz] = iref;
iref2[ix][iz] = iref+1;
if (iref>nref-2){
iref1[ix][iz] = nref-1;
iref2[ix][iz] = nref-1;
}
}
else{
iref1[ix][iz] = 0;
iref2[ix][iz] = 0;
}
}
}
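/* phase shift plus interpolation (PSPI): pspiop extrapolates the wavefield
   once per reference velocity and, at each lateral position, takes the result
   of (or interpolates between) the two nearest reference velocities indexed
   by iref1 and iref2 */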
/****************************************************************************************/
if (adj){
for (ix=0;ix<nmx*nmy;ix++) for (iz=0;iz<nz;iz++) m[ix][iz] = 0.;
}
else{
for (ix=0;ix<nmx*nmy;ix++) for (it=0;it<nt;it++) d[ix][it] = 0.;
}
ntfft = (int) 2*padt*((float) nt)/2;
nw = (int) ntfft/2 + 1;
nkx = nmx > 1 ? padx*nmx : 1;
nky = nmy > 1 ? padx*nmy : 1;
dkx = 2*PI/((float) nkx)/dmx;
dky = 2*PI/((float) nky)/dmy;
dw = 2*PI/((float) ntfft)/dt;
if(fmax*dt*ntfft+1<nw) ifmax = (int) fmax*dt*ntfft + 1;
else ifmax = nw;
if(fmin*dt*ntfft+1<ifmax) ifmin = (int) fmin*dt*ntfft;
else ifmin = 0;
d_g_wx = alloc2complex(nw,nmx*nmy);
d_s_wx = alloc2complex(nw,nmx*nmy);
d_t = alloc1float(nt);
d_w = alloc1complex(nw);
for (it=0;it<nt;it++) d_t[it] = 0.;
for (iw=0;iw<nw;iw++) d_w[iw] = 0.;
/**/ printf("break 2.0 wem.c\n");
/* set up fftw plans and pass them to the OMP region of the code */
a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
n = alloc1int(2);
n[0] = nkx;
n[1] = nky;
p1 = fftwf_plan_dft(2, n, a, a, FFTW_FORWARD, FFTW_ESTIMATE);
p2 = fftwf_plan_dft(2, n, b, b, FFTW_BACKWARD, FFTW_ESTIMATE);
for (ik=0;ik<nkx*nky;ik++){
a[ik] = 0.;
b[ik] = 0.;
}
fftwf_execute_dft(p1,a,a);
fftwf_execute_dft(p2,b,b);
/**********************************************************************/
igx = (int) (sx - omx)/dmx; /*position to inject source in x-dir*/
igy = (int) (sy - omy)/dmy; /*position to inject source in y-dir*/
/* source wavefield*/
for (ix=0;ix<nmx*nmy;ix++) for (iw=0;iw<nw;iw++) d_s_wx[ix][iw] = 0.;
for (it=0;it<nt;it++) d_t[it] = wav[0][it];
/**/ printf("break 3.0 wem.c\n");
f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
/**/ printf("break 3.5 wem.c\n");
for (iw=0;iw<nw;iw++) d_s_wx[igx*nmy + igy][iw] = d_w[iw];
/**/ printf("break 4.0 wem.c\n");
/* receiver wavefield*/
if (adj){
for (ix=0;ix<nmx*nmy;ix++){
for (it=0;it<nt;it++) d_t[it] = d[ix][it];
f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
for (iw=0;iw<ifmin;iw++) d_g_wx[ix][iw] = 0.;
for (iw=ifmin;iw<ifmax;iw++){
w = iw*dw;
d_g_wx[ix][iw] = w*w*d_w[iw];
}
for (iw=ifmax;iw<nw;iw++) d_g_wx[ix][iw] = 0.;
}
}
else{
/**/ printf("break 5.0 wem.c\n");
for (ix=0;ix<nmx*nmy;ix++){
for (iw=0;iw<nw;iw++){
d_g_wx[ix][iw] = 0.;
}
}
}
nthread = omp_thread_count();
if (adj){
m_threads = alloc2float(nz,nmx*nmy*nthread);
for (imx=0;imx<nmx;imx++){
for (imy=0;imy<nmy;imy++){
for (ithread=0;ithread<nthread;ithread++){
for (iz=0;iz<nz;iz++){
m_threads[imx*nmy*nthread + imy*nthread + ithread][iz] = 0.;
}
}
}
}
}
else{
m_threads = alloc2float(nz,nmx*nmy);
for (imx=0;imx<nmx;imx++){
for (imy=0;imy<nmy;imy++){
for (iz=0;iz<nz;iz++){
m_threads[imx*nmy + imy][iz] = m[imx*nmy + imy][iz];
}
}
}
}
progress = 0.;
#pragma omp parallel for private(iw) shared(m_threads,d_g_wx,d_s_wx)
for (iw=ifmin;iw<ifmax;iw++){
progress += 1./((float) ifmax - ifmin);
if (verbose) progress_msg(progress);
extrap1f(m_threads,d_g_wx,d_s_wx,iw,ifmax,nw,ifmax,ntfft,dw,dkx,dky,nkx,nky,nz,oz,dz,gz,sz,nmx,omx,dmx,nmy,omy,dmy,nthread,vel,po,pd,vref,iref1,iref2,nref,p1,p2,adj,pspi,verbose);
}
if (verbose) fprintf(stderr,"\n");
if (adj){
// reduction over parallel axis
for (imx=0;imx<nmx;imx++) for (imy=0;imy<nmy;imy++) for (ithread=0;ithread<nthread;ithread++) for (iz=0;iz<nz;iz++) m[imx*nmy + imy][iz] += m_threads[imx*nmy*nthread + imy*nthread + ithread][iz];
}
else{
for (ix=0;ix<nmx*nmy;ix++){
for (iw=0;iw<ifmin;iw++) d_w[iw] = 0.;
for (iw=ifmin;iw<ifmax;iw++){
w = iw*dw;
d_w[iw] = w*w*d_g_wx[ix][iw];
}
for (iw=ifmax;iw<nw;iw++) d_w[iw] = 0.;
f_op(d_w,d_t,nw,nt,0); /* d_w to d_t */
for (it=0;it<nt;it++) d[ix][it] = d_t[it];
}
}
/**/ printf("break 6.0 wem.c\n");
free1int(n);
fftwf_free(a);
fftwf_free(b);
fftwf_destroy_plan(p1);
fftwf_destroy_plan(p2);
free1float(d_t);
free1complex(d_w);
free2float(m_threads);
free2complex(d_g_wx);
free2complex(d_s_wx);
free2float(vref);
free2int(iref1);
free2int(iref2);
return;
}
void extrap1f(float **m,complex **d_g_wx, complex **d_s_wx,
int iw, int ang_iw_max, int nw,int ifmax,int ntfft,float dw,float dkx,float dky,int nkx,int nky,
int nz, float oz, float dz, float gz, float sz,
int nmx,float omx, float dmx,
int nmy,float omy, float dmy,
int nthread,
float **v,float *po,float **pd,
float **vref, int **iref1, int **iref2, int nref,
fftwf_plan p1,fftwf_plan p2,
bool adj, bool pspi, bool verbose)
/*< extrapolate 1 frequency >*/
{
float w,factor,z;
int iz,ix,imx,imy,ithread;
complex *d_xg,*d_xs,**smig;
ithread = omp_get_thread_num();
d_xg = alloc1complex(nmx*nmy);
d_xs = alloc1complex(nmx*nmy);
for (ix=0;ix<nmx*nmy;ix++) d_xg[ix] = 0.;
for (ix=0;ix<nmx*nmy;ix++) d_xs[ix] = 0.;
if (iw==0) factor = 1.;
else factor = 2.;
w = iw*dw;
if (adj){
for (ix=0;ix<nmx*nmy;ix++){
d_xs[ix] = d_s_wx[ix][iw]/sqrtf((float) ntfft);
d_xg[ix] = d_g_wx[ix][iw]/sqrtf((float) ntfft);
}
for (iz=0;iz<nz;iz++){ // extrapolate source and receiver wavefields
z = oz + dz*iz;
if (z >= sz){
if (pspi) pspiop(d_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,vref,iref1,iref2,nref,p1,p2,true,true,verbose);
else ssop(d_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,p1,p2,true,true,verbose);
}
if (z >= gz){
if (pspi) pspiop(d_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,dz,iz,v,po,pd,vref,iref1,iref2,nref,p1,p2,true,false,verbose);
else ssop(d_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,dz,iz,v,po,pd,p1,p2,true,false,verbose);
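// cross-correlation imaging condition: accumulate Re{ d_g * conj(d_s) } into
// this thread's private copy of the image; factor is 2 for all frequencies
// except w=0 to account for the one-sided frequency sum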
for (imx=0;imx<nmx;imx++){
for (imy=0;imy<nmy;imy++){
m[imx*nmy*nthread + imy*nthread + ithread][iz] += factor*crealf(d_xg[imx*nmy + imy]*conjf(d_xs[imx*nmy + imy]));
}
}
}
}
}
else{
smig = alloc2complex(nz,nmx*nmy);
for (ix=0;ix<nmx*nmy;ix++) d_xs[ix] = d_s_wx[ix][iw]/sqrtf((float) ntfft);
for (iz=0;iz<nz;iz++){ // extrapolate source wavefield
z = oz + dz*iz;
if (z >= sz){
if (pspi) pspiop(d_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,vref,iref1,iref2,nref,p1,p2,true,true,verbose);
else ssop(d_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,p1,p2,true,true,verbose);
for (ix=0;ix<nmx*nmy;ix++) smig[ix][iz] = d_xs[ix];
}
else{
for (ix=0;ix<nmx*nmy;ix++) smig[ix][iz] = 0.;
}
}
for (ix=0;ix<nmx*nmy;ix++) d_xg[ix] = 0.;
for (iz=nz-1;iz>=0;iz--){ // extrapolate receiver wavefield
z = oz + dz*iz;
if (z >= gz){
for (ix=0;ix<nmx*nmy;ix++){
d_xg[ix] += m[ix][iz]*smig[ix][iz];
}
if (pspi) pspiop(d_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,vref,iref1,iref2,nref,p1,p2,false,false,verbose);
else ssop(d_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v,po,pd,p1,p2,false,false,verbose);
}
}
for (ix=0;ix<nmx*nmy;ix++){
d_g_wx[ix][iw] = d_xg[ix]/sqrtf((float) ntfft);
}
free2complex(smig);
}
free1complex(d_xs);
free1complex(d_xg);
return;
}
void ssop(complex *d_x,
float w,float dkx,float dky,int nkx,int nky,int nmx,float omx,float dmx,int nmy,float omy,float dmy,float dz,int iz,
float **v,float *po,float **pd,
fftwf_plan p1,fftwf_plan p2,
bool adj,
bool src,
bool verbose)
{
float kx,ky,s;
complex L;
int ik,ikx,iky,imx,imy;
complex *d_k;
fftwf_complex *a,*b;
int lmx,lmy;
if (nmx>100) lmx=30;
else lmx=0;
if (nmy>100) lmy=30;
else lmy=0;
a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
d_k = alloc1complex(nkx*nky);
if (adj){
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy) a[imx*nky + imy] = d_x[imx*nmy + imy];
else a[imx*nky + imy] = 0.;
}
}
}
else{
boundary_condition(d_x,nmx,lmx,nmy,lmy);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
L = cexpf(I*w*pd[imx*nmy + imy][iz]*dz);
a[imx*nky + imy] = d_x[imx*nmy + imy]*L; // SS operator
}
else a[imx*nky + imy] = 0.;
}
}
}
fftwf_execute_dft(p1,a,a);
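// wavenumber-domain phase shift: for propagating components
// (s = w^2*po^2 - kx^2 - ky^2 >= 0) apply exp(i*sqrt(s)*dz); evanescent
// components (s < 0) are exponentially damped instead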
for (ikx=0;ikx<nkx;ikx++){
if (ikx<= (int) nkx/2) kx = (float) dkx*ikx;
else kx = -((float) dkx*nkx - dkx*ikx);
for (iky=0;iky<nky;iky++){
if (iky<= (int) nky/2) ky = (float) dky*iky;
else ky = -((float) dky*nky - dky*iky);
s = (w*w)*(po[iz]*po[iz]) - (kx*kx) - (ky*ky);
if (s>=0) L = cexp(I*sqrtf(s)*dz);
else L = cexp(-0.2*sqrtf(fabs(s))*fabs(dz));
d_k[ikx*nky + iky] = ((complex) a[ikx*nky + iky])*L/sqrtf((float) nkx*nky);
}
}
for(ik=0; ik<nkx*nky;ik++) b[ik] = (fftwf_complex) d_k[ik];
fftwf_execute_dft(p2,b,b);
if (adj){
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
L = cexpf(I*w*pd[imx*nmy + imy][iz]*dz);
d_x[imx*nmy + imy] = ((complex) b[imx*nky + imy])*L/sqrtf((float) nkx*nky); // SS operator
}
}
}
boundary_condition(d_x,nmx,lmx,nmy,lmy);
}
else{
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
d_x[imx*nmy + imy] = ((complex) b[imx*nky + imy])/sqrtf((float) nkx*nky);
}
}
}
}
free1complex(d_k);
fftwf_free(a);
fftwf_free(b);
return;
}
void pspiop(complex *d_x,
float w,float dkx,float dky,int nkx,int nky,
int nmx,float omx,float dmx,int nmy,float omy,float dmy,
float dz,int iz,
float **vel,float *po,float **pd,
float **vref, int **iref1, int **iref2, int nref,
fftwf_plan p1,fftwf_plan p2,
bool adj,
bool src,
bool verbose)
{
float kx,ky,s;
complex L;
int ik,ikx,iky,imx,imy;
complex *d_k;
fftwf_complex *a,*b;
int lmx,lmy;
complex **dref;
int iref;
float v,vref1,vref2;
if (nmx>100) lmx=30;
else lmx=0;
if (nmy>100) lmy=30;
else lmy=0;
a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
dref = alloc2complex(nref,nmx*nmy);
d_k = alloc1complex(nkx*nky);
if (adj){
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy) a[imx*nky + imy] = d_x[imx*nmy + imy];
else a[imx*nky + imy] = 0.;
}
}
}
else{
// boundary_condition(d_x,nmx,lmx,nmy,lmy);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy) a[imx*nky + imy] = d_x[imx*nmy + imy];
else a[imx*nky + imy] = 0.;
}
}
}
fftwf_execute_dft(p1,a,a);
for (iref=0;iref<nref;iref++){
for (ikx=0;ikx<nkx;ikx++){
if (ikx<= (int) nkx/2) kx = (float) dkx*ikx;
else kx = -((float) dkx*nkx - dkx*ikx);
for (iky=0;iky<nky;iky++){
if (iky<= (int) nky/2) ky = (float) dky*iky;
else ky = -((float) dky*nky - dky*iky);
s = (w*w)*(1/(vref[iref][iz]*vref[iref][iz])) - (kx*kx) - (ky*ky);
if (s>=0) L = cexpf(I*sqrtf(s)*dz);
else L = cexpf(-0.2*sqrtf(fabs(s))*fabs(dz));
d_k[ikx*nky + iky] = ((complex) a[ikx*nky + iky])*L/sqrtf((float) nkx*nky);
}
}
for(ik=0; ik<nkx*nky;ik++) b[ik] = (fftwf_complex) d_k[ik];
fftwf_execute_dft(p2,b,b);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
dref[imx*nmy + imy][iref] = ((complex) b[imx*nky + imy])/sqrtf((float) nkx*nky);
}
}
}
}
for (imx=0;imx<nmx*nmy;imx++){
v = vel[imx][iz];
vref1 = vref[iref1[imx][iz]][iz];
vref2 = vref[iref2[imx][iz]][iz];
if (vref2 - vref1 > 10.0){
d_x[imx] = linear_interp(dref[imx][iref1[imx][iz]],dref[imx][iref2[imx][iz]],vref1,vref2,v);
}
else{
d_x[imx] = dref[imx][iref1[imx][iz]];
}
}
//if (adj){
// boundary_condition(d_x,nmx,lmx,nmy,lmy);
//}
free1complex(d_k);
fftwf_free(a);
fftwf_free(b);
free2complex(dref);
return;
}
float linear_interp(complex y1,complex y2,float x1,float x2,float x)
/*< linear interpolation between two points. x2-x1 must be nonzero. >*/
{
//return y1 + (y2-y1)*(x-x1)/(x2-x1);
return y1;
}
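/* temporal Fourier transform: adj=true maps a real trace d[0..nt) to its
   one-sided spectrum m[0..nw) with an FFTW r2c plan (ntfft = (nw-1)*2);
   adj=false is the corresponding c2r inverse */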
void f_op(complex *m,float *d,int nw,int nt,bool adj)
{
fftwf_complex *out1a,*in1b;
float *in1a,*out1b;
int ntfft,it,iw;
fftwf_plan p1a,p1b;
ntfft = (nw-1)*2;
if (adj){ /* data --> model */
/**/ printf("break 0.1 f_op\n");
out1a = fftwf_malloc(sizeof(fftwf_complex) * nw);
in1a = alloc1float(ntfft);
/**/ printf("break 0.2 f_op\n");
p1a = fftwf_plan_dft_r2c_1d(ntfft, in1a, (fftwf_complex*)out1a, FFTW_ESTIMATE);
for(it=0;it<nt;it++) in1a[it] = d[it];
for(it=nt;it<ntfft;it++) in1a[it] = 0.;
/**/ printf("break 0.3 f_op\n");
fftwf_execute(p1a);
for(iw=0;iw<nw;iw++) m[iw] = out1a[iw];
/**/ printf("break 0.4 f_op\n");
fftwf_destroy_plan(p1a);
fftwf_free(in1a); fftwf_free(out1a);
/**/ printf("break 0.5 f_op\n");
}
else{ /* model --> data */
/**/ printf("break 1.0 f_op\n");
out1b = alloc1float(ntfft);
in1b = fftwf_malloc(sizeof(fftwf_complex) * ntfft);
p1b = fftwf_plan_dft_c2r_1d(ntfft, (fftwf_complex*)in1b, out1b, FFTW_ESTIMATE);
/**/ printf("break 2.0 f_op\n");
for(iw=0;iw<nw;iw++) in1b[iw] = m[iw];
for(iw=nw;iw<ntfft;iw++) in1b[iw] = 0.;
fftwf_execute(p1b);
/**/ printf("break 3.0 f_op\n");
for(it=0;it<nt;it++) d[it] = out1b[it];
fftwf_destroy_plan(p1b);
fftwf_free(in1b); fftwf_free(out1b);
/**/ printf("break 4.0 f_op\n");
}
return;
}
void progress_msg(float progress)
{
fprintf(stderr,"\r[%6.2f%% complete] ",progress*100);
return;
}
float signf(float a)
/*< sign of a float >*/
{
float b;
if (a>0.) b = 1.;
else if (a<0.) b =-1.;
else b = 0.;
return b;
}
int compare (const void * a, const void * b)
{
float fa = *(const float*) a;
float fb = *(const float*) b;
return (fa > fb) - (fa < fb);
}
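/* count the threads an OpenMP parallel region actually launches: each thread
   contributes 1 to the reduction variable n */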
int omp_thread_count() {
int n = 0;
#pragma omp parallel reduction(+:n)
n += 1;
return n;
}
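/* taper the lateral edges of the wavefield with a Gaussian weight over the
   outermost lmx (lmy) samples to attenuate artificial reflections from the
   computational boundary */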
void boundary_condition(complex *d_x,int nmx,int lmx,int nmy,int lmy)
{
int imx,imy;
float tmx,tmy;
tmx = 1.;tmy = 1.;
for (imx=0;imx<nmx;imx++){
if (imx>=0 && imx<lmx) tmx = expf(-powf(0.015*((float) lmx - imx),2.));
if (imx>=lmx && imx<=nmx-lmx) tmx = 1.;
if (imx>nmx-lmx && imx<nmx) tmx = expf(-powf(0.015*((float) imx - nmx + lmx),2.));
for (imy=0;imy<nmy;imy++){
if (imy>=0 && imy<lmy) tmy = expf(-powf(0.015*((float) lmy - imy),2.));
if (imy>=lmy && imy<=nmy-lmy) tmy = 1.;
if (imy>nmy-lmy && imy<nmy) tmy = expf(-powf(0.015*((float) imy - nmy + lmy),2.));
d_x[imx*nmy + imy] *= tmx*tmy;
}
}
return;
}
void compute_angles(float **angx, float **angy, float **wav,
int nt, float ot, float dt,
int nmx,float omx, float dmx,
int nmy,float omy, float dmy,
float sx,float sy,
int nz, float oz, float dz, float sz,
float **vel_p, float fmin, float fmax,
bool verbose)
/*< source side incidence angle computation using the one way wave equation. >*/
{
int iz,ix,igx,igy,ik,iw,it,nw,nkx,nky,nkz,ntfft;
float dw,dkx,dky,dkz;
int ifmin,ifmax;
float *d_t;
complex *d_w,**d_s_wx;
fftwf_complex *a1,*b1;
fftwf_complex *a2,*b2;
int *n1,*n2;
fftwf_plan p1,p2,p3,p4;
float *po_p,**pd_p;
float progress;
float **angx_sign,**angy_sign;
ntfft = nt;
nw = (int) ntfft/2 + 1;
nkx = nmx;
nky = nmy;
nkz = nz;
dkx = 2*PI/((float) nkx)/dmx;
dky = 2*PI/((float) nky)/dmy;
dkz = 2*PI/((float) nkz)/dz;
dw = 2*PI/((float) ntfft)/dt;
if(fmax*dt*ntfft+1<nw) ifmax = (int) fmax*dt*ntfft + 1;
else ifmax = nw;
if(fmin*dt*ntfft+1<ifmax) ifmin = (int) fmin*dt*ntfft;
else ifmin = 0;
d_s_wx = alloc2complex(nw,nmx*nmy);
d_t = alloc1float(nt);
d_w = alloc1complex(nw);
for (it=0;it<nt;it++) d_t[it] = 0.;
for (iw=0;iw<nw;iw++) d_w[iw] = 0.;
/* decompose slowness into layer average, and layer perturbation */
po_p = alloc1float(nz);
pd_p = alloc2float(nz,nmx*nmy);
for (iz=0;iz<nz;iz++){
po_p[iz] = 0.;
for (ix=0;ix<nmx*nmy;ix++) po_p[iz] += vel_p[ix][iz];
po_p[iz] /= (float) nmx*nmy;
po_p[iz] = 1./po_p[iz];
for (ix=0;ix<nmx*nmy;ix++) pd_p[ix][iz] = 1.0/vel_p[ix][iz] - po_p[iz];
}
// set up fftw plans and pass them to the OMP region of the code
// plan for extrapolation
a1 = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
b1 = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
n1 = alloc1int(2); n1[0] = nkx; n1[1] = nky;
p1 = fftwf_plan_dft(2, n1, a1, a1, FFTW_FORWARD, FFTW_ESTIMATE);
p2 = fftwf_plan_dft(2, n1, b1, b1, FFTW_BACKWARD, FFTW_ESTIMATE);
for (ik=0;ik<nkx*nky;ik++) a1[ik] = 0.;
for (ik=0;ik<nkx*nky;ik++) b1[ik] = 0.;
fftwf_execute_dft(p1,a1,a1);
fftwf_execute_dft(p2,b1,b1);
// plan for calculation of spatial derivatives
a2 = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
b2 = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
n2 = alloc1int(3); n2[0] = nkx; n2[1] = nky; n2[2] = nz;
p3 = fftwf_plan_dft(3, n2, a2, a2, FFTW_FORWARD, FFTW_ESTIMATE);
p4 = fftwf_plan_dft(3, n2, b2, b2, FFTW_BACKWARD, FFTW_ESTIMATE);
for (ik=0;ik<nkx*nky*nkz;ik++) a2[ik] = 0.;
for (ik=0;ik<nkx*nky*nkz;ik++) b2[ik] = 0.;
fftwf_execute_dft(p3,a2,a2);
fftwf_execute_dft(p4,b2,b2);
/**********************************************************************/
igx = (int) (sx - omx)/dmx; /*position to inject source in x-dir*/
igy = (int) (sy - omy)/dmy; /*position to inject source in y-dir*/
/* source wavefield*/
for (ix=0;ix<nmx*nmy;ix++) for (iw=0;iw<nw;iw++) d_s_wx[ix][iw] = 0.;
for (it=0;it<nt;it++) d_t[it] = wav[0][it];
f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
for (iw=0;iw<nw;iw++) d_s_wx[igx*nmy + igy][iw] = d_w[iw];
angx_sign = alloc2float(nz,nmx*nmy);
angy_sign = alloc2float(nz,nmx*nmy);
for (iz=0;iz<nz;iz++) for (ix=0;ix<nmx*nmy;ix++) angx_sign[ix][iz] = 0.0;
for (iz=0;iz<nz;iz++) for (ix=0;ix<nmx*nmy;ix++) angy_sign[ix][iz] = 0.0;
progress = 0.;
#pragma omp parallel for private(iw) shared(d_s_wx,angx,angy)
for (iw=ifmin;iw<ifmax;iw++){
progress += 1./((float) ifmax - ifmin);
if (verbose) progress_msg(progress);
extrapolate_source(d_s_wx,angx,angy,angx_sign,angy_sign,
iw,nw,ifmin,ifmax,ntfft,dw,
dkx,nkx,nmx,omx,dmx,
dky,nky,nmy,omy,dmy,
dkz,nkz,nz,oz,dz,
sz,
vel_p,po_p,pd_p,
p1,p2,p3,p4,verbose);
}
if (verbose) fprintf(stderr,"\n");
for (iz=0;iz<nz;iz++) for (ix=0;ix<nmx*nmy;ix++) angx[ix][iz] *= signf1(angx_sign[ix][iz])/(float) (ifmax - ifmin + 1);
for (iz=0;iz<nz;iz++) for (ix=0;ix<nmx*nmy;ix++) angy[ix][iz] *= signf1(angy_sign[ix][iz])/(float) (ifmax - ifmin + 1);
free1int(n1);
fftwf_free(a1);
fftwf_free(b1);
free1int(n2);
fftwf_free(a2);
fftwf_free(b2);
fftwf_destroy_plan(p1);
fftwf_destroy_plan(p2);
fftwf_destroy_plan(p3);
fftwf_destroy_plan(p4);
free1float(d_t);
free1complex(d_w);
free2complex(d_s_wx);
free1float(po_p);
free2float(pd_p);
free2float(angx_sign);
free2float(angy_sign);
return;
}
void extrapolate_source(complex **d_s_wx, float **angx, float **angy, float **angx_sign, float **angy_sign,
int iw,int nw,int ifmin,int ifmax,int ntfft,float dw,
float dkx,int nkx,int nmx,float omx,float dmx,
float dky,int nky,int nmy,float omy,float dmy,
float dkz,int nkz,int nz,float oz,float dz,
float sz,
float **v_p,float *po_p,float **pd_p,
fftwf_plan p1,fftwf_plan p2,fftwf_plan p3,fftwf_plan p4,
bool verbose)
/*< extrapolate 1 frequency >*/
{
float w,z;
int iz,ix,imx,imy;
complex *d_xs,*u_s,*u_sx,*u_sy,*u_sz;
float *u_s_signx,*u_s_signy;
d_xs = alloc1complex(nmx*nmy);
u_s = alloc1complex(nmx*nmy*nz);
for (ix=0;ix<nmx*nmy*nz;ix++) u_s[ix] = 0.;
for (ix=0;ix<nmx*nmy;ix++) d_xs[ix] = 0.;
w = iw*dw;
for (ix=0;ix<nmx*nmy;ix++) d_xs[ix] = d_s_wx[ix][iw]/sqrtf((float) ntfft);
for (iz=0;iz<nz;iz++){ // extrapolate source wavefield
z = oz + dz*iz;
if (z >= sz){
ssop_source(d_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,v_p,po_p,pd_p,p1,p2,verbose);
for (imx=0;imx<nmx;imx++) for (imy=0;imy<nmy;imy++) u_s[imx*nmy*nz + imy*nz + iz] = d_xs[imx*nmy + imy];
}
}
u_sx = alloc1complex(nmx*nmy*nz);
u_sy = alloc1complex(nmx*nmy*nz);
u_sz = alloc1complex(nmx*nmy*nz);
u_s_signx = alloc1float(nmx*nmy*nz);
u_s_signy = alloc1float(nmx*nmy*nz);
for (ix=0;ix<nmx*nmy*nz;ix++) u_sx[ix] = 0.;
for (ix=0;ix<nmx*nmy*nz;ix++) u_sy[ix] = 0.;
for (ix=0;ix<nmx*nmy*nz;ix++) u_sz[ix] = 0.;
for (ix=0;ix<nmx*nmy*nz;ix++) u_s_signx[ix] = 1.;
for (ix=0;ix<nmx*nmy*nz;ix++) u_s_signy[ix] = 1.;
calculate_derivatives(u_s,u_sx,u_sy,u_sz,u_s_signx,u_s_signy,dkx,nkx,nmx,omx,dmx,dky,nky,nmy,omy,dmy,dkz,nkz,nz,oz,dz,p3,p4);
for(imx=0;imx<nmx;imx++){
for(imy=0;imy<nmy;imy++){
for(iz=0;iz<nz;iz++){
angx_sign[imx*nmy + imy][iz] += u_s_signx[imx*nmy*nz + imy*nz + iz]/ (float) nw;
angy_sign[imx*nmy + imy][iz] += u_s_signy[imx*nmy*nz + imy*nz + iz]/ (float) nw;
}
}
}
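// propagation angles from the ratio of horizontal to vertical spatial
// derivatives of the source wavefield: ang = atan(|dU/dx| / |dU/dz|) in
// degrees, summed over frequencies here and normalized (with the sign
// recovered from angx_sign/angy_sign) back in compute_angles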
for(imx=0;imx<nmx;imx++){
for(imy=0;imy<nmy;imy++){
for(iz=0;iz<nz;iz++){
angx[imx*nmy + imy][iz] += (180/PI)*atanf(cabs(u_sx[imx*nmy*nz + imy*nz + iz])/(cabs(u_sz[imx*nmy*nz + imy*nz + iz]) + 1e-10));
angy[imx*nmy + imy][iz] += (180/PI)*atanf(cabs(u_sy[imx*nmy*nz + imy*nz + iz])/(cabs(u_sz[imx*nmy*nz + imy*nz + iz]) + 1e-10));
}
}
}
free1complex(d_xs);
free1complex(u_s);
free1complex(u_sx);
free1complex(u_sy);
free1complex(u_sz);
free1float(u_s_signx);
free1float(u_s_signy);
return;
}
void ssop_source(complex *d_x,
float w,float dkx,float dky,int nkx,int nky,int nmx,float omx,float dmx,int nmy,float omy,float dmy,float dz,int iz,
float **v,float *po,float **pd,
fftwf_plan p1,fftwf_plan p2,
bool verbose)
{
float kx,ky,s;
complex L;
int ik,ikx,iky,imx,imy;
complex *d_k;
fftwf_complex *a,*b;
int lmx,lmy;
if (nmx>100) lmx=30;
else lmx=0;
if (nmy>100) lmy=30;
else lmy=0;
a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
d_k = alloc1complex(nkx*nky);
boundary_condition(d_x,nmx,lmx,nmy,lmy);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
L = cexpf(I*w*pd[imx*nmy + imy][iz]*dz);
a[imx*nky + imy] = d_x[imx*nmy + imy]*L; // SS operator
}
else a[imx*nky + imy] = 0.;
}
}
fftwf_execute_dft(p1,a,a);
for (ikx=0;ikx<nkx;ikx++){
if (ikx<= (int) nkx/2) kx = (float) dkx*ikx;
else kx = -((float) dkx*nkx - dkx*ikx);
for (iky=0;iky<nky;iky++){
if (iky<= (int) nky/2) ky = (float) dky*iky;
else ky = -((float) dky*nky - dky*iky);
s = (w*w)*(po[iz]*po[iz]) - (kx*kx) - (ky*ky);
if (s>=0) L = cexpf(I*sqrtf(s)*dz);
else L = cexpf(-0.2*sqrtf(fabs(s))*fabs(dz));
d_k[ikx*nky + iky] = ((complex) a[ikx*nky + iky])*L/sqrtf((float) nkx*nky);
}
}
for(ik=0; ik<nkx*nky;ik++) b[ik] = (fftwf_complex) d_k[ik];
fftwf_execute_dft(p2,b,b);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
if (imx < nmx && imy < nmy){
d_x[imx*nmy + imy] = ((complex) b[imx*nky + imy])/sqrtf((float) nkx*nky);
}
}
}
free1complex(d_k);
fftwf_free(a);
fftwf_free(b);
return;
}
void calculate_derivatives(complex *u_s, complex *u_sx, complex *u_sy, complex *u_sz,
float *u_s_signx, float *u_s_signy,
float dkx, int nkx, int nmx, float omx, float dmx,
float dky, int nky, int nmy, float omy, float dmy,
float dkz, int nkz, int nz, float oz, float dz,
fftwf_plan p3,fftwf_plan p4)
/*< calculate spatial derivatives of the source wavefield >*/
{
fftwf_complex *a,*b,*u_left,*u_right;
int imx,imy,iz;
float kx,ky,kz,kx_left,kx_right,ky_left,ky_right;
/* set up fftw plans */
a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
u_left = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
u_right = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky*nkz);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
a[imx*nky*nkz + imy*nkz + iz] = u_s[imx*nmy*nz + imy*nz + iz];
}
else a[imx*nky*nkz + imy*nkz + iz] = 0.;
}
}
}
fftwf_execute_dft(p3,a,a);
// x component
for(imx=0;imx<nmx;imx++){
if (imx<= (int) nkx/2) kx = (float) dkx*imx;
else kx = -((float) dkx*nkx - dkx*imx);
for(imy=0;imy<nky;imy++){
for(iz=0;iz<nkz;iz++){
b[imx*nky*nkz + imy*nkz + iz] = I*kx*a[imx*nky*nkz + imy*nkz + iz];
}
}
}
fftwf_execute_dft(p4,b,b);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
u_sx[imx*nmy*nz + imy*nz + iz] = b[imx*nky*nkz + imy*nkz + iz]/((float) nkx*nky*nkz);
}
}
}
}
// y component
for(imx=0;imx<nmx;imx++){
for(imy=0;imy<nky;imy++){
if (imy<= (int) nky/2) ky = (float) dky*imy;
else ky = -((float) dky*nky - dky*imy);
for(iz=0;iz<nkz;iz++){
b[imx*nky*nkz + imy*nkz + iz] = I*ky*a[imx*nky*nkz + imy*nkz + iz];
}
}
}
fftwf_execute_dft(p4,b,b);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
u_sy[imx*nmy*nz + imy*nz + iz] = b[imx*nky*nkz + imy*nkz + iz]/((float) nkx*nky*nkz);
}
}
}
}
// z component
for(imx=0;imx<nmx;imx++){
for(imy=0;imy<nky;imy++){
for(iz=0;iz<nkz;iz++){
if (iz<= (int) nkz/2) kz = (float) dkz*iz;
else kz = -((float) dkz*nkz - dkz*iz);
b[imx*nky*nkz + imy*nkz + iz] = I*kz*a[imx*nky*nkz + imy*nkz + iz];
}
}
}
fftwf_execute_dft(p4,b,b);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
u_sz[imx*nmy*nz + imy*nz + iz] = b[imx*nky*nkz + imy*nkz + iz]/((float) nkx*nky*nkz);
}
}
}
}
// sign for x direction
for(imx=0;imx<nmx;imx++){
if (imx<= (int) nkx/2) kx_left = 0.;
else kx_left = -((float) dkx*nkx - dkx*imx);
if (imx<= (int) nkx/2) kx_right = (float) dkx*imx;
else kx_right = 0.;
for(imy=0;imy<nky;imy++){
for(iz=0;iz<nkz;iz++){
u_left[imx*nky*nkz + imy*nkz + iz] = I*kx_left*a[imx*nky*nkz + imy*nkz + iz];
u_right[imx*nky*nkz + imy*nkz + iz] = I*kx_right*a[imx*nky*nkz + imy*nkz + iz];
}
}
}
fftwf_execute_dft(p4,u_left,u_left);
fftwf_execute_dft(p4,u_right,u_right);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
if (cabs(u_left[imx*nky*nkz + imy*nkz + iz]) >= cabs(u_right[imx*nky*nkz + imy*nkz + iz])){
u_s_signx[imx*nmy*nz + imy*nz + iz] = 1.*cabs(u_s[imx*nmy*nz + imy*nz + iz]);
}
else{
u_s_signx[imx*nmy*nz + imy*nz + iz] = -1.*cabs(u_s[imx*nmy*nz + imy*nz + iz]);
}
}
}
}
}
// sign for y direction
for(imx=0;imx<nmx;imx++){
for(imy=0;imy<nky;imy++){
if (imy<= (int) nky/2) ky_left = 0.;
else ky_left = -((float) dky*nky - dky*imy);
if (imy<= (int) nky/2) ky_right = (float) dky*imy;
else ky_right = 0.;
for(iz=0;iz<nkz;iz++){
u_left[imx*nky*nkz + imy*nkz + iz] = I*ky_left*a[imx*nky*nkz + imy*nkz + iz];
u_right[imx*nky*nkz + imy*nkz + iz] = I*ky_right*a[imx*nky*nkz + imy*nkz + iz];
}
}
}
fftwf_execute_dft(p4,u_left,u_left);
fftwf_execute_dft(p4,u_right,u_right);
for(imx=0; imx<nkx;imx++){
for(imy=0; imy<nky;imy++){
for(iz=0; iz<nkz;iz++){
if (imx < nmx && imy < nmy && iz < nz){
if (cabs(u_left[imx*nky*nkz + imy*nkz + iz]) >= cabs(u_right[imx*nky*nkz + imy*nkz + iz])){
u_s_signy[imx*nmy*nz + imy*nz + iz] = 1.*cabs(u_s[imx*nmy*nz + imy*nz + iz]);
}
else{
u_s_signy[imx*nmy*nz + imy*nz + iz] = -1.*cabs(u_s[imx*nmy*nz + imy*nz + iz]);
}
}
}
}
}
fftwf_free(a);
fftwf_free(b);
fftwf_free(u_left);
fftwf_free(u_right);
return;
}
float signf1(float a)
/*< sign of a float, always has an amplitude of 1 >*/
{
float b;
if (a>=0.) b = 1.;
else b =-1.;
return b;
}
|
Blaze.h
|
//=================================================================================================
/*!
// \file blaze/Blaze.h
// \brief Primary include file of the Blaze library
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_BLAZE_H_
#define _BLAZE_BLAZE_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <blaze/Math.h>
#include <blaze/Util.h>
//=================================================================================================
//
// DOXYGEN DOCUMENTATION
//
//=================================================================================================
//*************************************************************************************************
//! Namespace of the \b Blaze C++ math library.
namespace blaze {}
//*************************************************************************************************
//**Mainpage***************************************************************************************
/*!\mainpage
//
// \image html blaze300x150.jpg
//
// This is the API for the \b Blaze high performance C++ math library. It gives a complete
// overview of the individual features and sublibraries of \b Blaze. To get a first impression
// on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards,
// the following long tutorial covers the most important aspects of the \b Blaze math library.
// The tabs at the top of the page allow a direct access to the individual modules, namespaces,
// classes, and files of the \b Blaze library.\n\n
//
// \section table_of_content Table of Contents
//
// <ul>
// <li> \ref configuration_and_installation </li>
// <li> \ref getting_started </li>
// <li> \ref vectors
// <ul>
// <li> \ref vector_types </li>
// <li> \ref vector_operations </li>
// </ul>
// </li>
// <li> \ref matrices
// <ul>
// <li> \ref matrix_types </li>
// <li> \ref matrix_operations </li>
// </ul>
// </li>
// <li> \ref adaptors
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices </li>
// </ul>
// </li>
// <li> \ref views
// <ul>
// <li> \ref views_subvectors </li>
// <li> \ref views_submatrices </li>
// <li> \ref views_rows </li>
// <li> \ref views_columns </li>
// </ul>
// </li>
// <li> \ref arithmetic_operations
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication </li>
// </ul>
// </li>
// <li> \ref shared_memory_parallelization
// <ul>
// <li> \ref openmp_parallelization </li>
// <li> \ref cpp_threads_parallelization </li>
// <li> \ref boost_threads_parallelization </li>
// <li> \ref serial_execution </li>
// </ul>
// </li>
// <li> \ref serialization
// <ul>
// <li> \ref vector_serialization </li>
// <li> \ref matrix_serialization </li>
// </ul>
// </li>
// <li> \ref blas_functions </li>
// <li> \ref lapack_functions </li>
// <li> \ref configuration_files </li>
// <li> \ref custom_data_types </li>
// <li> \ref error_reporting_customization </li>
// <li> \ref intra_statement_optimization </li>
// </ul>
*/
//*************************************************************************************************
//**Configuration and Installation*****************************************************************
/*!\page configuration_and_installation Configuration and Installation
//
// Setting up the \b Blaze library on a particular system is a fairly easy two step process. Since
// \b Blaze is a template library and therefore mainly consists of header files, no compilation is
// required. In the following, this two step process is explained in detail, preceded only by a
// short summary of the requirements.
//
//
// \n \section requirements Requirements
// <hr>
//
// In order for \b Blaze to work properly, the Boost library must be installed on the system. It
// is recommended to use the newest Boost library available, but \b Blaze requires at minimum the
// Boost version 1.54.0. If you don't have Boost installed on your system, you can download it for
// free from 'http://www.boost.org'.
//
// Additionally, for maximum performance \b Blaze expects you to have a BLAS library installed
// (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>,
// <a href="http://developer.amd.com/libraries/acml/">ACML</a>,
// <a href="http://math-atlas.sourceforge.net">Atlas</a>,
// <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't
// have a BLAS library installed on your system, \b Blaze will still work and will not be reduced
// in functionality, but performance may be limited. Thus it is strongly recommended to install a
// BLAS library.
//
// Furthermore, for computing the determinant of a dense matrix and for the dense matrix inversion
// \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either of
// these features is used it is necessary to link the LAPACK library to the final executable. If
// no LAPACK library is available the use of these features will result in a linker error.
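//
// For instance, assuming the GNU C++ compiler and a system-wide reference LAPACK installation,
// the link step for a hypothetical source file \c DeterminantExample.cpp might look as follows
// (the exact library name depends on the installed LAPACK distribution):
\code
g++ -O3 -DNDEBUG -o DeterminantExample DeterminantExample.cpp -llapack
\endcode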
//
//
// \n \section step_1_configuration Step 1: Configuration
// <hr>
//
// \subsection step_1_configuration_unix Linux/MacOSX User
//
// The first step is to adapt the \c Configfile in the \b Blaze home directory to the local
// configuration. Any text editor can be used for this task:
\code
vi ./Configfile
\endcode
// In the \c Configfile, the kind of installation (debug or release), the library types (static
// and/or dynamic), the compiler including compiler flags, and several include paths have to be
// specified. Afterwards, the \c configure script can be run, which uses the \c Configfile to
// update and create several files:
\code
./configure
\endcode
// This step can also be omitted, but results in a default configuration that does not guarantee
// the highest performance for all operations. For instance, without running the \c configure
// script, \b Blaze assumes that no BLAS library is installed on the system and cannot use BLAS
// functionality, e.g. for the matrix/matrix multiplication.
//
// In order to further customize the \b Blaze library the header files in the <em>./blaze/config/</em>
// subdirectory can be adapted. See section \ref configuration_files for more details.
//
// \n \subsection step_1_configuration_windows Windows User
//
// Unfortunately, for Windows users there is no \c configure script available (yet). Therefore
// Windows users have to manually configure the \b Blaze library. Most configuration headers are
// located in the <em>./blaze/config/</em> subdirectory. The one exception is the \c BLAS.h
// header in the <em>./blaze/system/</em> subdirectory that contains the configuration of the
// BLAS functionality. Note that in case the \c BLAZE_BLAS_MODE symbol is set to 1, the correct
// BLAS header file has to be specified!
//
//
// \n \section step_2_installation Step 2: Installation
// <hr>
//
// \subsection step_2_configuration_unix Linux/MacOSX User
//
// The second step is the installation of the header files. Since \b Blaze mainly consists of
// header files, the <em>./blaze</em> subdirectory can be simply copied to a standard include
// directory (note that this requires root privileges):
\code
cp -r ./blaze /usr/local/include
\endcode
// Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the
// \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be
// searched after any directories specified on the command line with the option \c -I and
// before the standard default directories (such as \c /usr/local/include and \c /usr/include).
// Assuming a user misterX, the environment variable can be set as follows:
\code
CPLUS_INCLUDE_PATH=/usr/home/misterX/blaze
export CPLUS_INCLUDE_PATH
\endcode
// Last but not least, the <em>./blaze</em> subdirectory can be explicitly specified on the
// command line. The following example demonstrates this by means of the GNU C++ compiler:
\code
g++ -I/usr/home/misterX/blaze -o BlazeTest BlazeTest.cpp
\endcode
// \n \subsection step_2_configuration_windows Windows User
//
// Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be
// copied to any other directory or simply left in the default \b Blaze directory. However, the
// chosen include directory has to be explicitly specified as include path. In Visual Studio,
// this is done via the project property pages, configuration properties, C/C++, General settings.
// Here the additional include directories can be specified. Note that there are small differences
// between VS2008 and VS2010:
// <a href="http://blogs.msdn.com/b/vsproject/archive/2009/07/07/vc-directories.aspx">VC++ Directories</a>.
//
//
// \n \section step_3_compilation Step 3 (Optional): Compilation
// <hr>
//
// \subsection step_3_configuration_unix Linux/MacOSX User
//
// Next to the math library, \b Blaze also contains a small number of additional (sub-)libraries.
// If these libraries, such as the blaze::logging functionality, are required it is necessary to
// create the \b Blaze library files. For that purpose, the \c configure script has created a
// \c Makefile that can be used for the compilation process:
\code
make
\endcode
// Afterwards, the \c libblaze.so and/or \c libblaze.a libraries are contained in the \a lib
// subdirectory and can be copied to a standard library directory (note that this requires
// root privileges). However, this step can be omitted if only the \b Blaze math library is
// required.
\code
cp ./lib/* /usr/local/lib
\endcode
// Alternatively, on Unix-based systems the \c LD_LIBRARY_PATH environment variable can be
// extended to also consider the \b Blaze \a lib directory:
\code
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/home/misterX/blaze/lib
export LD_LIBRARY_PATH
\endcode
// \n \subsection step_3_configuration_windows Windows User
//
// For Windows users, a comfortable compilation of the extended \b Blaze features is not (yet)
// supported.
//
// \n Next: \ref getting_started
*/
//*************************************************************************************************
//**Getting Started********************************************************************************
/*!\page getting_started Getting Started
//
// This short tutorial serves the purpose to give a quick overview of the way mathematical
// expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following
// long tutorial covers the most important aspects of the \b Blaze math library.
//
//
// \n \section getting_started_vector_example A First Example
//
// \b Blaze is written such that using mathematical expressions is as close to mathematical
// textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly
// easiest solution is the right solution and most users experience no problems when trying to
// use \b Blaze in the most natural way. The following example gives a first impression of the
// formulation of a vector addition in \b Blaze:
\code
#include <iostream>
#include <blaze/Math.h>
using blaze::StaticVector;
using blaze::DynamicVector;
// Instantiation of a static 3D column vector. The vector is directly initialized as
// ( 4 -2 5 )
StaticVector<int,3UL> a( 4, -2, 5 );
// Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to
// ( 2 5 -3 )
DynamicVector<int> b( 3UL );
b[0] = 2;
b[1] = 5;
b[2] = -3;
// Adding the vectors a and b
DynamicVector<int> c = a + b;
// Printing the result of the vector addition
std::cout << "c =\n" << c << "\n";
\endcode
// Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header
// file. Alternatively, the entire \b Blaze library, including both the math and the entire
// utility module, can be included via the \c blaze/Blaze.h header file. Also note that all
// classes and functions of \b Blaze are contained in the blaze namespace.\n\n
//
// Assuming that this program resides in a source file called \c FirstExample.cpp, it can be
// compiled for instance via the GNU C++ compiler:
\code
g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp
\endcode
// Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum
// performance, it is necessary to compile the program in release mode, which deactivates
// all debugging functionality inside \b Blaze. It is also strongly recommended to specify
// the available architecture specific instruction set (as for instance the AVX instruction
// set, which if available can be activated via the \c -mavx flag). This allows \b Blaze
// to optimize computations via vectorization.\n\n
//
// When running the resulting executable \c FirstExample, the output of the last line of
// this small program is
\code
c =
6
3
2
\endcode
// \n \section getting_started_matrix_example An Example Involving Matrices
//
// Similarly easy and intuitive are expressions involving matrices:
\code
#include <blaze/Math.h>
using namespace blaze;
// Instantiating a dynamic 3D column vector
DynamicVector<int> x( 3UL );
x[0] = 4;
x[1] = -1;
x[2] = 3;
// Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1;
A(0,2) = 4;
A(1,1) = -2;
// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;
// Printing the resulting vector
std::cout << "y =\n" << y << "\n";
// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B( 3, 0, -1, -1, 2, 0 );
// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;
// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
\endcode
// The output of this program is
\code
y =
16
2
C =
( -1 -1 )
(  0 -4 )
\endcode
// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:
\code
const size_t NN( N*N );
blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;
// ... Initializing the sparse matrix A
// Performing the CG algorithm
r = b - A * x;
p = r;
delta = (r,r);
for( size_t iteration=0UL; iteration<iterations; ++iteration )
{
Ap = A * p;
alpha = delta / (p,Ap);
x += alpha * p;
r -= alpha * Ap;
beta = (r,r);
if( std::sqrt( beta ) < 1E-8 ) break;
p = r + ( beta / delta ) * p;
delta = beta;
}
\endcode
// \n Hopefully this short tutorial gives a good first impression of how mathematical expressions
// are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types,
// will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and
// matrix types, all possible operations on vectors and matrices, and of course all possible
// mathematical expressions.
//
// \n Previous: \ref configuration_and_installation Next: \ref vectors
*/
//*************************************************************************************************
//**Vectors****************************************************************************************
/*!\page vectors Vectors
//
// \tableofcontents
//
//
// \n \section vectors_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector,
// \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector)
// and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified
// as either column vectors or row vectors:
\code
using blaze::DynamicVector;
using blaze::columnVector;
using blaze::rowVector;
// Setup of the 3-dimensional dense column vector
//
// ( 1 )
// ( 2 )
// ( 3 )
//
DynamicVector<int,columnVector> a( 3UL );
a[0] = 1;
a[1] = 2;
a[2] = 3;
// Setup of the 3-dimensional dense row vector
//
// ( 4 5 6 )
//
DynamicVector<int,rowVector> b( 3UL );
b[0] = 4;
b[1] = 5;
b[2] = 6;
\endcode
// Per default, all vectors in \b Blaze are column vectors:
\code
// Instantiation of a 3-dimensional column vector
blaze::DynamicVector<int> c( 3UL );
\endcode
// \n \section vectors_details Vector Details
// <hr>
//
// - \ref vector_types
// - \ref vector_operations
//
//
// \n \section vectors_examples Examples
// <hr>
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowVector;
using blaze::columnVector;
StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector
CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector
// ... Resizing and initialization
c = a + trans( b );
\endcode
// \n Previous: \ref getting_started Next: \ref vector_types
*/
//*************************************************************************************************
//**Vector Types***********************************************************************************
/*!\page vector_types Vector Types
//
// \tableofcontents
//
//
// \n \section vector_types_static_vector StaticVector
// <hr>
//
// The blaze::StaticVector class template is the representation of a fixed size vector with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class StaticVector;
\endcode
// - \c Type: specifies the type of the vector elements. StaticVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the total number of vector elements. It is expected that StaticVector is
// only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at
// compile time:
\code
// Definition of a 3-dimensional integral column vector
blaze::StaticVector<int,3UL> a;
// Definition of a 4-dimensional single precision column vector
blaze::StaticVector<float,4UL,blaze::columnVector> b;
// Definition of a 6-dimensional double precision row vector
blaze::StaticVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_dynamic_vector DynamicVector
// <hr>
//
// The blaze::DynamicVector class template is the representation of an arbitrary sized vector
// with dynamically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/DynamicVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class DynamicVector;
\endcode
// - \c Type: specifies the type of the vector elements. DynamicVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best
// choice for medium to large vectors. Its size can be modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::DynamicVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::DynamicVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::DynamicVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_hybrid_vector HybridVector
// <hr>
//
// The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and
// the blaze::DynamicVector class templates. It represents a fixed size vector with statically
// allocated elements, but still can be dynamically resized (within the bounds of the available
// memory). It can be included via the header file
\code
#include <blaze/math/HybridVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class HybridVector;
\endcode
// - \c Type: specifies the type of the vector elements. HybridVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the maximum number of vector elements. It is expected that HybridVector
// is only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:
\code
// Definition of a 3-dimensional integral column vector with a maximum size of 6
blaze::HybridVector<int,6UL> a( 3UL );
// Definition of a 4-dimensional single precision column vector with a maximum size of 16
blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0 and a maximum size of 6
blaze::HybridVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_custom_vector CustomVector
// <hr>
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of elements
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomVector.h>
\endcode
// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool TF >
class CustomVector;
\endcode
// - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - PF   : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CustomVector is the right choice if any external array needs to be represented as
// a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomVector;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays
typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded;
std::vector<int> vec( 7UL );
UnalignedUnpadded a( &vec[0], 7UL );
// Definition of a managed custom column vector for unaligned but padded 'float' arrays
typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded;
UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() );
// Definition of a managed custom row vector for aligned, unpadded 'double' arrays
typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded;
AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() );
// Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays
typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded;
AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() );
\endcode
// In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several
// special characteristics. All of these result from the fact that a custom vector is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref vector_types_custom_vector_memory_management</b>
// -# <b>\ref vector_types_custom_vector_copy_operations</b>
// -# <b>\ref vector_types_custom_vector_alignment</b>
// -# <b>\ref vector_types_custom_vector_padding</b>
//
// \n \subsection vector_types_custom_vector_memory_management Memory Management
//
// The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// vector data structure. However, this flexibility comes with the price that the user of a custom
// vector is responsible for the resource management.
//
// When constructing a custom vector there are two choices: Either a user manually manages the
// array of elements outside the custom vector, or alternatively passes the responsibility for
// the memory management to an instance of CustomVector. In the second case the CustomVector
// class employs shared ownership between all copies of the custom vector, which reference the
// same array.
//
// The following examples give an impression of several possible types of custom vectors:
\code
using blaze::CustomVector;
using blaze::ArrayDelete;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
using blaze::columnVector;
using blaze::rowVector;
// Definition of a 3-dimensional custom vector with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom vector!
std::vector<int> vec( 3UL );
CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL );
// Definition of a custom row vector with size 3 for unaligned, unpadded integer arrays.
// The responsibility for the memory management is passed to the custom vector by
// providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction
// of the custom vector.
CustomVector<int,unaligned,unpadded,rowVector> b( new int[3], 3UL, ArrayDelete() );
// Definition of a custom vector with size 3 and capacity 16 with aligned and padded
// integer array. The memory management is passed to the custom vector by providing a
// deleter of type 'blaze::Deallocate'.
CustomVector<int,aligned,padded> c( allocate<int>( 16UL ), 3UL, 16UL, Deallocate() );
\endcode
// It is possible to pass any type of deleter to the constructor. The deleter is only required
// to provide a function call operator that can be passed the pointer to the managed array. As
// an example the following code snippet shows the implementation of two native \b Blaze deleters
// blaze::ArrayDelete and blaze::Deallocate:
\code
namespace blaze {
struct ArrayDelete
{
template< typename Type >
inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); }
};
struct Deallocate
{
template< typename Type >
inline void operator()( Type ptr ) const { deallocate( ptr ); }
};
} // namespace blaze
\endcode
// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:
\code
using blaze::CustomVector;
using blaze::unaligned;
using blaze::unpadded;
typedef CustomVector<int,unaligned,unpadded> CustomType;
std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10
CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector
a[1] = 20; // Also modifies the std::vector
CustomType b( a ); // Creating a copy of vector a
b[2] = 20; // Also affects vector a and the std::vector
\endcode
// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector. In case a deleter has been provided to the first
// custom vector, both vectors share the responsibility to destroy the array when the last vector
// goes out of scope.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:
\code
std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4
CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector
a = c; // Copy assignment: Set all values of vector a and b to 4.
\endcode
// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unpadded;
int* array = blaze::allocate<int>( 5UL ); // Needs to be 32-byte aligned
CustomVector<int,aligned,unpadded> a( array, 5UL, Deallocate() );
\endcode
// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single intrinsic addition instruction:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
typedef CustomVector<double,aligned,padded> CustomType;
// Creating padded custom vectors of size 3 and a capacity of 4
CustomType a( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() );
CustomType b( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() );
CustomType c( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() );
// ... Initialization
c = a + b; // AVX-based vector addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
typedef CustomVector<double,aligned,unpadded> CustomType;
// Creating unpadded custom vector of size 3
CustomType a( allocate<double>( 3UL ), 3UL, Deallocate() );
CustomType b( allocate<double>( 3UL ), 3UL, Deallocate() );
CustomType c( allocate<double>( 3UL ), 3UL, Deallocate() );
// ... Initialization
c = a + b; // Scalar vector addition
\endcode
// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is a multiple of the intrinsic vector width. In case of unaligned
// padded vectors \f$ N-1 \f$ additional padding elements are required, where \f$ N \f$ is the
// intrinsic vector width. In case the padding is insufficient with respect to the available
// instruction set, a \c std::invalid_argument exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
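//
// As an illustrative sketch (assuming AVX, i.e. an intrinsic vector width of four 'double'
// elements), an aligned, padded custom vector of size 5 would therefore be given a capacity
// of 8, the next multiple of the intrinsic vector width:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
// Size 5, capacity 8 (next multiple of the assumed intrinsic vector width of 4)
CustomVector<double,aligned,padded> v( allocate<double>( 8UL ), 5UL, 8UL, Deallocate() );
\endcode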
//
//
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file
\code
#include <blaze/math/CompressedVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class CompressedVector;
\endcode
// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CompressedVector is the right choice for all kinds of sparse vectors:
\code
// Definition of a 3-dimensional integral column vector
blaze::CompressedVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements
blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL );
// Definition of a double precision row vector with size 0
blaze::CompressedVector<double,blaze::rowVector> c;
\endcode
// \n Previous: \ref vectors Next: \ref vector_operations
*/
//*************************************************************************************************
//**Vector Operations******************************************************************************
/*!\page vector_operations Vector Operations
//
// \tableofcontents
//
//
// \n \section vector_operations_constructors Constructors
// <hr>
//
// Instantiating and setting up a vector is very easy and intuitive. However, there are a few
// rules to take care of:
// - In case the last template parameter (the transpose flag) is omitted, the vector is per
// default a column vector.
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection vector_operations_default_construction Default Construction
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
// All vectors can be default constructed. Whereas the size
// of StaticVectors is fixed via the second template parameter,
// the initial size of a default constructed DynamicVector or
// CompressedVector is 0.
StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector.
// All elements are initialized to 0.
StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector.
// Again, all elements are initialized to 0L.
DynamicVector<float> v3; // Instantiation of a dynamic single precision column
// vector of size 0.
DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row
// vector of size 0.
CompressedVector<int> v5; // Instantiation of a compressed integer column
// vector of size 0.
CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row
// vector of size 0.
\endcode
// \n \subsection vector_operations_size_construction Construction with Specific Size
//
// The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that
// allows to immediately give the vector the required size. Whereas both dense vectors (i.e.
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector
// elements, \c CompressedVector merely acquires the size but remains empty.
\code
DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector
// of size 9. The elements are NOT initialized!
HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single
// precision complex values. The elements are
// default constructed.
CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with
// size 10. Initially, the vector provides no
// capacity for non-zero elements.
\endcode
// \n \subsection vector_operations_initialization_constructors Initialization Constructors
//
// All dense vector classes offer a constructor that allows for a direct, homogeneous initialization
// of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements
// can be specified
\code
StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector.
// All elements are initialized to 2.
DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision
// column vector of size 3. All elements are
// set to 7.0F.
CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision row
// vector of size 15, which provides enough
// space for at least 3 non-zero elements.
\endcode
// The \c StaticVector class offers a special initialization constructor. For \c StaticVectors of
// up to 6 elements (i.e. 6D vectors) the vector elements can be individually specified in the
// constructor:
\code
using blaze::StaticVector;
StaticVector<int,1UL> v13( 4 );
StaticVector<long,2UL> v14( 1L, -2L );
StaticVector<float,3UL,columnVector> v15( -0.1F, 4.2F, -7.1F );
StaticVector<double,4UL,rowVector> v16( 1.3, -0.4, 8.3, -1.2 );
StaticVector<size_t,5UL> v17( 3UL, 4UL, 1UL, 9UL, 4UL );
StaticVector<long,6UL> v18( 1L, 3L, -2L, 9L, 4L, -3L );
\endcode
// \n \subsection vector_operations_array_construction Array Construction
//
// Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic
// or static array. If the vector is initialized from a dynamic array, the constructor expects the
// actual size of the array as first argument, the array as second argument. In case of a static
// array, the fixed size of the array is used:
\code
double* array1 = new double[2];
// ... Initialization of the dynamic array
float array2[4] = { 1.0F, 2.0F, 3.0F, 4.0F };
blaze::StaticVector<double,2UL> v1( 2UL, array1 );
blaze::DynamicVector<float> v2( array2 );
delete[] array1;
\endcode
// \n \subsection vector_operations_copy_construction Copy Construction
//
// All dense and sparse vectors can be created as the copy of any other dense or sparse vector
// with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector).
\code
StaticVector<int,9UL,columnVector> v19( v7 ); // Instantiation of the dense column vector v19
// as copy of the dense column vector v7.
DynamicVector<int,rowVector> v20( v9 ); // Instantiation of the dense row vector v20 as
// copy of the sparse row vector v9.
CompressedVector<int,columnVector> v21( v1 ); // Instantiation of the sparse column vector v21
// as copy of the dense column vector v1.
CompressedVector<float,rowVector> v22( v12 ); // Instantiation of the sparse row vector v22 as
// copy of the row vector v12.
\endcode
// Note that it is not possible to create a \c StaticVector as a copy of a vector with a different
// size:
\code
StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match!
StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match!
\endcode
// \n \section vector_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse vectors:
// \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment,
// \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment.
//
// \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment
//
// Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used:
\code
blaze::StaticVector<int,3UL> v1;
blaze::DynamicVector<double> v2;
// Setting all integer elements of the StaticVector to 2
v1 = 2;
// Setting all double precision elements of the DynamicVector to 5.0
v2 = 5.0;
\endcode
// \n \subsection vector_operations_array_assignment Array Assignment
//
// Dense vectors can also be assigned a static array:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
float array1[2] = { 1.0F, 2.0F };
double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 };
v1 = array1;
v2 = array2;
\endcode
// \n \subsection vector_operations_copy_assignment Copy Assignment
//
// For all vector types it is generally possible to assign another vector with the same transpose
// flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the
// assigned vector is required to have the same size as the \c StaticVector since the size of a
// \c StaticVector cannot be adapted!
\code
blaze::StaticVector<int,3UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 3UL );
blaze::DynamicVector<float,columnVector> v3( 5UL );
blaze::CompressedVector<int,columnVector> v4( 3UL );
blaze::CompressedVector<float,rowVector> v5( 3UL );
// ... Initialization of the vectors
v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector
v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector
v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector
v1 = v5; // Compilation error: Cannot assign a row vector to a column vector
\endcode
// \n \subsection vector_operations_compound_assignment Compound Assignment
//
// Next to plain assignment, it is also possible to use addition assignment, subtraction
// assignment, and multiplication assignment. Note however, that in contrast to plain assignment
// the size and the transpose flag of the vectors have to be equal in order to be able to perform a
// compound assignment.
\code
blaze::StaticVector<int,5UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 5UL );
blaze::CompressedVector<float,columnVector> v3( 7UL );
blaze::DynamicVector<float,rowVector> v4( 7UL );
blaze::CompressedVector<float,rowVector> v5( 7UL );
// ... Initialization of the vectors
v1 += v2; // OK: Addition assignment between two column vectors of the same size
v1 += v3; // Runtime error: No compound assignment between vectors of different size
v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag
v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size
\endcode
// \n \section vector_operations_element_access Element Access
// <hr>
//
// The easiest and most intuitive way to access a dense or sparse vector is via the subscript
// operator. The indices to access a vector are zero-based:
\code
blaze::DynamicVector<int> v1( 5UL );
v1[0] = 1;
v1[1] = 3;
// ...
blaze::CompressedVector<float> v2( 5UL );
v2[2] = 7.3F;
v2[4] = -1.4F;
\endcode
// Whereas using the subscript operator on a dense vector only accesses the already existing
// element, accessing an element of a sparse vector via the subscript operator potentially
// inserts the element into the vector and may therefore be more expensive. Consider the
// following example:
\code
blaze::CompressedVector<int> v1( 10UL );
for( size_t i=0UL; i<v1.size(); ++i ) {
... = v1[i];
}
\endcode
// Although the compressed vector is only used for read access within the for loop, using the
// subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all
// vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end(), and \c cend() functions to traverse the currently contained elements by iterators.
// In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a
// manipulation of the non-zero value. In case of a constant vector or in case \c cbegin() or
// \c cend() are used, a \c ConstIterator is returned:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 10UL );
// ... Initialization of the vector
// Traversing the vector by Iterator
for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
// Traversing the vector by ConstIterator
for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) {
// ...
}
for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) {
// ...
}
\endcode
// \n \section vector_operations_element_insertion Element Insertion
// <hr>
//
// In contrast to dense vectors, which store all elements independent of their value and offer
// direct access to all elements, sparse vectors only store the non-zero elements contained
// in the vector. Therefore it is necessary to explicitly add elements to the vector. The first
// option to add elements to a sparse vector is the subscript operator:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 3UL );
v1[1] = 2;
\endcode
// In case the element at the given index is not yet contained in the vector, it is automatically
// inserted. Otherwise the old value is replaced by the new value 2. The operator returns a
// reference to the sparse vector element.\n
// An alternative is the \c set() function: In case the element is not yet contained in the vector
// the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at index 3
v1.set( 3, 1 );
\endcode
// However, insertion of elements can be better controlled via the \c insert() function. In contrast
// to the subscript operator and the \c set() function it emits an exception in case the element is
// already contained in the vector. In order to check for this case, the \c find() function can be
// used:
\code
// In case the element at index 4 is not yet contained in the matrix it is inserted
// with a value of 6.
if( v1.find( 4 ) == v1.end() )
v1.insert( 4, 6 );
\endcode
// Although the \c insert() function is very flexible, for performance reasons it is not suited
// for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill
// a sparse vector is the \c append() function. It requires the sparse vector to provide enough
// capacity to insert a new element. Additionally, the index of the new element must be larger
// than the index of the previous element. Violating these conditions results in undefined
// behavior!
\code
v1.reserve( 10 ); // Reserving space for 10 non-zero elements
v1.append( 5, -2 ); // Appending the element -2 at index 5
v1.append( 6, 4 ); // Appending the element 4 at index 6
// ...
\endcode
// \n \section vector_operations_member_functions Member Functions
// <hr>
//
// \subsection vector_operations_size Size of a Vector
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:
\code
// Instantiating a dynamic vector with size 10
blaze::DynamicVector<int> v1( 10UL );
v1.size(); // Returns 10
// Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
blaze::CompressedVector<double> v2( 12UL, 3UL );
v2.size(); // Returns 12
\endcode
// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:
\code
size( v1 ); // Returns 10, i.e. has the same effect as the member function
size( v2 ); // Returns 12, i.e. has the same effect as the member function
blaze::DynamicMatrix<int> A( 15UL, 12UL );
size( A * v2 ); // Returns 15, i.e. the size of the resulting vector
\endcode
// \n \subsection vector_operations_capacity Capacity of a Vector
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater than or equal to
// the size of the vector; in case of a sparse vector the capacity may even be less than
// the size.
\code
v1.capacity(); // Returns at least 10
\endcode
// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:
\code
capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function
\endcode
// Note, however, that it is not possible to query the capacity of a vector expression:
\code
capacity( A * v1 ); // Compilation error!
\endcode
// \n \subsection vector_operations_nonzeros Number of Non-Zero Elements
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.
\code
v1.nonZeros(); // Returns the number of non-zero elements in the dense vector
v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector
\endcode
// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:
\code
nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector
nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression. However, the result is not the exact number of non-zero elements, but
// may be a rough estimation:
\code
nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression
\endcode
// \n \subsection vector_operations_resize_reserve Resize/Reserve
//
// The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector
// cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as
// \c CompressedVectors can be changed via the \c resize() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<int,columnVector> v1;
CompressedVector<int,rowVector> v2( 4 );
v2[1] = -2;
v2[3] = 11;
// Adapting the size of the dynamic and compressed vectors. The (optional) second parameter
// specifies whether the existing elements should be preserved. Per default, the existing
// elements are not preserved.
v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain
// uninitialized, elements of class type are default constructed.
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the
// new elements are NOT initialized!
v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved.
v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost.
\endcode
// Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors)
// on the vector:
\code
typedef blaze::DynamicVector<int,rowVector> VectorType;
typedef blaze::DenseSubvector<VectorType> SubvectorType;
VectorType v1( 10UL ); // Creating a dynamic vector of size 10
SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6]
v1.resize( 6UL ); // Resizing the vector invalidates the view
\endcode
// When the internal capacity of a vector is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicVector<int> v1;
v1.reserve( 100 );
v1.size(); // Returns 0
v1.capacity(); // Returns at least 100
\endcode
// Note that the size of the vector remains unchanged, but only the internal capacity is set
// according to the specified value!
//
//
// \n \section vector_operations_free_functions Free Functions
// <hr>
//
// \subsection vector_operations_reset_clear Reset/Clear
//
// In order to reset all elements of a vector, the \c reset() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with 2.0F.
blaze::DynamicVector<float> v1( 3UL, 2.0F );
// Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged.
reset( v1 ); // Resetting all elements
v1.size(); // Returns 3: size and capacity remain unchanged
\endcode
// In order to return a vector to its default state (i.e. the state of a default constructed
// vector), the \c clear() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with -1.0F.
blaze::DynamicVector<float> v1( 5, -1.0F );
// Resetting the entire vector.
clear( v1 ); // Resetting the entire vector
v1.size(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// Note that resetting or clearing both dense and sparse vectors does not change the capacity
// of the vectors.
//
//
// \n \subsection vector_operations_isnan isnan
//
// The \c isnan() function provides the means to check a dense or sparse vector for not-a-number
// elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for vectors with floating point
// elements. The attempt to use it for a vector with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection vector_operations_isdefault isDefault
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:
\code
blaze::HybridVector<int,20UL> a;
// ... Resizing and initialization
if( isDefault( a ) ) { ... }
\endcode
// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, rows, and columns) is in default state if all its elements are in default state.
// For instance, in case the vector is instantiated for a built-in integral or floating point data
// type, the function returns \c true in case all vector elements are 0 and \c false in case any
// vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform
//
// In order to check if all vector elements are identical, the \c isUniform function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isUniform( a ) ) { ... }
\endcode
// Note that in case of a sparse vector the zero elements are also taken into account!
//
//
// \n \subsection vector_operators_abs Absolute Values
//
// The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation
\code
blaze::StaticVector<int,3UL,rowVector> a( -1, 2, -3 );
blaze::StaticVector<int,3UL,rowVector> b( abs( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
1 \\
2 \\
3 \\
\end{array}\right)\f$
// \n \subsection vector_operations_min_max Minimum/Maximum Values
//
// The \c min() and the \c max() functions return the smallest and largest element of the given
// dense or sparse vector, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> a( -5, 2, 7, 4 );
blaze::StaticVector<int,4UL,rowVector> b( -5, 2, -7, -4 );
min( a ); // Returns -5
min( b ); // Returns -7
max( a ); // Returns 7
max( b ); // Returns 2
\endcode
// In case the vector currently has a size of 0, both functions return 0. Additionally, in case
// a given sparse vector is not completely filled, the zero elements are taken into account. For
// example: the following compressed vector has only 2 non-zero elements. However, the minimum
// of this vector is 0:
\code
blaze::CompressedVector<int> c( 4UL, 2UL );
c[0] = 1;
c[2] = 3;
min( c ); // Returns 0
\endcode
// Also note that the \c min() and \c max() functions can be used to compute the smallest and
// largest element of a vector expression:
\code
min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector
max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector
\endcode
// \n \subsection vector_operators_conj Complex Conjugates
//
// The \c conj() function can be applied on a dense or sparse vector to compute the complex
// conjugate of each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a( cplx(-2.0,-1.0), cplx(1.0,1.0) );
// Computing the vector of complex conjugates
// ( (-2, 1) )
// ( ( 1,-1) )
StaticVector<cplx,2UL> b;
b = conj( a );
\endcode
// Additionally, vectors can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicVector<cplx> c( 5UL );
conjugate( c ); // In-place conjugate operation.
c = conj( c ); // Same as above
\endcode
// \n \subsection vector_operators_real Real Part
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a( cplx(-2.0,-1.0), cplx(1.0,1.0) );
// Extracting the real part of each vector element
// ( -2 )
// ( 1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode
// \n \subsection vector_operators_imag Imaginary Part
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a( cplx(-2.0,-1.0), cplx(1.0,1.0) );
// Extracting the imaginary part of each vector element
// ( -1 )
// ( 1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode
// \n \subsection vector_operations_length Vector Length
//
// In order to calculate the length of a vector, both the \c length() and \c sqrLength() function
// can be used:
\code
blaze::StaticVector<float,3UL,rowVector> v( -1.2F, 2.7F, -2.3F );
const float len = length ( v ); // Computes the current length of the vector
const float sqrlen = sqrLength( v ); // Computes the square length of the vector
\endcode
// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_transpose Vector Transpose
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However,
// vectors can be transposed via the \c trans() function:
\code
blaze::DynamicVector<int,columnVector> v1( 4UL );
blaze::CompressedVector<int,rowVector> v2( 4UL );
v1 = v2; // Compilation error: Cannot assign a row vector to a column vector
v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it
// to the column vector v1
v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2
v1 += trans( v2 ); // OK: Addition assignment of two column vectors
\endcode
// \n \subsection vector_operations_conjugate_transpose Conjugate Transpose
//
// It is also possible to compute the conjugate transpose of a vector. This operation is available
// via the \c ctrans() function:
\code
blaze::CompressedVector< complex<float>, rowVector > v1( 4UL );
blaze::DynamicVector< complex<float>, columnVector > v2( 4UL );
v1 = ctrans( v2 ); // Compute the conjugate transpose vector
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector
v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector
\endcode
// \n \subsection vector_operations_normalize Normalize
//
// The \c normalize() function can be used to scale any non-zero vector to a length of 1. In
// case the vector does not contain a single non-zero element (i.e. is a zero vector), the
// \c normalize() function returns a zero vector.
\code
blaze::DynamicVector<float,columnVector> v1( 10UL );
blaze::CompressedVector<double,columnVector> v2( 12UL );
v1 = normalize( v1 ); // Normalizing the dense vector v1
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
v1 = normalize( v2 ); // Assigning v1 the normalized vector v2
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
\endcode
// Note that the \c normalize() function only works for floating point vectors. The attempt to
// use it for an integral vector results in a compile time error.
//
// \n \subsection vector_operations_swap Swap
//
// Via the \c swap() function it is possible to completely swap the contents of two vectors of
// the same type:
\code
blaze::DynamicVector<int,columnVector> v1( 10UL );
blaze::DynamicVector<int,columnVector> v2( 20UL );
swap( v1, v2 ); // Swapping the contents of v1 and v2
\endcode
// \n Previous: \ref vector_types Next: \ref matrices
*/
//*************************************************************************************************
//**Matrices***************************************************************************************
/*!\page matrices Matrices
//
// \tableofcontents
//
//
// \n \section matrices_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix,
// \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix)
// and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be
// stored as row-major matrices or column-major matrices:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Setup of the 2x3 row-major dense matrix
//
// ( 1 2 3 )
// ( 4 5 6 )
//
DynamicMatrix<int,rowMajor> A( 2UL, 3UL );
A(0,0) = 1; A(0,1) = 2; A(0,2) = 3;
A(1,0) = 4; A(1,1) = 5; A(1,2) = 6;
// Setup of the 3x2 column-major dense matrix
//
// ( 1 4 )
// ( 2 5 )
// ( 3 6 )
//
DynamicMatrix<int,columnMajor> B( 3UL, 2UL );
B(0,0) = 1; B(0,1) = 4;
B(1,0) = 2; B(1,1) = 5;
B(2,0) = 3; B(2,1) = 6;
\endcode
// Per default, all matrices in \b Blaze are row-major matrices:
\code
// Instantiation of a 3x3 row-major matrix
blaze::DynamicMatrix<int> C( 3UL, 3UL );
\endcode
// \n \section matrices_details Matrix Details
// <hr>
//
// - \ref matrix_types
// - \ref matrix_operations
//
//
// \n \section matrices_examples Examples
// <hr>
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix
CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix
DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix
// ... Resizing and initialization
C = A * B;
\endcode
// \n Previous: \ref vector_operations Next: \ref matrix_types
*/
//*************************************************************************************************
//**Matrix Types***********************************************************************************
/*!\page matrix_types Matrix Types
//
// \tableofcontents
//
//
// \n \section matrix_types_static_matrix StaticMatrix
// <hr>
//
// The blaze::StaticMatrix class template is the representation of a fixed size matrix with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticMatrix.h>
\endcode
// The type of the elements, the number of rows and columns, and the storage order of the matrix
// can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class StaticMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c M : specifies the total number of rows of the matrix.
// - \c N : specifies the total number of columns of the matrix. Note that it is expected
// that StaticMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are
// known at compile time:
\code
// Definition of a 3x4 integral row-major matrix
blaze::StaticMatrix<int,3UL,4UL> A;
// Definition of a 4x6 single precision row-major matrix
blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B;
// Definition of a 6x4 double precision column-major matrix
blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_dynamic_matrix DynamicMatrix
// <hr>
//
// The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix
// with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/DynamicMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class DynamicMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best
// choice for medium to large matrices. The number of rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::DynamicMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::DynamicMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_hybrid_matrix HybridMatrix
// <hr>
//
// The HybridMatrix class template combines the flexibility of a dynamically sized matrix with
// the efficiency and performance of a fixed size matrix. It is implemented as a crossing between
// the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static
// matrix it uses static stack memory instead of dynamically allocated memory and similar to the
// dynamic matrix it can be resized (within the extent of the static memory). It can be included
// via the header file
\code
#include <blaze/math/HybridMatrix.h>
\endcode
// The type of the elements, the maximum number of rows and columns and the storage order of the
// matrix can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class HybridMatrix;
\endcode
// - Type: specifies the type of the matrix elements. HybridMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - M : specifies the maximum number of rows of the matrix.
// - N : specifies the maximum number of columns of the matrix. Note that it is expected
// that HybridMatrix is only used for tiny and small matrices.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions
// are not known at compile time or not fixed at runtime, but whose maximum dimensions are known
// at compile time:
\code
// Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8
blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16
blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6
blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_custom_matrix CustomMatrix
// <hr>
//
// The blaze::CustomMatrix class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data
// structure. Thus in contrast to all other dense matrix types a custom matrix does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of elements
// during construction. A custom matrix can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomMatrix.h>
\endcode
// The type of the elements, the properties of the given array of elements and the storage order
// of the matrix can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool SO >
class CustomMatrix;
\endcode
// - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CustomMatrix is the right choice if any external array needs to be represented as
// a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomMatrix;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays
typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded;
std::vector<int> vec( 12UL );
UnalignedUnpadded A( &vec[0], 3UL, 4UL );
// Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays
typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded;
UnalignedPadded B( new float[48], 5UL, 6UL, 8UL, blaze::ArrayDelete() );
// Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays
typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded;
AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() );
// Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays
typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded;
AlignedPadded D( blaze::allocate< complex<double> >( 112UL ), 7UL, 14UL, 8UL, blaze::Deallocate() );
\endcode
// In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref matrix_types_custom_matrix_memory_management</b>
// -# <b>\ref matrix_types_custom_matrix_copy_operations</b>
// -# <b>\ref matrix_types_custom_matrix_alignment</b>
// -# <b>\ref matrix_types_custom_matrix_padding</b>
//
// \n \subsection matrix_types_custom_matrix_memory_management Memory Management
//
// The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// matrix data structure. However, this flexibility comes with the price that the user of a custom
// matrix is responsible for the resource management.
//
// When constructing a custom matrix there are two choices: Either a user manually manages the
// array of elements outside the custom matrix, or alternatively passes the responsibility for
// the memory management to an instance of CustomMatrix. In the second case the CustomMatrix
// class employs shared ownership between all copies of the custom matrix, which reference the
// same array.
//
// The following examples give an impression of several possible types of custom matrices:
\code
using blaze::CustomMatrix;
using blaze::ArrayDelete;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom matrix!
std::vector<int> vec( 12UL );
CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL );
// Definition of a 3x4 custom row-major matrix for unaligned, unpadded integer arrays.
// The responsibility for the memory management is passed to the custom matrix by
// providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction
// of the custom matrix.
CustomMatrix<int,unaligned,unpadded,rowMajor> B( new int[12], 3UL, 4UL, ArrayDelete() );
// Definition of a custom 8x12 matrix for an aligned and padded integer array of
// capacity 128 (including 4 padding elements per row). The memory management is passed
// to the custom matrix by providing a deleter of type 'blaze::Deallocate'.
CustomMatrix<int,aligned,padded> C( allocate<int>( 128UL ), 8UL, 12UL, 16UL, Deallocate() );
\endcode
// It is possible to pass any type of deleter to the constructor. The deleter is only required
// to provide a function call operator that can be passed the pointer to the managed array. As
// an example the following code snippet shows the implementation of two native \b Blaze deleters
// blaze::ArrayDelete and blaze::Deallocate:
\code
namespace blaze {
struct ArrayDelete
{
template< typename Type >
inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); }
};
struct Deallocate
{
template< typename Type >
inline void operator()( Type ptr ) const { deallocate( ptr ); }
};
} // namespace blaze
\endcode
// \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:
\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;
typedef CustomMatrix<int,unaligned,unpadded> CustomType;
std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A(0,1) = 20;        // Also modifies the std::vector
CustomType B( A );  // Creating a copy of matrix A
B(0,2) = 20;        // Also affects matrix A and the std::vector
\endcode
// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix. In case a deleter has been provided to the first
// custom matrix, both matrices share the responsibility to destroy the array when the last matrix
// goes out of scope.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:
\code
std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A = C; // Copy assignment: Set all values of matrix A and B to 4.
\endcode
// \n \subsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;
int* array = blaze::allocate<int>( 40UL ); // Is guaranteed to be 32-byte aligned
CustomMatrix<int,aligned,padded,rowMajor> A( array, 5UL, 6UL, 8UL, Deallocate() );
\endcode
// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \n \subsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double
// precision matrices can be added via three intrinsic addition instructions:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
typedef CustomMatrix<double,aligned,padded> CustomType;
// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
// ... Initialization
C = A + B; // AVX-based matrix addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
typedef CustomMatrix<double,aligned,unpadded> CustomType;
// Creating unpadded custom 3x3 matrix
CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() );
// ... Initialization
C = A + B; // Scalar matrix addition
\endcode
// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding elements and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore these elements, with the downside that it is not possible to load a
// complete row into an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the intrinsic
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater than or equal to the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \section matrix_types_compressed_matrix CompressedMatrix
// <hr>
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header file
\code
#include <blaze/math/CompressedMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class CompressedMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices:
\code
// Definition of a 3x4 integral row-major matrix
blaze::CompressedMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::CompressedMatrix<double,blaze::columnMajor> C;
\endcode
// \n Previous: \ref matrices Next: \ref matrix_operations
*/
//*************************************************************************************************
//**Matrix Operations******************************************************************************
/*!\page matrix_operations Matrix Operations
//
// \tableofcontents
//
//
// \n \section matrix_operations_constructors Constructors
// <hr>
//
// Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules
// to be aware of:
// - In case the last template parameter (the storage order) is omitted, the matrix is per
// default stored in row-major order.
// - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection matrix_operations_default_construction Default Construction
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
// All matrices can be default constructed. Whereas the size of
// a StaticMatrix is fixed via the second and third template
// parameter, the initial size of a constructed DynamicMatrix
// or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major
// matrix. All elements are initialized to 0.
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic
// row-major matrix with 0 rows and 0 columns.
DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic
// column-major matrix with 0 rows and 0 columns.
CompressedMatrix<int> M4; // Instantiation of a compressed integer
// row-major matrix of size 0x0.
CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision
// column-major matrix of size 0x0.
\endcode
// \n \subsection matrix_operations_size_construction Construction with Specific Size
//
// The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor
// that allows one to immediately give the matrices a specific number of rows and columns:
\code
DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major
// matrix. The elements are not initialized.
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major
// matrix. The elements are not initialized.
CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed
// column-major matrix.
\endcode
// Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately
// allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this
// example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory.
//
//
// \n \subsection matrix_operations_initialization_constructors Initialization Constructors
//
// All dense matrix classes offer a constructor for a direct, homogeneous initialization of all
// matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements
// can be specified.
\code
StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major
// matrix. All elements are initialized to 7.
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major
// matrix. All elements are initialized to 2.0F.
CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major
// matrix with capacity for 4 non-zero elements.
\endcode
// The \c StaticMatrix class offers a special initialization constructor. For \c StaticMatrix of
// up to 10 elements the matrix elements can be individually specified in the constructor:
\code
using blaze::StaticMatrix;
StaticMatrix<int,3UL,1UL> M12( 2, 5, -1 );
StaticMatrix<float,2UL,3UL,columnMajor> M13( -0.1F, 4.2F, -7.1F,
-0.8F, 1.3F, 4.2F );
StaticMatrix<double,3UL,3UL,rowMajor> M14( 1.3, -0.4, 8.3,
0.2, -1.5, -2.6,
1.3, 9.3, -7.1 );
\endcode
// \n \subsection matrix_operations_array_construction Array Construction
//
// Alternatively, all dense matrix classes offer a constructor for an initialization with a
// dynamic or static array. If the matrix is initialized from a dynamic array, the constructor
// expects the number of rows and columns as the first and second argument and the array as the
// third argument. In case of a static array, the fixed size of the array is used:
\code
const double* array1 = new double[6];
// ... Initialization of the dynamic array
float array2[3][2] = { { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } };
blaze::StaticMatrix<double,2UL,3UL> v1( 2UL, 3UL, array1 );
blaze::DynamicMatrix<float> v2( array2 );
delete[] array1;
\endcode
// \n \subsection matrix_operations_copy_construction Copy Construction
//
// All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code
StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15
// as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16
// as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix
// M17 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix
// M18 as copy of the compressed column-major matrix M8.
\endcode
// Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different
// number of rows and/or columns:
\code
StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns
// does not match!
StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does
// not match!
\endcode
// \n \section matrix_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse matrices:
// \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment,
// \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment.
//
//
// \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment
//
// It is possible to assign the same value to all elements of a dense matrix. All dense matrix
// classes provide a corresponding assignment operator:
\code
blaze::StaticMatrix<int,3UL,2UL> M1;
blaze::DynamicMatrix<double> M2;
// Setting all integer elements of the StaticMatrix to 4
M1 = 4;
// Setting all double precision elements of the DynamicMatrix to 3.5
M2 = 3.5;
\endcode
// \n \subsection matrix_operations_array_assignment Array Assignment
//
// Dense matrices can also be assigned a static array:
\code
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1;
blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2;
blaze::DynamicMatrix<double> M3;
int array1[2][2] = { { 1, 2 }, { 3, 4 } };
double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M1 = array1;
M2 = array1;
M3 = array2;
\endcode
// Note that due to the different storage order, the matrix M1 is initialized differently than
// matrix M2:
\f$ M1 = \left(\begin{array}{*{2}{c}}
1 & 2 \\
3 & 4 \\
\end{array}\right),\quad
M2 = \left(\begin{array}{*{2}{c}}
1 & 3 \\
2 & 4 \\
\end{array}\right)\f$
// Also note that the dimensions of the static array have to match the size of a \c StaticMatrix,
// whereas a \c DynamicMatrix is resized according to the array dimensions:
\f$ M3 = \left(\begin{array}{*{2}{c}}
3.1 & 6.4 \\
-0.9 & -1.2 \\
4.8 & 0.6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_copy_assignment Copy Assignment
//
// All kinds of matrices can be assigned to each other. The only restriction is that since a
// \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of
// rows and in the number of columns.
\code
blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL );
blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL );
blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL );
blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL );
// ... Initialization of the matrices
M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix
M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix
M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix
M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix
\endcode
// \n \subsection matrix_operations_compound_assignment Compound Assignment
//
// Compound assignment is also available for matrices: addition assignment, subtraction assignment,
// and multiplication assignment. In contrast to plain assignment, however, the number of rows
// and columns of the two operands have to match according to the arithmetic operation.
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL );
blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL );
blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL );
blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5;
blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL );
// ... Initialization of the matrices
M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions
M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix
M1 += M4; // Runtime error: No compound assignment between matrices of different size
M1 -= M5; // Compilation error: No compound assignment between matrices of different size
M2 *= M6; // OK: Multiplication assignment between two row-major matrices
\endcode
// Note that the multiplication assignment potentially changes the number of columns of the
// target matrix:
\f$\left(\begin{array}{*{3}{c}}
2 & 0 & 1 \\
0 & 3 & 2 \\
\end{array}\right) \times
\left(\begin{array}{*{2}{c}}
4 & 0 \\
1 & 0 \\
0 & 3 \\
\end{array}\right) =
\left(\begin{array}{*{2}{c}}
8 & 3 \\
3 & 6 \\
\end{array}\right)\f$
// Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a
// multiplication assignment with other square matrices of the same dimensions.
//
//
// \n \section matrix_operations_element_access Element Access
// <hr>
//
// The easiest way to access a specific dense or sparse matrix element is via the function call
// operator. The indices to access a matrix are zero-based:
\code
blaze::DynamicMatrix<int> M1( 4UL, 6UL );
M1(0,0) = 1;
M1(0,1) = 3;
// ...
blaze::CompressedMatrix<double> M2( 5UL, 3UL );
M2(0,2) = 4.1;
M2(1,1) = -6.3;
\endcode
// Since dense matrices allocate enough memory for all contained elements, using the function
// call operator on a dense matrix directly returns a reference to the accessed value. In case
// of a sparse matrix, if the accessed value is currently not contained in the matrix, the
// value is inserted into the matrix prior to returning a reference to the value, which can
// be much more expensive than the direct access to a dense matrix. Consider the following
// example:
\code
blaze::CompressedMatrix<int> M1( 4UL, 4UL );
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( size_t j=0UL; j<M1.columns(); ++j ) {
... = M1(i,j);
}
}
\endcode
// Although the compressed matrix is only used for read access within the for loop, using the
// function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore,
// all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end() and \c cend() functions to traverse all contained elements by iterator. Note that
// it is not possible to traverse all elements of the matrix at once; elements can only be
// traversed in a row-wise or column-wise fashion, depending on the storage order. In case of a
// non-const matrix, \c begin() and \c end() return an \c Iterator, which allows a manipulation
// of the non-zero value. In case of a constant matrix, or in case \c cbegin() or \c cend() are
// used, a \c ConstIterator is returned:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 4UL, 6UL );
// Traversing the matrix by Iterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
// Traversing the matrix by ConstIterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) {
// ...
}
}
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) {
// ...
}
}
\endcode
// \n \section matrix_operations_element_insertion Element Insertion
// <hr>
//
// Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse
// matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements
// to the matrix. The first possibility to add elements to a sparse matrix is the function call
// operator:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int> M1( 3UL, 4UL );
M1(1,2) = 9;
\endcode
// In case the element at the given position is not yet contained in the sparse matrix, it is
// automatically inserted. Otherwise the old value is replaced by the new value 9. The operator
// returns a reference to the sparse matrix element.\n
// An alternative is the \c set() function: In case the element is not yet contained in the matrix
// the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at position (2,0)
M1.set( 2, 0, 1 );
\endcode
// However, insertion of elements can be better controlled via the \c insert() function. In
// contrast to the function call operator and the \c set() function it emits an exception in case
// the element is already contained in the matrix. In order to check for this case, the \c find()
// function can be used:
\code
// In case the element at position (2,3) is not yet contained in the matrix it is inserted
// with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) )
M1.insert( 2, 3, 4 );
\endcode
// Although the \c insert() function is very flexible, for performance reasons it is not
// suited for the setup of large sparse matrices. A very efficient, yet also very low-level
// way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to
// provide enough capacity to insert a new element in the specified row. Additionally, the
// index of the new element must be larger than the index of the previous element in the same
// row. Violating these conditions results in undefined behavior!
\code
M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0
M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1
M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2
// ...
\endcode
// The most efficient way to fill a sparse matrix with elements, however, is a combination of
// \c reserve(), \c append(), and the \c finalize() function:
\code
blaze::CompressedMatrix<int> M1( 3UL, 5UL );
M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements
M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1
M1.finalize( 0 ); // Finalizing row 0
M1.append( 1, 1, 2 ); // Appending the value 2 in row 1 with column index 1
M1.finalize( 1 ); // Finalizing row 1
M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0
M1.finalize( 2 ); // Finalizing row 2
\endcode
// \n \section matrix_operations_member_functions Member Functions
// <hr>
//
// \subsection matrix_operations_rows Number of Rows of a Matrix
//
// The current number of rows of a matrix can be acquired via the \c rows() member function:
\code
// Instantiating a dynamic matrix with 10 rows and 8 columns
blaze::DynamicMatrix<int> M1( 10UL, 8UL );
M1.rows(); // Returns 10
// Instantiating a compressed matrix with 8 rows and 12 columns
blaze::CompressedMatrix<double> M2( 8UL, 12UL );
M2.rows(); // Returns 8
\endcode
// Alternatively, the free function \c rows() can be used to query the current number of rows of
// a matrix. In contrast to the member function, the free function can also be used to query the
// number of rows of a matrix expression:
\code
rows( M1 ); // Returns 10, i.e. has the same effect as the member function
rows( M2 ); // Returns 8, i.e. has the same effect as the member function
rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix
\endcode
// \n \subsection matrix_operations_columns Number of Columns of a Matrix
//
// The current number of columns of a matrix can be acquired via the \c columns() member function:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
M1.columns(); // Returns 8
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
M2.columns(); // Returns 7
\endcode
// There is also a free function \c columns() available, which can also be used to query the number
// of columns of a matrix expression:
\code
columns( M1 ); // Returns 8, i.e. has the same effect as the member function
columns( M2 ); // Returns 7, i.e. has the same effect as the member function
columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix
\endcode
// \n \subsection matrix_operations_capacity Capacity of a Matrix
//
// The \c capacity() member function returns the internal capacity of a dense or sparse matrix.
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of
// a dense matrix the capacity will always be greater than or equal to the total number of elements
// of the matrix. In case of a sparse matrix, the capacity will usually be much less than the
// total number of elements.
\code
blaze::DynamicMatrix<float> M1( 5UL, 7UL );
blaze::StaticMatrix<float,7UL,4UL> M2;
M1.capacity(); // Returns at least 35
M2.capacity(); // Returns at least 28
\endcode
// There is also a free function \c capacity() available to query the capacity. However, please
// note that this function cannot be used to query the capacity of a matrix expression:
\code
capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function
capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function
capacity( M1 * M2 ); // Compilation error!
\endcode
// \n \subsection matrix_operations_nonzeros Number of Non-Zero Elements
//
// For both dense and sparse matrices the current number of non-zero elements can be queried
// via the \c nonZeros() member function. In case of matrices there are two flavors of the
// \c nonZeros() function: One returns the total number of non-zero elements in the matrix,
// the second returns the number of non-zero elements in a specific row (in case of a row-major
// matrix) or column (in case of a column-major matrix). Sparse matrices directly return their
// number of non-zero elements, dense matrices traverse their elements and count the number of
// non-zero elements.
\code
blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL );
// ... Initializing the dense matrix
M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix
M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2
\endcode
\code
blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL );
// ... Initializing the sparse matrix
M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix
M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in a
// matrix expression. However, the result is not the exact number of non-zero elements, but may be
// a rough estimation:
\code
nonZeros( M1 ); // Has the same effect as the member function
nonZeros( M1, 2 ); // Has the same effect as the member function
nonZeros( M2 ); // Has the same effect as the member function
nonZeros( M2, 3 ); // Has the same effect as the member function
nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression
\endcode
// \n \subsection matrix_operations_resize_reserve Resize/Reserve
//
// The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template
// parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns
// of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<int,rowMajor> M1;
CompressedMatrix<int,columnMajor> M2( 3UL, 2UL );
// Adapting the number of rows and columns via the resize() function. The (optional)
// third parameter specifies whether the existing elements should be preserved.
M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type
// remain uninitialized, elements of class type are default
// constructed.
M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the
// new elements are NOT initialized!
M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved.
M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode
// Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices)
// on the matrix:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::DenseRow<MatrixType> RowType;
MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix
RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix
M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view
\endcode
// When the internal capacity of a matrix is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicMatrix<int> M1;
M1.reserve( 100 );
M1.rows(); // Returns 0
M1.capacity(); // Returns at least 100
\endcode
// Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or
// column (for a column-major matrix):
\code
blaze::CompressedMatrix<int> M1( 4UL, 6UL );
M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1
\endcode
// \n \section matrix_operations_free_functions Free Functions
// <hr>
//
// \subsection matrix_operations_reset_clear Reset/Clear
//
// In order to reset all elements of a dense or sparse matrix, the \c reset() function can be
// used. The number of rows and columns of the matrix are preserved:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements
M1.rows(); // Returns 4: size and capacity remain unchanged
\endcode
// Alternatively, only a single row or column of the matrix can be reset:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix
blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix
reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix
reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix
\endcode
// In order to reset a row of a column-major matrix or a column of a row-major matrix, use a
// row or column view (see \ref views_rows and \ref views_columns).
//
// In order to return a matrix to its default state (i.e. the state of a default constructed
// matrix), the \c clear() function can be used:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Clearing the entire matrix.
clear( M1 ); // Resetting the entire matrix
M1.rows(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// \n \subsection matrix_operations_isnan isnan
//
// The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number
// elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
// If at least one element of the matrix is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for matrices with floating point
// elements. The attempt to use it for a matrix with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection matrix_operations_isdefault isDefault
//
// The \c isDefault() function returns whether the given dense or sparse matrix is in default state:
\code
blaze::HybridMatrix<int,5UL,4UL> A;
// ... Resizing and initialization
if( isDefault( A ) ) { ... }
\endcode
// A matrix is in default state if it appears to just have been default constructed. All resizable
// matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in
// default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all
// submatrices) is in default state if all its elements are in default state. For instance, in case
// the matrix is instantiated for a built-in integral or floating point data type, the function
// returns \c true in case all matrix elements are 0 and \c false in case any matrix element is
// not 0.
//
//
// \n \subsection matrix_operations_isSquare isSquare
//
// Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the
// number of columns) can be checked via the \c isSquare() function:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
if( isSquare( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_issymmetric isSymmetric
//
// Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix
// is symmetric:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isSymmetric( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be symmetric!
//
//
// \n \subsection matrix_operations_isUniform isUniform
//
// In order to check if all matrix elements are identical, the \c isUniform() function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isUniform( A ) ) { ... }
\endcode
// Note that in case of a sparse matrix the zero elements are also taken into account!
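// For instance, the following compressed matrix is not considered to be uniform, although all of
// its stored (non-zero) elements are identical:
\code
blaze::CompressedMatrix<int> B( 2UL, 3UL );
B(0,0) = 5;
B(1,2) = 5;
isUniform( B ); // Returns false, since the non-stored zero elements are taken into account
\endcode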
//
//
// \n \subsection matrix_operations_islower isLower
//
// Via the \c isLower() function it is possible to check whether a dense or sparse matrix is
// lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower triangular!
//
//
// \n \subsection matrix_operations_isunilower isUniLower
//
// Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is
// lower unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlylower isStrictlyLower
//
// Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix
// is strictly lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly lower triangular!
//
//
// \n \subsection matrix_operations_isUpper isUpper
//
// Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is
// upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper triangular!
//
//
// \n \subsection matrix_operations_isuniupper isUniUpper
//
// Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is
// upper unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper
//
// Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix
// is strictly upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly upper triangular!
//
//
// \n \subsection matrix_operations_isdiagonal isDiagonal
//
// The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix,
// i.e. if it has only elements on its diagonal and if the non-diagonal elements are default
// elements:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isDiagonal( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be diagonal!
//
//
// \n \subsection matrix_operations_isidentity isIdentity
//
// The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix,
// i.e. if all diagonal elements are 1 and all non-diagonal elements are 0:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isIdentity( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be identity matrices!
//
//
// \n \subsection matrix_operators_abs Absolute Values
//
// The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A( -1, 2, -3, 4, -5, 6 );
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_min_max Minimum/Maximum Values
//
// The \c min() and the \c max() functions return the smallest and largest element of the given
// dense or sparse matrix, respectively:
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A( -5, 2, 7,
4, 0, 1 );
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( -5, 2, -7,
-4, 0, -1 );
min( A ); // Returns -5
min( B ); // Returns -7
max( A ); // Returns 7
max( B ); // Returns 2
\endcode
// In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in
// case a given sparse matrix is not completely filled, the zero elements are taken into account.
// For example: the following compressed matrix has only 2 non-zero elements. However, the minimum
// of this matrix is 0:
\code
blaze::CompressedMatrix<int> C( 2UL, 3UL );
C(0,0) = 1;
C(0,2) = 3;
min( C ); // Returns 0
\endcode
// Also note that the \c min() and \c max() functions can be used to compute the smallest and
// largest element of a matrix expression:
\code
min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix
max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix
\endcode
// \n \subsection matrix_operators_conj Complex Conjugates
//
// The \c conj() function can be applied on a dense or sparse matrix to compute the complex
// conjugate of each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A( cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ),
cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) );
// Computing the matrix of conjugate values
// ( (1, 0) (-2, 1) )
// ( (1,-1) ( 0,-1) )
StaticMatrix<cplx,2UL,2UL> B;
B = conj( A );
\endcode
// Additionally, matrices can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicMatrix<cplx> C( 5UL, 2UL );
conjugate( C ); // In-place conjugate operation.
C = conj( C ); // Same as above
\endcode
// \n \subsection matrix_operators_real Real Part
//
// The \c real() function can be used on a dense or sparse matrix to extract the real part of
// each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A( cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ),
cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) );
// Extracting the real part of each matrix element
// ( 1 -2 )
// ( 1 0 )
StaticMatrix<double,2UL,2UL> B;
B = real( A );
\endcode
// \n \subsection matrix_operators_imag Imaginary Part
//
// The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part
// of each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A( cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ),
cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) );
// Extracting the imaginary part of each matrix element
// ( 0 -1 )
// ( 1 1 )
StaticMatrix<double,2UL,2UL> B;
B = imag( A );
\endcode
// \n \subsection matrix_operations_matrix_transpose Matrix Transpose
//
// Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into
// a column-major matrix and vice versa:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL );
blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL );
M1 = M2; // Assigning a column-major matrix to a row-major matrix
M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1
M1 += trans( M2 ); // Addition assignment of two row-major matrices
\endcode
// Additionally, matrices can be transposed in-place via the \c transpose() function:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
transpose( M ); // In-place transpose operation.
M = trans( M ); // Same as above
\endcode
// Note however that the transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_conjugate_transpose Conjugate Transpose
//
// The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian
// conjugate, or transjugate) can be computed via the \c ctrans() function:
\code
blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL );
blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL );
M1 = ctrans( M2 ); // Compute the conjugate transpose matrix
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix
M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix
\endcode
// The \c ctranspose() function can be used to perform an in-place conjugate transpose operation:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
ctranspose( M ); // In-place conjugate transpose operation.
M = ctrans( M ); // Same as above
\endcode
// Note however that the conjugate transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_matrix_determinant Matrix Determinant
//
// The determinant of a square dense matrix can be computed by means of the \c det() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
double d = det( A ); // Compute the determinant of A
\endcode
// In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
// \note The \c det() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function depends on LAPACK kernels. Thus the function can only be used if the
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operations_swap Swap
//
// Via the \c swap() function it is possible to completely swap the contents of two matrices
// of the same type:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL );
blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL );
swap( M1, M2 ); // Swapping the contents of M1 and M2
\endcode
// \n \section matrix_operations_matrix_inversion Matrix Inversion
// <hr>
//
// The inverse of a square dense matrix can be computed via the \c inv() function:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = inv( A ); // Compute the inverse of A
\endcode
// Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert()
// function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
invert( A ); // In-place matrix inversion
\endcode
// Both the \c inv() and the \c invert() functions will automatically select the most suited matrix
// inversion algorithm depending on the size and type of the given matrix. For small matrices of
// up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices
// larger than 6x6 the inversion is performed by means of the most suited matrix decomposition
// method: In case of a general or triangular matrix the LU decomposition is used, for symmetric
// matrices the LDLT decomposition is applied and for Hermitian matrices the LDLH decomposition is
// performed. However, via the \c invert() function it is possible to explicitly specify the matrix
// inversion algorithm:
\code
using blaze::byLU;
using blaze::byLDLT;
using blaze::byLDLH;
using blaze::byLLH;
// In-place inversion with automatic selection of the inversion algorithm
invert( A );
// In-place inversion of a general matrix by means of an LU decomposition
invert<byLU>( A );
// In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLT>( A );
// In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLH>( A );
// In-place inversion of a positive definite matrix by means of a Cholesky decomposition
invert<byLLH>( A );
\endcode
// Whereas the inversion by means of an LU decomposition works for every general square matrix,
// the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is
// restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works
// for Hermitian positive definite matrices. Please note that it is the responsibility of the
// function caller to guarantee that the selected algorithm is suited for the given matrix. In
// case this precondition is violated the result can be wrong and might not represent the inverse
// of the given matrix!
//
// For both the \c inv() and the \c invert() functions the matrix inversion fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The matrix inversion can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if the fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c inv() function. Also, it is not possible to access individual elements via the function call
// operator on the expression object:
\code
row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The inversion functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the matrix may already have been modified.
//
//
// \n \section matrix_operations_decomposition Matrix Decomposition
// <hr>
//
// \note All decomposition functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if the fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \subsection matrix_operations_decomposition_lu LU Decomposition
//
// The LU decomposition of a dense matrix can be computed via the \c lu() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a row-major matrix
assert( A == L * U * P );
\endcode
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a column-major matrix
assert( A == P * L * U );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the
// three matrices \c A, \c L and \c U are required to have the same storage order. Also, please
// note that the way the permutation matrix \c P needs to be applied differs between row-major and
// column-major matrices, since the algorithm uses column interchanges for row-major matrices and
// row interchanges for column-major matrices.
//
// Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates
// the LU decomposition of a symmetric matrix into a lower and upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U;
blaze::DynamicMatrix<double,blaze::columnMajor> P;
lu( A, L, U, P ); // LU decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition
//
// The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
llh( A, L ); // LLH decomposition of a row-major matrix
assert( A == L * ctrans( L ) );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A
// and \c L can have any storage order.
//
// Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates
// the LLH decomposition of a symmetric matrix into a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
llh( A, L ); // Cholesky decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_qr QR Decomposition
//
// The QR decomposition of a dense matrix can be computed via the \c qr() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
blaze::DynamicMatrix<double,blaze::rowMajor> R;
qr( A, Q, R ); // QR decomposition of a row-major matrix
assert( A == Q * R );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c R can have any storage order.
//
// Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates
// the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R;
qr( A, Q, R ); // QR decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_rq RQ Decomposition
//
// Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via
// the \c rq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> R;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
rq( A, R, Q ); // RQ decomposition of a row-major matrix
assert( A == R * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c R and \c Q can have any storage order.
//
// Also the \c rq() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the RQ decomposition of an Hermitian matrix into a general
// matrix and an upper triangular matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
rq( A, R, Q ); // RQ decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_ql QL Decomposition
//
// The QL decomposition of a dense matrix can be computed via the \c ql() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::DynamicMatrix<double,blaze::columnMajor> L;
ql( A, Q, L ); // QL decomposition of a row-major matrix
assert( A == Q * L );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c L can have any storage order.
//
// Also the \c ql() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the QL decomposition of a symmetric matrix into a general
// matrix and a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
ql( A, Q, L ); // QL decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_lq LQ Decomposition
//
// The LQ decomposition of a dense matrix can be computed via the \c lq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
lq( A, L, Q ); // LQ decomposition of a row-major matrix
assert( A == L * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c L and \c Q can have any storage order.
//
// Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates
// the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
lq( A, L, Q ); // LQ decomposition of A
\endcode
// \n Previous: \ref matrix_types Next: \ref adaptors
*/
//*************************************************************************************************
//**Adaptors***************************************************************************************
/*!\page adaptors Adaptors
//
// \tableofcontents
//
//
// \section adaptors_general General Concepts
// <hr>
//
// Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the
// matrices such that certain invariants are preserved. Due to this, adaptors can provide a compile
// time guarantee of certain properties, which can be exploited for optimized performance.
//
// The \b Blaze library provides a total of 9 different adaptors:
//
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices
// <ul>
// <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_lowermatrix </li>
// <li> \ref adaptors_triangular_matrices_unilowermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_uppermatrix </li>
// <li> \ref adaptors_triangular_matrices_uniuppermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Diagonal Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_diagonalmatrix </li>
// </ul>
// </li>
// </ul>
// </li>
// </ul>
//
// In combination with the general matrix types, \b Blaze provides a total of 40 different matrix
// types that make it possible to exactly adapt the type of matrix to every specific problem.
//
//
// \n \section adaptors_examples Examples
// <hr>
//
// The following code examples give an impression on the use of adaptors. The first example shows
// the multiplication between two lower matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant
// performance advantage in comparison to a general matrix multiplication, especially for large
// matrices.
//
// The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse
// vector multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which significantly increases the performance.
//
// \n Previous: \ref matrix_operations Next: \ref adaptors_symmetric_matrices
*/
//*************************************************************************************************
//**Symmetric Matrices*****************************************************************************
/*!\page adaptors_symmetric_matrices Symmetric Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_symmetric_matrices_general Symmetric Matrices
// <hr>
//
// In contrast to general matrices, which have no restriction in their number of rows and columns
// and whose elements can have any value, symmetric matrices provide the compile time guarantee
// to be square matrices with pair-wise identical values. Mathematically, this means that a
// symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal
// values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can
// be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze
// library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix
// class template.
//
//
// \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix
// <hr>
//
// The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it
// by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its
// transpose \f$ A = A^T \f$). It can be included via the header file
\code
#include <blaze/math/SymmetricMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class SymmetricMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible symmetric matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense symmetric matrix with static memory
blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense symmetric matrix based on HybridMatrix
blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix
blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision symmetric matrix
blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E;
\endcode
// The storage order of a symmetric matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix
// will also be a column-major matrix.
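// For instance, the following two declarations result in a row-major and a column-major symmetric
// matrix, respectively:
\code
// Row-major dense matrix => row-major symmetric matrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > A;
// Column-major dense matrix => column-major symmetric matrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > B;
\endcode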
//
//
// \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices
// <hr>
//
// A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the symmetry constraint:
//
// -# <b>\ref adaptors_symmetric_matrices_square</b>
// -# <b>\ref adaptors_symmetric_matrices_symmetry</b>
// -# <b>\ref adaptors_symmetric_matrices_initialization</b>
//
// \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 symmetric static matrix
SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
//
// This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are
// symmetric themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, row-major 3x3 symmetric compressed matrix
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
// Initializing three elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
*A.begin(1UL) = 10.0; // Modifies the first stored element of the 1st row, i.e. the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a symmetric dense matrix
StaticMatrix<double,3UL,3UL> B( 3.0, 8.0, -2.0,
8.0, 0.0, -1.0,
-2.0, -1.0, 4.0 );
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-symmetric dense matrix
StaticMatrix<double,3UL,3UL> D( 3.0, 7.0, -2.0,
8.0, 0.0, -1.0,
-2.0, -1.0, 4.0 );
C = D; // Throws an exception; symmetric invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a symmetric sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Setup of the symmetric matrix
//
// ( 0 1 3 )
// A = ( 1 2 0 )
// ( 3 0 0 )
//
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0)
A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1)
A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2)
\endcode
// The symmetry property is also enforced for symmetric custom matrices: In case the given array
// of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::SymmetricMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric;
// Creating a 3x3 symmetric custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomSymmetric A( array, 3UL ); // OK
// Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array
CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the
// symmetric matrix. The following example demonstrates that modifying the elements of an entire
// row of the symmetric matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of the symmetric matrix
//
// ( 0 1 0 2 )
// A = ( 1 3 4 0 )
// ( 0 4 0 5 )
// ( 2 0 5 0 )
//
SymmetricMatrix< DynamicMatrix<int> > A( 4 );
A(0,1) = 1;
A(0,3) = 2;
A(1,1) = 3;
A(1,2) = 4;
A(2,3) = 5;
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( 0 0 0 2 )
// A = ( 0 0 0 0 )
// ( 0 0 0 5 )
// ( 2 0 5 0 )
//
row( A, 1 ) = 0;
\endcode
// The next example demonstrates the (compound) assignment to submatrices of symmetric matrices.
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B( 3UL, 2UL );
B(0,0) = 1;
B(0,1) = 2;
B(1,0) = 3;
B(1,1) = 4;
B(2,0) = 5;
B(2,1) = 6;
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the symmetric property of
// dense symmetric matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A SymmetricMatrix can participate in numerical operations in any way any other dense
// or sparse matrix can participate. It can also be combined with any other dense or sparse vector
// or matrix. The following code example gives an impression of the use of SymmetricMatrix within
// arithmetic operations:
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::HybridMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of three symmetric operand matrices A, B and C (one possible choice of types)
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
SymmetricMatrix< CompressedMatrix<double,rowMajor> > B( 3 );
SymmetricMatrix< DynamicMatrix<double,columnMajor> > C( 3 );
CompressedMatrix<float> E( 3, 3 ); // Empty row-major sparse single precision 3x3 matrix
SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > F;
SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > G;
F = A + B; // Matrix addition and assignment to a row-major symmetric matrix
G = A - C; // Matrix subtraction and assignment to a column-major symmetric matrix
G = A * E; // Matrix multiplication between a dense and a sparse matrix
A *= 2.0; // In-place scaling of matrix A
F = 2.0 * B; // Scaling of matrix B
G = E * 2.0; // Scaling of matrix E
F += A - B; // Addition assignment
G -= A + C; // Subtraction assignment
G *= A * E; // Multiplication assignment
\endcode
// \n \section adaptors_symmetric_matrices_block_structured Block-Structured Symmetric Matrices
// <hr>
//
// It is also possible to use block-structured symmetric matrices:
\code
using blaze::CompressedMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
// Definition of a 5x5 block-structured symmetric matrix based on CompressedMatrix
SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
\endcode
// Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and
// guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also
// applied to element \f$ a_{ji} \f$:
\code
// Inserting the elements (2,4) and (4,2)
A.insert( 2, 4, StaticMatrix<int,3UL,3UL>( 1, -4, 5,
6, 8, -3,
2, -1, 2 ) );
// Manipulating the elements (2,4) and (4,2)
A(2,4)(1,1) = -5;
\endcode
// \n \section adaptors_symmetric_matrices_performance Performance Considerations
// <hr>
//
// When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. The \b Blaze library
// tries to exploit the properties of symmetric matrices whenever possible. However, there are
// also situations when using a symmetric matrix introduces some overhead. The following examples
// demonstrate several situations where symmetric matrices can positively or negatively impact
// performance.
//
// \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
SymmetricMatrix< CompressedMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix may appear to be
// the best suited for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the
// SymmetricMatrix adaptor is obviously an advantage.
//
// \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
using blaze::DenseRow;
typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric;
DynamicSymmetric A( 10UL );
DenseRow<DynamicSymmetric> row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix were row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
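// For instance, mirroring the example above, a column view on a row-major symmetric matrix could
// look as follows:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::DenseColumn;
using blaze::rowMajor;
typedef SymmetricMatrix< DynamicMatrix<double,rowMajor> > RowMajorSymmetric;
RowMajorSymmetric B( 10UL );
DenseColumn<RowMajorSymmetric> col5 = column( B, 5UL ); // Internally handled like a view on the 5th row
\endcode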
//
// \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a symmetric matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not symmetric at compile time:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the symmetric matrix; no performance penalty
C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead
C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead
\endcode
// When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary
// to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property
// of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as
// possible; if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two symmetric matrices does not necessarily result in another symmetric matrix:
\code
SymmetricMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a symmetric matrix; no runtime overhead
C = A - B; // Results in a symmetric matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors Next: \ref adaptors_hermitian_matrices
*/
//*************************************************************************************************
//**Hermitian Matrices*****************************************************************************
/*!\page adaptors_hermitian_matrices Hermitian Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_hermitian_matrices_general Hermitian Matrices
// <hr>
//
// In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices.
// Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise
// conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal
// to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have
// a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze
// library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix
// class template.
//
//
// \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix
// <hr>
//
// The HermitianMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to
// its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file
\code
#include <blaze/math/HermitianMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class HermitianMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible Hermitian matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense Hermitian matrix with static memory
blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix
blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix
blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C;
// Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix
blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision complex Hermitian matrix
blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E;
\endcode
// The storage order of a Hermitian matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices
//
// The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits.
// However, there are a couple of differences, both from a mathematical point of view as well as
// from an implementation point of view.
//
// From a mathematical point of view, a matrix is called symmetric when it is equal to its
// transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate
// transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two
// conditions coincide, which means that symmetric matrices of real values are also Hermitian
// and Hermitian matrices of real values are also symmetric.
//
// From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data
// types (i.e. all integral types except \c bool, floating point and complex types), whereas
// symmetric matrices can also be block structured (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according
// SymmetricMatrix implementation. For complex element types, however, the Hermitian property
// is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::HermitianMatrix;
using blaze::SymmetricMatrix;
// The following two matrices provide an identical experience (including performance)
HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric
SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric
// The following two matrices will behave differently
HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian
SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric
// Block-structured Hermitian matrices are not allowed
HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error!
SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Block-structured symmetric matrix
\endcode
// \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices
// <hr>
//
// A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the Hermitian symmetry constraint:
//
// -# <b>\ref adaptors_hermitian_matrices_square</b>
// -# <b>\ref adaptors_hermitian_matrices_hermitian</b>
// -# <b>\ref adaptors_hermitian_matrices_initialization</b>
//
// \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 Hermitian static matrix
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced!
//
// This means that the following properties of a Hermitian matrix are always guaranteed:
//
// - The diagonal elements are real numbers, i.e. the imaginary part is zero
// - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$
//
// Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that
// are Hermitian themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
typedef std::complex<double> cplx;
// Default constructed, row-major 3x3 Hermitian compressed matrix
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
// Initializing the matrix via the function call operator
//
// ( (1, 0) (0,0) (2,1) )
// ( (0, 0) (0,0) (0,0) )
// ( (2,-1) (0,0) (0,0) )
//
A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0)
A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
//
// ( (1, 0) (0,0) (2, 1) )
// ( (0, 0) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
//
// ( (1, 0) (8,1) (2, 1) )
// ( (8,-1) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
*A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
//
// ( (0, 0) (8,1) (0, 0) )
// ( (8,-1) (2,0) (4,-2) )
// ( (0, 0) (4,2) (0, 0) )
//
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> B( cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ),
cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ),
cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK
// Assignment of a non-Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> D( cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ),
cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ),
cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) );
C = D; // Throws an exception; Hermitian invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
typedef std::complex<double> cplx;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,2) (3,-4) )
// A = ( (1,-2) (2,0) (0, 0) )
// ( (3, 4) (0,0) (0, 0) )
//
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0)
A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1)
A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2)
\endcode
// The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
// of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::HermitianMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian;
// Creating a 3x3 Hermitian custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomHermitian A( array, 3UL ); // OK
// Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
// Hermitian matrix. The following example demonstrates that modifying the elements of an entire
// row of the Hermitian matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
typedef std::complex<double> cplx;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,-1) (0,0) (2, 1) )
// A = ( (1, 1) (3, 0) (4,2) (0, 0) )
// ( (0, 0) (4,-2) (0,0) (5,-3) )
// ( (2,-1) (0, 0) (5,3) (0, 0) )
//
HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
A(0,1) = cplx( 1.0, -1.0 );
A(0,3) = cplx( 2.0, 1.0 );
A(1,1) = cplx( 3.0, 0.0 );
A(1,2) = cplx( 4.0, 2.0 );
A(2,3) = cplx( 5.0, 3.0 );
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( (0, 0) (0,0) (0,0) (2, 1) )
// A = ( (0, 0) (0,0) (0,0) (0, 0) )
// ( (0, 0) (0,0) (0,0) (5,-3) )
// ( (2,-1) (0,0) (5,3) (0, 0) )
//
row( A, 1 ) = cplx( 0.0, 0.0 );
\endcode
// The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
// Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
// symmetry of the matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
typedef std::complex<double> cplx;
// Setup of two default 4x4 Hermitian matrices
HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( (1,-1) (2, 5) )
// B = ( (3, 0) (4,-6) )
// ( (5, 0) (6, 0) )
//
DynamicMatrix<cplx> B( 3UL, 2UL );
B(0,0) = cplx( 1.0, -1.0 );
B(0,1) = cplx( 2.0, 5.0 );
B(1,0) = cplx( 3.0, 0.0 );
B(1,1) = cplx( 4.0, -6.0 );
B(2,0) = cplx( 5.0, 0.0 );
B(2,1) = cplx( 6.0, 0.0 );
// OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
//
// ( (0, 0) (0, 0) (1,-1) (2, 5) )
// A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
// ( (1, 1) (3, 0) (5, 0) (6, 0) )
// ( (2,-5) (4, 6) (6, 0) (0, 0) )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( (0, 0) (1,-1) (2,5) (0,0) )
// A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
// ( (2,-5) (X, X) (6,0) (0,0) )
// ( (0, 0) (0, 0) (0,0) (0,0) )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the Hermitian property of
// dense Hermitian matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A HermitianMatrix can be used within all numerical operations in any way any other dense or
// sparse matrix can be used. It can also be combined with any other dense or sparse vector or
// matrix. The following code example gives an impression of the use of HermitianMatrix within
// arithmetic operations:
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
typedef std::complex<float> cplx;
DynamicMatrix<cplx,rowMajor> A( 3, 3 );
CompressedMatrix<cplx,rowMajor> B( 3, 3 );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix
F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix
F = A * D; // Matrix multiplication between a dense and a sparse matrix
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B
F = C * 2.0; // Scaling of matrix C
E += A - B; // Addition assignment
F -= C + D; // Subtraction assignment
F *= A * D; // Multiplication assignment
\endcode
// \n \section adaptors_hermitian_matrices_performance Performance Considerations
// <hr>
//
// When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. This is particularly
// true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The
// \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever
// possible. However, there are also situations when using a Hermitian matrix introduces some
// overhead. The following examples demonstrate several situations where Hermitian matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric
HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a
// symmetric matrix is obviously an advantage.
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian;
DynamicHermitian A( 10UL ); // Both Hermitian and symmetric
DenseRow<DynamicHermitian> row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix were row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
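// As a minimal sketch of this second case (assuming the same kind of setup as above and the
// blaze::DenseColumn view, which is covered in the \ref views_columns section), a column view
// on a row-major Hermitian (and symmetric) matrix could look as follows:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
typedef HermitianMatrix< DynamicMatrix<double,rowMajor> > RowMajorHermitian;
RowMajorHermitian A( 10UL );  // Both Hermitian and symmetric
blaze::DenseColumn<RowMajorHermitian> col5 = column( A, 5UL );  // Internally treated like row 5
\endcode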
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
HermitianMatrix< DynamicMatrix< std::complex<double> > > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the Hermitian matrix; no performance penalty
C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead
C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead
\endcode
// When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:
\code
HermitianMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a Hermitian matrix; no runtime overhead
C = A - B; // Results in a Hermitian matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors_symmetric_matrices Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************
//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
// - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
// - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices
// - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
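// The following brief sketch contrasts the three kinds of invariants by means of dense adaptors
// (a preview only; the sections below introduce each adaptor in detail and the exact exception
// behavior is explained in \ref adaptors_triangular_matrices_special_properties):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::DiagonalMatrix;
LowerMatrix   < DynamicMatrix<double> > L( 3 );  // Elements above the diagonal are fixed to 0
UpperMatrix   < DynamicMatrix<double> > U( 3 );  // Elements below the diagonal are fixed to 0
DiagonalMatrix< DynamicMatrix<double> > D( 3 );  // Only the diagonal elements are modifiable
L(2,0) = 1.0;  // OK: modification of a lower element
U(0,2) = 1.0;  // OK: modification of an upper element
D(1,1) = 1.0;  // OK: modification of a diagonal element
L(0,2) = 1.0;  // Invalid modification of an upper element; results in an exception
\endcode
//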
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/LowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class LowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense lower matrix with static memory
blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense lower matrix based on HybridMatrix
blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense lower matrix based on DynamicMatrix
blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense lower matrix based on CustomMatrix
blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision lower matrix
blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
\endcode
// The storage order of a lower matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 1 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower unitriangular matrices:
\code
// Definition of a 3x3 row-major dense unilower matrix with static memory
blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense unilower matrix based on HybridMatrix
blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision unilower matrix
blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a lower unitriangular matrix depends on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 0 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly lower matrix with static memory
blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly lower matrix
blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly lower triangular matrix depends on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & u_{2,2} & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & u_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:
\code
// Definition of a 3x3 row-major dense upper matrix with static memory
blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense upper matrix based on HybridMatrix
blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense upper matrix based on DynamicMatrix
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision upper matrix
blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 1 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 1 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:
\code
// Definition of a 3x3 row-major dense uniupper matrix with static memory
blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision uniupper matrix
blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper unitriangular matrix depends on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 0 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 0 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly upper matrix with static memory
blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly upper matrix
blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly upper triangular matrix depends on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
0 & l_{1,1} & 0 & \cdots & 0 \\
0 & 0 & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/DiagonalMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class DiagonalMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:
\code
// Definition of a 3x3 row-major dense diagonal matrix with static memory
blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision diagonal matrix
blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a diagonal matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
// -# <b>\ref adaptors_triangular_matrices_square</b>
// -# <b>\ref adaptors_triangular_matrices_triangular</b>
// -# <b>\ref adaptors_triangular_matrices_initialization</b>
// -# <b>\ref adaptors_triangular_matrices_storage</b>
// -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 lower dynamic matrix
LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 lower static matrix
LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the
// modification of diagonal elements. Also, triangular matrices can only be assigned matrices that
// don't violate their triangular property. The following example demonstrates this restriction
// by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower;
// Default constructed, row-major 3x3 lower compressed matrix
CompressedLower A( 3 );
// Initializing elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(2,0) = 2.0; // Initialization of the lower element (2,0)
A(1,2) = 9.0; // Throws an exception; invalid modification of upper element
// Inserting two more elements via the insert() function
A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0)
A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1)
A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element
// Appending an element via the append() function
A.reserve( 1, 3 ); // Reserving enough capacity in row 1
A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1)
A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part
// Access via a non-const iterator
CompressedLower::Iterator it = A.begin(1);
*it = 6.0; // Modifies the lower element (1,0)
++it;
*it = 9.0; // Modifies the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 2, 0 ); // Erasing the lower element (2,0)
// Construction from a lower dense matrix
StaticMatrix<double,3UL,3UL> B( 3.0, 0.0, 0.0,
8.0, 0.0, 0.0,
-2.0, -1.0, 4.0 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-lower dense matrix
StaticMatrix<double,3UL,3UL> D( 3.0, 0.0, -2.0,
8.0, 0.0, 0.0,
-2.0, -1.0, 4.0 );
C = D; // Throws an exception; lower matrix invariant would be violated!
\endcode
// The triangular property is also enforced during the construction of triangular custom matrices:
// In case the given array of elements does not represent the according triangular matrix type, a
// \c std::invalid_argument exception is thrown:
\code
using blaze::CustomMatrix;
using blaze::LowerMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower;
// Creating a 3x3 lower custom matrix from a properly initialized array
double array[9] = { 1.0, 0.0, 0.0,
2.0, 3.0, 0.0,
4.0, 5.0, 6.0 };
CustomLower A( array, 3UL ); // OK
// Attempt to create a second 3x3 lower custom matrix from an uninitialized array
CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...)
// on the triangular matrix. The following example demonstrates that modifying the elements of an
// entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements.
// Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
// Setup of the lower matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 0 3 0 0 )
// ( 4 0 5 0 )
//
LowerMatrix< DynamicMatrix<int> > A( 4 );
A(1,0) = 1;
A(1,1) = 2;
A(2,1) = 3;
A(3,0) = 4;
A(3,2) = 5;
// Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 9 9 9 0 )
// ( 4 0 5 0 )
//
row( A, 2 ) = 9;
// Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
//
// ( 0 0 0 0 )
// A = ( 1 7 0 0 )
// ( 9 7 7 0 )
// ( 4 7 7 0 )
//
submatrix( A, 0, 1, 4, 2 ) = 7;
\endcode
// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowVector;
// Setup of two default 4x4 lower matrices
LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of a 4-dimensional vector
//
// v = ( 1 2 3 0 )
//
DynamicVector<int,rowVector> v( 4, 0 );
v[0] = 1;
v[1] = 2;
v[2] = 3;
// OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
//
// ( 0 0 0 0 )
// A1 = ( 0 0 0 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 2 ) = v; // OK
// Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
// marked with X cannot be assigned and triggers an exception.
//
// ( 0 0 0 0 )
// A1 = ( 1 2 X 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 1 ) = v; // Assignment throws an exception!
// Setup of the 3x2 dynamic matrix
//
// ( 0 0 )
// B = ( 7 0 )
// ( 8 9 )
//
DynamicMatrix<int> B( 3UL, 2UL, 0 );
B(1,0) = 7;
B(2,0) = 8;
B(2,1) = 9;
// OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
//
// ( 0 0 0 0 )
// A2 = ( 0 7 0 0 )
// ( 0 8 9 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
// preserved! The elements marked with X cannot be assigned without violating the invariant!
//
// ( 0 0 0 0 )
// A2 = ( 0 7 X 0 )
// ( 0 8 8 X )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix this initialization is important since otherwise the lower/upper matrix property
// of dense lower matrices would not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::DiagonalMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// 5x5 row-major lower dynamic matrix with default initialized upper matrix
LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
// 7x7 column-major upper dynamic matrix with default initialized lower matrix
UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );
// 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
\endcode
// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, storing also
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:
\code
// Recommendation 1: use dense matrices for small diagonal matrices
typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix;
// Recommendation 2: use sparse matrices for large diagonal matrices
typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix;
\endcode
// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<int> > A( 4 );
A *= 2; // Compilation error; Scale operation is not available on an unilower matrix
A /= 2; // Compilation error; Scale operation is not available on an unilower matrix
A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix
A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix
A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix
\endcode
// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A lower and upper triangular matrix can participate in numerical operations in any way any other
// dense or sparse matrix can participate. It can also be combined with any other dense or sparse
// vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix
// and blaze::UpperMatrix within arithmetic operations:
\code
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
UpperMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
UpperMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major lower matrix
F = C - D; // Matrix subtraction and assignment to a column-major upper matrix
F = A * D; // Matrix multiplication between a dense and a sparse matrix
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B
F = C * 2.0; // Scaling of matrix C
E += A - B; // Addition assignment
F -= C + D; // Subtraction assignment
F *= A * D; // Multiplication assignment
\endcode
// Note that diagonal, unitriangular and strictly triangular matrix types can be used in the same
// way, but may pose some additional restrictions (see the according class documentations).
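// For instance, as a rough sketch of such a restriction (based on the invariants described in
// this section; see the blaze::UniLowerMatrix documentation for the complete list), the sum of
// two unilower matrices has the value 2 on the diagonal and is therefore no longer unilower, so
// assigning it to another unilower matrix fails at runtime:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<double> > A( 3 ), B( 3 ), C( 3 );
C = A + B;  // Throws an exception; the diagonal of the result is 2 and violates the invariant
\endcode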
//
//
// \n \section adaptors_triangular_matrices_block_structured Block-Structured Triangular Matrices
// <hr>
//
// It is also possible to use block-structured triangular matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
// Definition of a 5x5 block-structured lower matrix based on DynamicMatrix
LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Definition of a 7x7 block-structured upper matrix based on CompressedMatrix
UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:
\code
const StaticMatrix<int,3UL,3UL> C( 1, -4,  5,
                                   6,  8, -3,
                                   2, -1,  2 );
A(2,4)(1,1) = -5;     // Invalid manipulation of upper matrix element; Results in an exception
B.insert( 4, 2, C );  // Invalid insertion of the element (4,2); Results in an exception
\endcode
// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block-structured matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::UniLowerMatrix;
using blaze::UniUpperMatrix;
// Compilation error: lower unitriangular matrices are restricted to numeric element types
UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Compilation error: upper unitriangular matrices are restricted to numeric element types
UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of a general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead. The following examples
// demonstrate several common situations where triangular matrices can positively or negatively
// impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnVector;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
DynamicVector<double,columnVector> x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
LowerMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the lower matrix; no performance penalty
C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead
C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead
\endcode
// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime
// overhead it is therefore generally advisable to assign lower or upper triangular matrices to
// other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:
\code
LowerMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a lower matrix; no runtime overhead
C = A - B; // Results in a lower matrix; no runtime overhead
C = A * B; // Results in a lower matrix; no runtime overhead
\endcode
\code
UpperMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an upper matrix; no runtime overhead
C = A - B; // Results in an upper matrix; no runtime overhead
C = A * B; // Results in an upper matrix; no runtime overhead
\endcode
// \n Previous: \ref adaptors_hermitian_matrices Next: \ref views
*/
//*************************************************************************************************
//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row or column of a matrix. As such, views act as a reference to a specific part of a vector
// or matrix. This reference is valid and can be used in every way as any other vector or matrix
// can be used as long as the referenced vector or matrix is not resized or entirely destroyed.
// Views also act as an alias to the elements of the vector or matrix: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in
// the vector or matrix and changes made via the vector or matrix are immediately visible in the
// view.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
// - \ref views_subvectors
//
// Matrix views:
// - \ref views_submatrices
// - \ref views_rows
// - \ref views_columns
//
//
// \n \section views_examples Examples
\code
using blaze::DynamicMatrix;
using blaze::StaticVector;
using blaze::rowVector;
// Setup of the 3x5 row-major matrix
//
// ( 1 0 -2 3 0 )
// ( 0 2 5 -1 -1 )
// ( 1 0 0 2 1 )
//
DynamicMatrix<int> A( 3UL, 5UL );
A(0,0) = 1; A(0,1) = 0; A(0,2) = -2; A(0,3) = 3; A(0,4) = 0;
A(1,0) = 0; A(1,1) = 2; A(1,2) = 5; A(1,3) = -1; A(1,4) = -1;
A(2,0) = 1; A(2,1) = 0; A(2,2) = 0; A(2,3) = 2; A(2,4) = 1;
// Setup of the 2-dimensional row vector
//
// ( 18 19 )
//
StaticVector<int,2UL,rowVector> vec( 18, 19 );
// Assigning to the elements (1,2) and (1,3) via a subvector of a row
//
// ( 1 0 -2 3 0 )
// ( 0 2 18 19 -1 )
// ( 1 0 0 2 1 )
//
subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
\endcode
// \n Previous: \ref adaptors_triangular_matrices Next: \ref views_subvectors
*/
//*************************************************************************************************
//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g. modifying values,
// inserting or erasing elements) are immediately visible in the vector and changes made via the
// vector are immediately visible in the subvector. \b Blaze provides two subvector types:
// \ref views_dense_subvector and \ref views_sparse_subvector.
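// The following minimal sketch illustrates the alias behavior described above by means of a
// dense subvector (the names and sizes are chosen only for illustration):
\code
typedef blaze::DynamicVector<int,blaze::rowVector> VectorType;
VectorType v( 5UL, 0 );  // 5-dimensional vector, all elements initialized to 0
// Creating a view on the three elements starting at index 1 of vector v
blaze::DenseSubvector<VectorType> sv = subvector( v, 1UL, 3UL );
sv[0] = 7;  // Also modifies v[1]
v[3]  = 9;  // The change is immediately visible as sv[2]
\endcode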
//
//
// \n \section views_dense_subvector DenseSubvector
// <hr>
//
// The blaze::DenseSubvector template represents a view on a specific subvector of a dense vector
// primitive. It can be included via the header file
\code
#include <blaze/math/DenseSubvector.h>
\endcode
// The type of the dense vector is specified via two template parameters:
\code
template< typename VT, bool AF >
class DenseSubvector;
\endcode
// - \c VT: specifies the type of the dense vector primitive. DenseSubvector can be used with
// every dense vector primitive or view, but does not work with any vector expression
// type.
// - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or
//          unaligned (blaze::unaligned). The default value is blaze::unaligned (see the sketch
//          below).
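//
// The following sketch shows how the alignment flag might be specified explicitly (a minimal
// example; the exact alignment requirements an aligned subvector imposes on the offset depend
// on the element type and the target architecture):
\code
using blaze::aligned;
using blaze::unaligned;
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
DenseVectorType d( 100UL );
// ... Initialization
// Creating an unaligned subvector (the default) on the elements [8..23] of d
blaze::DenseSubvector<DenseVectorType,unaligned> sv1 = subvector( d, 8UL, 16UL );
// Creating an aligned subvector on the same range; in case the alignment requirements are
// not satisfied, a std::invalid_argument exception is thrown
blaze::DenseSubvector<DenseVectorType,aligned> sv2 = subvector<aligned>( d, 8UL, 16UL );
\endcode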
//
//
// \n \section views_sparse_subvector SparseSubvector
// <hr>
//
// The blaze::SparseSubvector template represents a view on a specific subvector of a sparse
// vector primitive. It can be included via the header file
\code
#include <blaze/math/SparseSubvector.h>
\endcode
// The type of the sparse vector is specified via two template parameters:
\code
template< typename VT, bool AF >
class SparseSubvector;
\endcode
// - \c VT: specifies the type of the sparse vector primitive. As in case of DenseSubvector, a
// SparseSubvector can be used with every sparse vector primitive or view, but does not
// work with any vector expression type.
// - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or
// unaligned (blaze::unaligned). The default value is blaze::unaligned.
//
//
// \n \section views_subvectors_setup Setup of Subvectors
// <hr>
//
// A view on a dense or sparse subvector can be created very conveniently via the \c subvector()
// function. This view can be treated as any other vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. A subvector created from a row
// vector can be used as any other row vector, a subvector created from a column vector can be
// used as any other column vector. The view can also be used on both sides of an assignment:
// The subvector can either be used as an alias to grant write access to a specific subvector
// of a dense vector primitive on the left-hand side of an assignment or to grant read-access
// to a specific subvector of a vector primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType;
DenseVectorType d1, d2;
SparseVectorType s1, s2;
// ... Resizing and initialization
// Creating a view on the first ten elements of the dense vector d1
blaze::DenseSubvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL );
// Creating a view on the second ten elements of the sparse vector s1
blaze::SparseSubvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL );
// Creating a view on the addition of d2 and s2
dsv = subvector( d2 + s2, 5UL, 10UL );
// Creating a view on the multiplication of d2 and s2
ssv = subvector( d2 * s2, 2UL, 10UL );
\endcode
// The \c subvector() function can be used on any dense or sparse vector, including expressions,
// as demonstrated in the example. Note however that a \ref views_dense_subvector or
// \ref views_sparse_subvector can only be instantiated with a dense or sparse vector primitive,
// respectively, i.e. with types that can be written, and not with an expression type.
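// As an illustrative sketch (an assumed example in the spirit of the previous ones), the
// following code contrasts the use of \c subvector() on an expression with the attempt to
// bind the result to a subvector view:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
VectorType v1, v2;
// ... Resizing and initialization
// OK: The subvector of the addition expression is evaluated and assigned to a plain vector
VectorType v3 = subvector( v1 + v2, 0UL, 5UL );
// Compilation error: A DenseSubvector cannot be instantiated with an expression type
// blaze::DenseSubvector<VectorType> sv = subvector( v1 + v2, 0UL, 5UL );
\endcode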
//
//
// \n \section views_subvectors_common_operations Common Operations
// <hr>
//
// A subvector view can be used like any other dense or sparse vector. For instance, the current
// number of elements can be obtained via the \c size() function, the current capacity via the
// \c capacity() function, and the number of non-zero elements via the \c nonZeros() function.
// However, since subvectors are references to a specific range of a vector, several operations
// are not possible on views, such as resizing and swapping. The following example shows this by
// means of a dense subvector view:
\code
typedef blaze::DynamicVector<int,blaze::rowVector> VectorType;
typedef blaze::DenseSubvector<VectorType> SubvectorType;
VectorType v( 42UL );
// ... Resizing and initialization
// Creating a view on the range [5..14] of vector v
SubvectorType sv = subvector( v, 5UL, 10UL );
sv.size(); // Returns the number of elements in the subvector
sv.capacity(); // Returns the capacity of the subvector
sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector
sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector
SubvectorType sv2 = subvector( v, 15UL, 10UL );
swap( sv, sv2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
VectorType v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
blaze::DenseSubvector<VectorType> sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
\code
typedef blaze::CompressedVector<double,blaze::rowVector> VectorType;
VectorType v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
blaze::SparseSubvector<VectorType> sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
// The numbering of the subvector elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case
// of constant subvectors a ConstIterator is returned:
\code
typedef blaze::DynamicVector<int,blaze::rowVector> VectorType;
typedef blaze::DenseSubvector<VectorType> SubvectorType;
VectorType v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of the dense vector v
SubvectorType sv = subvector( v, 16UL, 64UL );
for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // OK: Write access to the dense subvector value.
... = *it; // OK: Read access to the dense subvector value.
}
for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense subvector value.
}
\endcode
\code
typedef blaze::CompressedVector<int,blaze::rowVector> VectorType;
typedef blaze::SparseSubvector<VectorType> SubvectorType;
VectorType v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of the sparse vector v
SubvectorType sv = subvector( v, 16UL, 64UL );
for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedVector<double,blaze::rowVector> VectorType;
VectorType v( 256UL ); // Non-initialized vector of size 256
typedef blaze::SparseSubvector<VectorType> SubvectorType;
SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v
// The subscript operator provides access to all possible elements of the sparse subvector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse subvector, the element is inserted into the
// subvector.
sv[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the vector it is inserted into the vector, if it is already contained
// in the vector its value is modified.
sv.set( 45UL, -1.2 );
// An alternative for inserting elements into the subvector is the insert() function. However,
// it inserts the element only in case the element is not already contained in the subvector.
sv.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In
// case of subvectors, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the subvector and that the subvector's
// capacity is large enough to hold the new element. Note however that due to the nature of
// a subvector, which may be an alias to the middle of a sparse vector, the append() function
// does not work as efficiently for a subvector as it does for a vector.
sv.reserve( 10UL );
sv.append( 51UL, -2.1 );
\endcode
// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType;
DenseVectorType d1, d2, d3;
SparseVectorType s1, s2;
// ... Resizing and initialization
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
DenseMatrixType A;
typedef blaze::DenseSubvector<DenseVectorType> SubvectorType;
SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1
dsv = d2; // Dense vector initialization of the range [0..9]
subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19]
d3 = dsv + d2; // Dense vector/dense vector addition
s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition
d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication
subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6]
d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9]
d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9]
subvector( d1, 0UL , 10UL ) += d2; // Addition assignment
subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment
subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment
double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors
\endcode
// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the blaze::unaligned flag:
\code
using blaze::unaligned;
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
DenseVectorType x;
// ... Resizing and initialization
// Identical creations of an unaligned subvector in the range [8..23]
blaze::DenseSubvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL );
blaze::DenseSubvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL );
blaze::DenseSubvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL );
blaze::DenseSubvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL );
\endcode
// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide
// full flexibility in the creation of subvectors, this might result in performance disadvantages
// in comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector. Aligned subvectors are created by
// explicitly specifying the blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned dense subvector in the range [8..23]
blaze::DenseSubvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of the subvector must be aligned. The following source code gives some examples
// for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double
// values into an intrinsic vector:
\code
using blaze::aligned;
using blaze::columnVector;
typedef blaze::DynamicVector<double,columnVector> VectorType;
typedef blaze::DenseSubvector<VectorType,aligned> SubvectorType;
VectorType d( 17UL );
// ... Resizing and initialization
// OK: Starts at the beginning, i.e. the first element is aligned
SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL );
// OK: Start index is a multiple of 4, i.e. the first element is aligned
SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL );
// OK: The start index is a multiple of 4 and the subvector includes the last element
SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL );
// Error: Start index is not a multiple of 4, i.e. the first element is not aligned
SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense subvectors.
// In contrast, aligned sparse subvectors at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case
// the blaze::aligned flag is specified during setup, an aligned subvector is created:
\code
using blaze::aligned;
typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType;
SparseVectorType x;
// ... Resizing and initialization
// Creating an aligned subvector in the range [8..23]
blaze::SparseSubvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL );
\endcode
// \n \section views_subvectors_on_subvectors Subvectors on Subvectors
// <hr>
//
// It is also possible to create a subvector view on another subvector. In this context it is
// important to remember that the type returned by the \c subvector() function is the same type
// as the type of the given subvector, not a nested subvector type, since the view on a subvector
// is just another view on the underlying vector:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
typedef blaze::DenseSubvector<VectorType> SubvectorType;
VectorType d1;
// ... Resizing and initialization
// Creating a subvector view on the dense vector d1
SubvectorType sv1 = subvector( d1, 5UL, 10UL );
// Creating a subvector view on the dense subvector sv1
SubvectorType sv2 = subvector( sv1, 1UL, 5UL );
\endcode
// \n Previous: \ref views Next: \ref views_submatrices
*/
//*************************************************************************************************
//**Submatrices************************************************************************************
/*!\page views_submatrices Submatrices
//
// \tableofcontents
//
//
// Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors
// provide views on specific parts of vectors. As such, submatrices act as a reference to a
// specific block within a matrix. This reference is valid and can be used in every way any
// other dense or sparse matrix can be used as long as the matrix containing the submatrix is
// not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements
// in the specified block: Changes made to the elements (e.g. modifying values, inserting or
// erasing elements) are immediately visible in the matrix and changes made via the matrix are
// immediately visible in the submatrix. \b Blaze provides two submatrix types:
// \ref views_dense_submatrix and \ref views_sparse_submatrix.
//
//
// \n \section views_dense_submatrix DenseSubmatrix
// <hr>
//
// The blaze::DenseSubmatrix template represents a view on a specific submatrix of a dense matrix
// primitive. It can be included via the header file
\code
#include <blaze/math/DenseSubmatrix.h>
\endcode
// The type of the dense matrix is specified via two template parameters:
\code
template< typename MT, bool AF >
class DenseSubmatrix;
\endcode
// - \c MT: specifies the type of the dense matrix primitive. DenseSubmatrix can be used with
// every dense matrix primitive, but does not work with any matrix expression type.
// - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or
// unaligned (blaze::unaligned). The default value is blaze::unaligned.
//
//
// \n \section views_sparse_submatrix SparseSubmatrix
// <hr>
//
// The blaze::SparseSubmatrix template represents a view on a specific submatrix of a sparse
// matrix primitive. It can be included via the header file
\code
#include <blaze/math/SparseSubmatrix.h>
\endcode
// The type of the sparse matrix is specified via two template parameters:
\code
template< typename MT, bool AF >
class SparseSubmatrix;
\endcode
// - \c MT: specifies the type of the sparse matrix primitive. SparseSubmatrix can be used with
// every sparse matrix primitive, but does not work with any matrix expression type.
// - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or
// unaligned (blaze::unaligned). The default value is blaze::unaligned.
//
//
// \n \section views_submatrices_setup Setup of Submatrices
// <hr>
//
// A view on a submatrix can be created very conveniently via the \c submatrix() function.
// This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied
// from, and it can be used in arithmetic operations. A submatrix created from a row-major
// matrix will itself be a row-major matrix, a submatrix created from a column-major matrix
// will be a column-major matrix. The view can also be used on both sides of an assignment:
// The submatrix can either be used as an alias to grant write access to a specific submatrix
// of a dense matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific submatrix of a matrix primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType;
DenseMatrixType D1, D2;
SparseMatrixType S1, S2;
// ... Resizing and initialization
// Creating a view on the first 8x16 block of the dense matrix D1
blaze::DenseSubmatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL );
// Creating a view on the second 8x16 block of the sparse matrix S1
blaze::SparseSubmatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL );
// Creating a view on the addition of D2 and S2
dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL );
// Creating a view on the multiplication of D2 and S2
ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL );
\endcode
//
//
// \n \section views_submatrices_common_operations Common Operations
// <hr>
//
// The current size of the submatrix, i.e. the number of rows and columns, can be obtained via the
// \c rows() and \c columns() functions, the current total capacity via the \c capacity() function,
// and the number of non-zero elements via the \c nonZeros() function. However, since submatrices
// are views on a specific submatrix of a matrix, several operations are not possible on views,
// such as resizing and swapping:
\code
typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::DenseSubmatrix<MatrixType> SubmatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating a view on an 8x12 submatrix of matrix A
SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL );
sm.rows(); // Returns the number of rows of the submatrix
sm.columns(); // Returns the number of columns of the submatrix
sm.capacity(); // Returns the capacity of the submatrix
sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix
sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix
SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL );
swap( sm, sm2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_submatrices_element_access Element Access
// <hr>
//
// The elements of a submatrix can be directly accessed with the function call operator:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating an 8x8 submatrix, starting from position (4,4)
blaze::DenseSubmatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating an 8x8 submatrix, starting from position (4,4)
blaze::SparseSubmatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
// Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
// with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator,
// which allows a manipulation of the non-zero values, in case of constant submatrices a
// ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::DenseSubmatrix<MatrixType> SubmatrixType;
MatrixType A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of the dense matrix A
SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) {
*it = ...; // OK: Write access to the dense submatrix value.
... = *it; // OK: Read access to the dense submatrix value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense submatrix value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::SparseSubmatrix<MatrixType> SubmatrixType;
MatrixType A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of the sparse matrix A
SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_submatrices_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
typedef blaze::SparseSubmatrix<MatrixType> SubmatrixType;
SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A
// The function call operator provides access to all possible elements of the sparse submatrix,
// including the zero elements. In case the function call operator is used to access an element
// that is currently not stored in the sparse submatrix, the element is inserted into the
// submatrix.
sm(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the submatrix it is inserted into the submatrix, if it is already contained
// in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the submatrix is the insert() function. However,
// it inserts the element only in case the element is not already contained in the submatrix.
sm.insert( 2UL, 6UL, 3.7 );
// Just as in case of sparse matrices, elements can also be inserted via the append() function.
// In case of submatrices, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index in the according row or column of the
// submatrix and that the according row's or column's capacity is large enough to hold the new
// element. Note however that due to the nature of a submatrix, which may be an alias to the
// middle of a sparse matrix, the append() function does not work as efficiently for a
// submatrix as it does for a matrix.
sm.reserve( 2UL, 10UL );
sm.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_submatrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse submatrices can be used in all arithmetic operations that any other dense
// or sparse matrix can be used in. The following example gives an impression of the use of dense
// submatrices within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse matrices with
// fitting element types:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType;
DenseMatrixType D1, D2, D3;
SparseMatrixType S1, S2;
typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType;
SparseVectorType a, b;
// ... Resizing and initialization
typedef blaze::DenseSubmatrix<DenseMatrixType> SubmatrixType;
SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1
// starting from row 0 and column 0
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix
// starting in row 0 and column 8
sm = S1; // Sparse matrix initialization of the first 8x8 submatrix of D1
D3 = sm + D2; // Dense matrix/dense matrix addition
S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction
D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1
D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1
D2 = 2.0 * sm; // Scaling of a submatrix of D1
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment
submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment
a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_aligned_submatrices Aligned Submatrices
// <hr>
//
// Usually submatrices can be defined anywhere within a matrix. They may start at any position and
// may have an arbitrary extension (only restricted by the extension of the underlying matrix).
// However, in contrast to matrices themselves, which are always properly aligned in memory and
// therefore can provide maximum performance, this means that submatrices in general have to be
// considered to be unaligned. This can be made explicit by the blaze::unaligned flag:
\code
using blaze::unaligned;
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
DenseMatrixType A;
// ... Resizing and initialization
// Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0
blaze::DenseSubmatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
blaze::DenseSubmatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
blaze::DenseSubmatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
blaze::DenseSubmatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// All of these calls to the \c submatrix() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide
// full flexibility in the creation of submatrices, this might result in performance disadvantages
// in comparison to matrix primitives (even in case the specified submatrix could be aligned).
// Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a matrix might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned submatrices. Aligned submatrices are identical to
// unaligned submatrices in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying matrix. Aligned submatrices are created by
// explicitly specifying the blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
blaze::DenseSubmatrix<DenseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of each row/column of the submatrix must be aligned. The following source code
// gives some examples for a double precision row-major dynamic matrix, assuming that padding is
// enabled and that AVX is available, which packs 4 \c double values into an intrinsic vector:
\code
using blaze::aligned;
using blaze::rowMajor;
typedef blaze::DynamicMatrix<double,rowMajor> MatrixType;
typedef blaze::DenseSubmatrix<MatrixType,aligned> SubmatrixType;
MatrixType D( 13UL, 17UL );
// ... Resizing and initialization
// OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding)
SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL );
// OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding)
SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 5UL );
// OK: First column is a multiple of 4 and the submatrix includes the last row and column
SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL );
// Error: First column is not a multiple of 4, i.e. the first element is not aligned
SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 8UL, 12UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense submatrices.
// In contrast, aligned sparse submatrices at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case
// the blaze::aligned flag is specified during setup, an aligned submatrix is created:
\code
using blaze::aligned;
typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType;
SparseMatrixType A;
// ... Resizing and initialization
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
blaze::SparseSubmatrix<SparseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// \n \section views_submatrices_on_submatrices Submatrices on Submatrices
// <hr>
//
// It is also possible to create a submatrix view on another submatrix. In this context it is
// important to remember that the type returned by the \c submatrix() function is the same type
// as the type of the given submatrix, since the view on a submatrix is just another view on the
// underlying matrix:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType;
typedef blaze::DenseSubmatrix<MatrixType> SubmatrixType;
MatrixType D1;
// ... Resizing and initialization
// Creating a submatrix view on the dense matrix D1
SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL );
// Creating a submatrix view on the dense submatrix sm1
SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL );
\endcode
// \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices
// <hr>
//
// Submatrices can also be created on symmetric matrices (see the SymmetricMatrix class template):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::DenseSubmatrix;
typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType;
typedef DenseSubmatrix< SymmetricDynamicType > SubmatrixType;
// Setup of a 16x16 symmetric matrix
SymmetricDynamicType A( 16UL );
// Creating a dense submatrix of size 8x12, starting in row 2 and column 4
SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL );
\endcode
// It is important to note, however, that (compound) assignments to such submatrices have a
// special restriction: The symmetry of the underlying symmetric matrix must not be broken!
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
//       ( 1 2 )
//   B = ( 3 4 )
//       ( 5 6 )
//
DynamicMatrix<int> B( 3UL, 2UL );
B(0,0) = 1;
B(0,1) = 2;
B(1,0) = 3;
B(1,1) = 4;
B(2,0) = 5;
B(2,1) = 6;
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
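// In case it is not known in advance whether an assignment preserves the symmetry, the operation
// can be guarded accordingly. The following is a minimal sketch, reusing the matrices A2 and B
// from the example above:
\code
try {
   submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Throws in case the symmetry would be broken
}
catch( const std::invalid_argument& ex ) {
   // ... Handling the failed assignment
}
\endcode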
// \n Previous: \ref views_subvectors Next: \ref views_rows
*/
//*************************************************************************************************
//**Rows*******************************************************************************************
/*!\page views_rows Rows
//
// \tableofcontents
//
//
// Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a
// reference to a specific row. This reference is valid and can be used in every way any other
// row vector can be used as long as the matrix containing the row is not resized or entirely
// destroyed. The row also acts as an alias to the row elements: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix
// and changes made via the matrix are immediately visible in the row. \b Blaze provides two
// row types: \ref views_dense_row and \ref views_sparse_row.
//
//
// \n \section views_dense_row DenseRow
// <hr>
//
// The blaze::DenseRow class template represents a reference to a specific row of a dense matrix
// primitive. It can be included via the header file
\code
#include <blaze/math/DenseRow.h>
\endcode
// The type of the dense matrix is specified via template parameter:
\code
template< typename MT >
class DenseRow;
\endcode
// \c MT specifies the type of the dense matrix primitive. DenseRow can be used with every dense
// matrix primitive, but does not work with any matrix expression type.
//
//
// \n \section views_sparse_row SparseRow
// <hr>
//
// The blaze::SparseRow class template represents a reference to a specific row of a sparse matrix
// primitive. It can be included via the header file
\code
#include <blaze/math/SparseRow.h>
\endcode
// The type of the sparse matrix is specified via template parameter:
\code
template< typename MT >
class SparseRow;
\endcode
// \c MT specifies the type of the sparse matrix primitive. SparseRow can be used with every
// sparse matrix primitive, but does not work with any matrix expression type.
//
//
// \n \section views_rows_setup Setup of Rows
// <hr>
//
// A reference to a dense or sparse row can be created very conveniently via the \c row() function.
// This reference can be treated as any other row vector, i.e. it can be assigned to, it can be
// copied from, and it can be used in arithmetic operations. The reference can also be used on
// both sides of an assignment: The row can either be used as an alias to grant write access to a
// specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific row of a matrix primitive or expression on the right-hand side of an assignment.
// The following two examples demonstrate this for dense and sparse matrices:
\code
typedef blaze::DynamicVector<double,rowVector> DenseVectorType;
typedef blaze::CompressedVector<double,rowVector> SparseVectorType;
typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType;
DenseVectorType x;
SparseVectorType y;
DenseMatrixType A, B;
SparseMatrixType C, D;
// ... Resizing and initialization
// Setting the 2nd row of matrix A to x
blaze::DenseRow<DenseMatrixType> row2 = row( A, 2UL );
row2 = x;
// Setting the 3rd row of matrix B to y
row( B, 3UL ) = y;
// Setting x to the 4th row of the result of the matrix multiplication
x = row( A * B, 4UL );
// Setting y to the 2nd row of the result of the sparse matrix multiplication
y = row( C * D, 2UL );
\endcode
// The \c row() function can be used on any dense or sparse matrix, including expressions, as
// illustrated by the source code example. However, both \ref views_dense_row and
// \ref views_sparse_row cannot be instantiated for expression types, but only for dense and
// sparse matrix primitives, respectively, i.e. for matrix types that offer write access.
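// As an illustrative sketch (an assumed example, analogous to the one above), the difference
// can be demonstrated as follows:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType;
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
MatrixType A, B;
// ... Resizing and initialization
// OK: The 2nd row of the matrix multiplication is evaluated and assigned to a plain row vector
VectorType x = row( A * B, 2UL );
// Compilation error: A DenseRow cannot be instantiated with an expression type
// blaze::DenseRow<MatrixType> r = row( A * B, 2UL );
\endcode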
//
//
// \n \section views_rows_common_operations Common Operations
// <hr>
//
// A row view can be used like any other row vector. For instance, the current number of elements
// can be obtained via the \c size() function, the current capacity via the \c capacity() function,
// and the number of non-zero elements via the \c nonZeros() function. However, since rows are
// references to specific rows of a matrix, several operations are not possible on views, such
// as resizing and swapping. The following example shows this by means of a dense row view:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::DenseRow<MatrixType> RowType;
MatrixType A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd row of matrix A
RowType row2 = row( A, 2UL );
row2.size(); // Returns the number of elements in the row
row2.capacity(); // Returns the capacity of the row
row2.nonZeros(); // Returns the number of non-zero elements contained in the row
row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix
RowType row3 = row( A, 3UL );
swap( row2, row3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_rows_element_access Element Access
// <hr>
//
// The elements of the row can be directly accessed with the subscript operator. The numbering
// of the row elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of columns of the referenced matrix. Alternatively, the elements of
// a row can be traversed via iterators. Just as with vectors, in case of non-const rows,
// \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero
// value, in case of a constant row a ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::DenseRow<MatrixType> RowType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
RowType row31 = row( A, 31UL );
for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // OK: Write access to the dense row value.
... = *it; // OK: Read access to the dense row value.
}
for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense row value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,rowMajor> MatrixType;
typedef blaze::SparseRow<MatrixType> RowType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
RowType row31 = row( A, 31UL );
for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_rows_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse row can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix
typedef blaze::SparseRow<MatrixType> RowType;
RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A
// The subscript operator provides access to all possible elements of the sparse row,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse row, the element is inserted into the row.
row0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the row it is inserted into the row, if it is already contained in
// the row its value is modified.
row0.set( 45UL, -1.2 );
// An alternative for inserting elements into the row is the insert() function. However,
// it inserts the element only in case the element is not already contained in the row.
row0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse row is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the row and that the row's capacity is large
// enough to hold the new element.
row0.reserve( 10UL );
row0.append( 51UL, -2.1 );
\endcode
// \n \section views_rows_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse rows can be used in all arithmetic operations that any other dense or
// sparse row vector can be used in. The following example gives an impression of the use of
// dense rows within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse rows with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::rowVector> c( 2UL );
c[1] = 3.0;
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix;
DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix
typedef blaze::DenseRow<DenseMatrix> RowType;
RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A
row0[0] = 0.0; // Manual initialization of the 0th row of A
row0[1] = 0.0;
row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A
row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A
row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A
b = row0 + a; // Dense vector/dense vector addition
b = c + row( A, 1UL ); // Sparse vector/dense vector addition
b = row0 * row( A, 2UL ); // Component-wise vector multiplication
row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row
b = row( A, 1UL ) * 2.0; // Scaling of the 1st row
b = 2.0 * row( A, 1UL ); // Scaling of the 1st row
row( A, 2UL ) += a; // Addition assignment
row( A, 2UL ) -= c; // Subtraction assignment
row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment
double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors
A = trans( c ) * row( A, 1UL ); // Outer product between two vectors
\endcode
// \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that row views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows the direct traversal of rows
// and the interface of a column-major matrix only allows the direct traversal of columns, views
// make it possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
typedef blaze::CompressedMatrix<int,columnMajor> MatrixType;
typedef blaze::SparseRow<MatrixType> RowType;
MatrixType A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st row of the column-major matrix A
RowType row1 = row( A, 1UL );
for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a row view on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a view on a matrix with
// a fitting storage orientation. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
CompressedMatrix<double,columnMajor> A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th row of the multiplication between A and B ...
CompressedVector<double,rowVector> x = row( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th row of the column-major matrix A with B.
CompressedVector<double,rowVector> x = row( A, 15UL ) * B;
\endcode
// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible,
// using a row-major storage order for matrix A would result in a more efficient evaluation.
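// The following brief sketch (an assumed variation of the example above, not a measured
// benchmark) shows the more favorable setup:
\code
// Using a row-major matrix A allows a contiguous traversal of its 15th row
CompressedMatrix<double,rowMajor> A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
CompressedVector<double,rowVector> x = row( A, 15UL ) * B;
\endcode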
//
// \n Previous: \ref views_submatrices Next: \ref views_columns
*/
//*************************************************************************************************
//**Columns****************************************************************************************
/*!\page views_columns Columns
//
// \tableofcontents
//
//
// Just as rows provide a view on a specific row of a matrix, columns provide views on a specific
// column of a dense or sparse matrix. As such, columns act as a reference to a specific column.
// This reference is valid and can be used in every way any other column vector can be used as long
// as the matrix containing the column is not resized or entirely destroyed. Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the
// matrix and changes made via the matrix are immediately visible in the column. \b Blaze provides
// two column types: \ref views_dense_column and \ref views_sparse_column.
//
//
// \n \section views_dense_column DenseColumn
// <hr>
//
// The blaze::DenseColumn class template represents a reference to a specific column of a dense
// matrix primitive. It can be included via the header file
\code
#include <blaze/math/DenseColumn.h>
\endcode
// The type of the dense matrix is specified via template parameter:
\code
template< typename MT >
class DenseColumn;
\endcode
// \c MT specifies the type of the dense matrix primitive. DenseColumn can be used with every
// dense matrix primitive, but does not work with any matrix expression type.
//
//
// \n \section views_sparse_column SparseColumn
// <hr>
//
// The blaze::SparseColumn class template represents a reference to a specific column of a sparse
// matrix primitive. It can be included via the header file
\code
#include <blaze/math/SparseColumn.h>
\endcode
// The type of the sparse matrix is specified via template parameter:
\code
template< typename MT >
class SparseColumn;
\endcode
// \c MT specifies the type of the sparse matrix primitive. SparseColumn can be used with every
// sparse matrix primitive, but does not work with any matrix expression type.
//
//
// \n \section views_colums_setup Setup of Columns
// <hr>
//
// Similar to the setup of a row, a reference to a dense or sparse column can be created very
// conveniently via the \c column() function. This reference can be treated as any other column
// vector, i.e. it can be assigned to, copied from, and be used in arithmetic operations. The
// column can either be used as an alias to grant write access to a specific column of a matrix
// primitive on the left-hand side of an assignment or to grant read-access to a specific column
// of a matrix primitive or expression on the right-hand side of an assignment. The following
// two examples demonstrate this for dense and sparse matrices:
\code
typedef blaze::DynamicVector<double,columnVector> DenseVectorType;
typedef blaze::CompressedVector<double,columnVector> SparseVectorType;
typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType;
DenseVectorType x;
SparseVectorType y;
DenseMatrixType A, B;
SparseMatrixType C, D;
// ... Resizing and initialization
// Setting the 1st column of matrix A to x
blaze::DenseColumn<DenseMatrixType> col1 = column( A, 1UL );
col1 = x;
// Setting the 4th column of matrix B to y
column( B, 4UL ) = y;
// Setting x to the 2nd column of the result of the matrix multiplication
x = column( A * B, 2UL );
// Setting y to the 2nd column of the result of the sparse matrix multiplication
y = column( C * D, 2UL );
\endcode
// The \c column() function can be used on any dense or sparse matrix, including expressions,
// as illustrated by the source code example. However, both \ref views_dense_column and
// \ref views_sparse_column cannot be instantiated for expression types, but only for dense
// and sparse matrix primitives, respectively, i.e. for matrix types that offer write access.
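// As an illustrative sketch (an assumed example, analogous to the one above), the difference
// can be demonstrated as follows:
\code
typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType;
typedef blaze::DynamicVector<double,blaze::columnVector> VectorType;
MatrixType A, B;
// ... Resizing and initialization
// OK: The 2nd column of the matrix multiplication is evaluated and assigned to a plain column vector
VectorType x = column( A * B, 2UL );
// Compilation error: A DenseColumn cannot be instantiated with an expression type
// blaze::DenseColumn<MatrixType> c = column( A * B, 2UL );
\endcode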
//
//
// \n \section views_columns_common_operations Common Operations
// <hr>
//
// A column view can be used like any other column vector. For instance, the current number of
// elements can be obtained via the \c size() function, the current capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// columns are references to specific columns of a matrix, several operations are not possible on
// views, such as resizing and swapping. The following example shows this by means of a dense
// column view:
\code
typedef blaze::DynamicMatrix<int,columnMajor> MatrixType;
typedef blaze::DenseColumn<MatrixType> ColumnType;
MatrixType A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd column of matrix A
ColumnType col2 = column( A, 2UL );
col2.size(); // Returns the number of elements in the column
col2.capacity(); // Returns the capacity of the column
col2.nonZeros(); // Returns the number of non-zero elements contained in the column
col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix
ColumnType col3 = column( A, 3UL );
swap( col2, col3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_columns_element_access Element Access
// <hr>
//
// The elements of the column can be directly accessed with the subscript operator. The numbering
// of the column elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of rows of the referenced matrix. Alternatively, the elements of
// a column can be traversed via iterators. Just as with vectors, in case of non-const columns,
// \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero
// value, in case of a constant column a ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,columnMajor> MatrixType;
typedef blaze::DenseColumn<MatrixType> ColumnType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
ColumnType col31 = column( A, 31UL );
for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) {
*it = ...; // OK: Write access to the dense column value.
... = *it; // OK: Read access to the dense column value.
}
for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense column value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,columnMajor> MatrixType;
typedef blaze::SparseColumn<MatrixType> ColumnType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
ColumnType col31 = column( A, 31UL );
for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_columns_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse column can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType;
MatrixType A( 100UL, 10UL ); // Non-initialized 100x10 matrix
typedef blaze::SparseColumn<MatrixType> ColumnType;
ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A
// The subscript operator provides access to all possible elements of the sparse column,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse column, the element is inserted into the column.
col0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the column it is inserted into the column, if it is already contained
// in the column its value is modified.
col0.set( 45UL, -1.2 );
// An alternative for inserting elements into the column is the insert() function. However,
// it inserts the element only in case the element is not already contained in the column.
col0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse column is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the column and that the column's capacity is
// large enough to hold the new element.
col0.reserve( 10UL );
col0.append( 51UL, -2.1 );
\endcode
// \n \section views_columns_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse columns can be used in all arithmetic operations that any other dense or
// sparse column vector can be used in. The following example gives an impression of the use of
// dense columns within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse columns with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType;
MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix
typedef blaze::DenseColumn<MatrixType> ColumnType;
ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A
col0[0] = 0.0; // Manual initialization of the 0th column of A
col0[1] = 0.0;
column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A
column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A
column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A
b = col0 + a; // Dense vector/dense vector addition
b = c + column( A, 1UL ); // Sparse vector/dense vector addition
b = col0 * column( A, 2UL ); // Component-wise vector multiplication
column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column
b = column( A, 1UL ) * 2.0; // Scaling of the 1st column
b = 2.0 * column( A, 1UL ); // Scaling of the 1st column
column( A, 2UL ) += a; // Addition assignment
column( A, 2UL ) -= c; // Subtraction assignment
column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment
double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors
A = column( A, 1UL ) * trans( c ); // Outer product between two vectors
\endcode
// \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that column views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows the direct traversal of its rows
// and the interface of a column-major matrix only allows the direct traversal of its columns, views
// make it possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
typedef blaze::CompressedMatrix<int,rowMajor> MatrixType;
typedef blaze::SparseColumn<MatrixType> ColumnType;
MatrixType A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st column of the row-major matrix A
ColumnType col1 = column( A, 1UL );
for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a column view on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a view on a matrix with
// a fitting storage orientation. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two row-major matrices
CompressedMatrix<double,rowMajor> A( 128UL, 128UL );
CompressedMatrix<double,rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th column of the multiplication between A and B ...
CompressedVector<double,columnVector> x = column( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th column of the row-major matrix B with A.
x = A * column( B, 15UL );
\endcode
// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
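//
// For instance, the more favorable setup (a minimal sketch reusing the sizes from above) stores
// \c B in column-major order, so that \c column( B, 15UL ) refers to contiguously stored elements:
\code
// Setup of a row-major matrix A and a column-major matrix B
CompressedMatrix<double,rowMajor>    A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization

// The 15th column of the column-major matrix B can be accessed contiguously
CompressedVector<double,columnVector> x = A * column( B, 15UL );
\endcode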
//
// \n Previous: \ref views_rows Next: \ref arithmetic_operations
*/
//*************************************************************************************************
//**Arithmetic Operations**************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication </li>
// </ul>
//
// \n Previous: \ref views_columns Next: \ref addition
*/
//*************************************************************************************************
//**Addition***************************************************************************************
/*!\page addition Addition
//
// The addition of vectors and matrices is as intuitive as the addition of scalar values. For both
// the vector addition as well as the matrix addition the addition operator can be used. It even
// enables the addition of dense and sparse vectors as well as the addition of dense and sparse
// matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 + v2; // Addition of two column vectors of different data type
\endcode
\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// add vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 + v2; // Compilation error: Cannot add a column vector and a row vector
v1 + trans( v2 ); // OK: Addition of two column vectors
\endcode
// In case of matrices, however, it is possible to add row-major and column-major matrices. Note,
// however, that for performance reasons the addition of two matrices with the same storage order
// is preferable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are added, the performance can be much higher due to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 + v2; // Vectorized addition of two double precision vectors
\endcode
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref arithmetic_operations Next: \ref subtraction
*/
//*************************************************************************************************
//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// The subtraction of vectors and matrices is just as intuitive as the addition, but uses the
// subtraction operator. For both the vector subtraction as well as the matrix subtraction
// the subtraction operator can be used. It also enables the subtraction of dense and sparse
// vectors as well as the subtraction of dense and sparse matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 - v2; // Subtraction of two column vectors of different data type
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 ); // OK: Subtraction of two column vectors
\endcode
// In case of matrices, however, it is possible to subtract row-major and column-major matrices.
// Note, however, that for performance reasons the subtraction of two matrices with the same storage
// order is preferable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are subtracted, the performance can be much higher due to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 - v2; // Vectorized subtraction of two double precision vectors
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref addition Next: \ref scalar_multiplication
*/
//*************************************************************************************************
//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a scalar value with a vector or a matrix.
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Additionally, it is possible to use std::complex values with the same built-in data
// types as element type.
\code
blaze::StaticVector<int,3UL> v1( 1, 2, 3 );
blaze::DynamicVector<double> v2 = v1 * 1.2;
blaze::CompressedVector<float> v3 = -0.3F * v1;
\endcode
\code
blaze::StaticMatrix<int,3UL,2UL> M1( 1, 2, 3, 4, 5, 6 );
blaze::DynamicMatrix<double> M2 = M1 * 1.2;
blaze::CompressedMatrix<float> M3 = -0.3F * M1;
\endcode
// Vectors and matrices cannot be used as scalar values for scalar multiplications (see the
// following example). However, each vector and matrix provides the \c scale() function, which
// can be used to scale a vector or matrix element-wise with arbitrary scalar data types:
\code
blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
blaze::StaticMatrix<int,3UL,3UL> scalar;
M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication
M1.scale( scalar ); // Scalar multiplication
\endcode
// \n Previous: \ref subtraction Next: \ref componentwise_multiplication
*/
//*************************************************************************************************
//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::StaticVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and
// a dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row
// vectors. The result is a dense row vector.
\endcode
// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:
\code
blaze::StaticVector<int,3UL,rowVector> v1( 2, 5, -1 );
blaze::DynamicVector<int,columnVector> v2( 3UL );
v2[0] = -1;
v2[1] = 3;
v2[2] = -2;
int result = v1 * v2; // Results in the value 15
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1( 2, 5, -1 );
blaze::StaticVector<int,3UL,rowVector> v2( -1, 3, -2 );
int result = v1 * trans( v2 ); // Also results in the value 15
\endcode
// Alternatively, the comma operator can be used for any combination of vectors (row or column vectors)
// to perform an inner product:
\code
blaze::StaticVector<int,3UL,rowVector> v1( 2, 5, -1 );
blaze::StaticVector<int,3UL,rowVector> v2( -1, 3, -2 );
int result = (v1,v2); // Inner product between two row vectors
\endcode
// Please note the brackets embracing the inner product expression. Due to the low precedence
// of the comma operator (lower even than the assignment operator) these brackets are strictly
// required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1( 2, 5, -1 );
blaze::DynamicVector<int,rowVector> v2( 3UL );
v2[0] = -1;
v2[1] = 3;
v2[2] = -2;
blaze::StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1( 2, 5, -1 );
blaze::StaticVector<int,3UL,rowVector> v2( -1, 3, -2 );
blaze::StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
\endcode
// \n \section cross_product Cross Product
// <hr>
//
// Two column vectors can be multiplied via the cross product. The cross product between two
// vectors \f$ a \f$ and \f$ b \f$ is defined as
\f[
\left(\begin{array}{*{1}{c}}
c_0 \\
c_1 \\
c_2 \\
\end{array}\right)
=
\left(\begin{array}{*{1}{c}}
a_1 b_2 - a_2 b_1 \\
a_2 b_0 - a_0 b_2 \\
a_0 b_1 - a_1 b_0 \\
\end{array}\right).
\f]
// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the modulo operator (i.e. \c operator%):
\code
blaze::StaticVector<int,3UL,columnVector> v1( 2, 5, -1 );
blaze::DynamicVector<int,columnVector> v2( 3UL );
v2[0] = -1;
v2[1] = 3;
v2[2] = -2;
blaze::StaticVector<int,3UL,columnVector> v3( v1 % v2 );
\endcode
// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
// \n Previous: \ref scalar_multiplication Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************
//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;
DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;
// ... Initialization of the matrix and the vector
DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication
DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication
\endcode
// Note that the storage order of the matrix poses no restrictions on the operation. Also note,
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_multiplication Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************
//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// The matrix/matrix multiplication can be formulated exactly as in mathematical textbooks:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 * M2;
\endcode
// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. Note however that the highest performance for a multiplication between two dense
// matrices can be expected for two matrices with the same scalar element type.
//
// \n Previous: \ref matrix_vector_multiplication Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************
//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance
// on a single CPU core for all possible operations. However, today's CPUs are not single core
// anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully
// exploit the performance potential of a multicore CPU, computations have to be parallelized
// across all available cores of a CPU. For this purpose, \b Blaze provides three different
// shared memory parallelization techniques:
//
// - \ref openmp_parallelization
// - \ref cpp_threads_parallelization
// - \ref boost_threads_parallelization
//
// In addition, \b Blaze provides means to enforce the serial execution of specific operations:
//
// - \ref serial_execution
//
// \n Previous: \ref matrix_matrix_multiplication Next: \ref openmp_parallelization
*/
//*************************************************************************************************
//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:
\code
-fopenmp // GNU C++ compiler
-openmp // Intel C++ compiler
/openmp // Visual Studio
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable
\code
export OMP_NUM_THREADS=4 // Unix systems
set OMP_NUM_THREADS=4 // Windows systems
\endcode
// or via an explicit call to the \c omp_set_num_threads() function:
\code
omp_set_num_threads( 4 );
\endcode
// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <em>./blaze/config/Thresholds.h</em>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:
\code
using blaze::columnVector;
const size_t N( 1000000UL );
blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );
// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// Performing a vector triad
a = b + c * d;
\endcode
// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:
\code
// ...
// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// ...
\endcode
// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP, threads are explicitly spawned via an OpenMP parallel directive:
\code
// Serial region, executed by a single thread
#pragma omp parallel
{
// Parallel region, executed by the specified number of threads
}
// Serial region, executed by a single thread
\endcode
// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
#pragma omp parallel
{
y1 = A * x;
y2 = B * x;
}
\endcode
// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
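//
// A minimal sketch of the intended usage therefore simply omits the explicit parallel region
// and lets \b Blaze parallelize each assignment internally:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization

y1 = A * x;  // Blaze decides internally whether and how to parallelize this operation
y2 = B * x;  // The same holds for the second multiplication
\endcode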
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = A * x;
#pragma omp section
y2 = B * x;
}
\endcode
// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = serial( A * x );
#pragma omp section
y2 = serial( B * x );
}
\endcode
// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref shared_memory_parallelization Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************
//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the corresponding C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by
\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11
// threads are enabled on the command line, the OpenMP-based parallelization has priority and
// is preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <em>./blaze/config/Thresholds.h</em>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
// http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:
\code
int main()
{
// ... Using the C++11 thread parallelization of Blaze
shutDownThreads();
}
\endcode
// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed! Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref openmp_parallelization Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************
//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based on Boost
// threads.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode
// Second, the corresponding Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations
// have priority, i.e. are preferred in case either is enabled in combination with the Boost
// thread parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <em>./blaze/config/Thresholds.h</em>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the Boost thread parallelization.
//
// \n Previous: \ref cpp_threads_parallelization Next: \ref serial_execution
*/
//*************************************************************************************************
//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:
\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode
// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:
\code
using blaze::rowMajor;
using blaze::columnVector;
blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;
// ... Resizing and initialization
// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;
// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
y = A * c;
z = A * d;
}
// Parallel execution continued
// ...
\endcode
// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads results in undefined behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <em>./blaze/config/SMP.h</em> configuration file:
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode
// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
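//
// Deactivating the parallelization therefore amounts to changing the switch in
// <em>./blaze/config/SMP.h</em> as follows (the value \c 1 shown above keeps it enabled):
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 0
\endcode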
//
// \n Previous: \ref boost_threads_parallelization Next: \ref serialization
*/
//*************************************************************************************************
//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people. The \b Blaze math serialization
// module provides the functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
// - \ref vector_serialization
// - \ref matrix_serialization
//
// \n Previous: \ref serial_execution Next: \ref vector_serialization
*/
//*************************************************************************************************
//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:
\code
using blaze::columnVector;
using blaze::rowVector;
// Serialization of both vectors
{
blaze::StaticVector<double,5UL,rowVector> d;
blaze::CompressedVector<int,columnVector> s;
// ... Resizing and initialization
// Creating an archive that writes into the file "vectors.blaze"
blaze::Archive<std::ofstream> archive( "vectors.blaze" );
// Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s;
}
// Reconstitution of both vectors
{
blaze::DynamicVector<double,rowVector> d1;
blaze::DynamicVector<int,rowVector> d2;
// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );
// Reconstituting the former d vector into d1. Note that it is possible to reconstitute
// the vector into a different kind of vector (StaticVector -> DynamicVector), but that
// the type of elements has to be the same.
archive >> d1;
// Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
// a sparse vector as a dense vector (also the reverse is possible) and that a column vector
// can be reconstituted as row vector (and vice versa). Note however that also in this case
// the type of elements is the same!
archive >> d2;
}
\endcode
// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:
\code
// Serialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// ... Resizing and initialization
// Creating an archive that writes into the file "vector.blaze"
blaze::Archive<std::ofstream> archive( "vector.blaze" );
// Serialization of the vector into the archive
archive << vec;
}
// Deserialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// Creating an archive that reads from the file "vector.blaze"
blaze::Archive<std::ifstream> archive( "vector.blaze" );
// Reconstitution of the vector from the archive
archive >> vec;
}
\endcode
// As the examples demonstrate, the vector serialization offers an enormous flexibility. However,
// several actions result in errors:
//
// - vectors cannot be reconstituted as matrices (and vice versa)
// - the element type of the serialized and reconstituted vector must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
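//
// The following sketch (assuming the "vectors.blaze" archive from above, which starts with a
// vector of 5 elements) illustrates how such an error could be handled:
\code
blaze::StaticVector<double,3UL,rowVector> v;

// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );

try {
   archive >> v;  // Fails, since the serialized vector does not have exactly 3 elements
}
catch( const std::exception& ex ) {
   // ... Error handling
}
\endcode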
//
// \n Previous: \ref serialization Next: \ref matrix_serialization
*/
//*************************************************************************************************
//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
// Serialization of both matrices
{
blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
blaze::CompressedMatrix<int,columnMajor> S;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrices.blaze"
blaze::Archive<std::ofstream> archive( "matrices.blaze" );
// Serialization of both matrices into the same archive. Note that D lies before S!
archive << D << S;
}
// Reconstitution of both matrices
{
blaze::DynamicMatrix<double,rowMajor> D1;
blaze::DynamicMatrix<int,rowMajor> D2;
// Creating an archive that reads from the file "matrices.blaze"
blaze::Archive<std::ifstream> archive( "matrices.blaze" );
// Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
// the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
// the type of elements has to be the same.
archive >> D1;
// Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
// a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
// matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
// in this case the type of elements is the same!
archive >> D2;
}
\endcode
// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:
\code
// Serialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrix.blaze"
blaze::Archive<std::ofstream> archive( "matrix.blaze" );
// Serialization of the matrix into the archive
archive << mat;
}
// Deserialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// Creating an archive that reads from the file "matrix.blaze"
blaze::Archive<std::ifstream> archive( "matrix.blaze" );
// Reconstitution of the matrix from the archive
archive >> mat;
}
\endcode
// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
// - matrices cannot be reconstituted as vectors (and vice versa)
// - the element type of the serialized and reconstituted matrix must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
// of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization Next: \ref blas_functions \n
*/
//*************************************************************************************************
//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For matrix/vector and matrix/matrix multiplications with large dense matrices \b Blaze relies
// on the efficiency of BLAS libraries. For this purpose, \b Blaze implements several convenient
// C++ wrapper functions for several BLAS functions. The following sections give a complete
// overview of all available BLAS level 2 and 3 functions.
//
//
// \n \section blas_level_2 BLAS Level 2
// <hr>
//
// \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()):
\code
namespace blaze {
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha,
const float* A, int lda, const float* x, int incX,
float beta, float* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha,
const double* A, int lda, const double* x, int incX,
double beta, double* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha,
const complex<float>* A, int lda, const complex<float>* x, int incX,
complex<float> beta, complex<float>* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha,
const complex<double>* A, int lda, const complex<double>* x, int incX,
complex<double> beta, complex<double>* y, int incY );
template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
const DenseVector<VT2,false>& x, ST alpha, ST beta );
template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );
} // namespace blaze
\endcode
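//
// Based on the dense matrix/vector overload shown above, a call could for instance look as
// follows (assuming the usual BLAS semantics \f$ y = \alpha A x + \beta y \f$; the sizes are
// arbitrary examples):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 50UL, 30UL );
blaze::DynamicVector<double,blaze::columnVector> x( 30UL ), y( 50UL );
// ... Resizing and initialization

blaze::gemv( y, A, x, 2.0, 0.5 );  // Computes y = 2.0*A*x + 0.5*y via the BLAS dgemv() kernel
\endcode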
// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):
\code
namespace blaze {
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const float* A, int lda, float* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const double* A, int lda, double* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
} // namespace blaze
\endcode
// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):
\code
namespace blaze {
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, float alpha, const float* A, int lda,
const float* B, int ldb, float beta, float* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, double alpha, const double* A, int lda,
const double* B, int ldb, double beta, double* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );
} // namespace blaze
\endcode
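//
// Based on the dense matrix overload shown above, a call could for instance look as follows
// (assuming the usual BLAS semantics \f$ C = \alpha A B + \beta C \f$; the sizes are arbitrary
// examples):
\code
blaze::DynamicMatrix<double> A( 40UL, 60UL ), B( 60UL, 20UL ), C( 40UL, 20UL );
// ... Resizing and initialization

blaze::gemm( C, A, B, 1.0, 0.0 );  // Computes C = 1.0*A*B + 0.0*C via the BLAS dgemm() kernel
\endcode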
// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):
\code
namespace blaze {
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n Previous: \ref matrix_serialization Next: \ref lapack_functions \n
*/
//*************************************************************************************************
//**LAPACK Functions*******************************************************************************
/*!\page lapack_functions LAPACK Functions
//
// \tableofcontents
//
//
// The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks
// (including the decomposition, inversion and the computation of the determinant of dense matrices).
// For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required
// LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper
// functions. For more details on the individual LAPACK functions see the \b Blaze function
// documentation or the LAPACK online documentation browser:
//
// http://www.netlib.org/lapack/explore-html/
//
// \note All functions only work for general, non-adapted matrices with \c float, \c double,
// \c complex<float>, or \c complex<double> element type. The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if the fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:
\code
namespace blaze {
void getrf( int m, int n, float* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, double* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info );
template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = P \cdot L \cdot U, \f]\n
// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
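//
// A minimal usage sketch of the dense matrix overload shown above (the matrix size and the
// pivot array are only examples):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 5UL, 5UL );
// ... Resizing and initialization

int ipiv[5];              // Pivot indices; at least min(m,n) elements are required

blaze::getrf( A, ipiv );  // In-place LU decomposition of A
\endcode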
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:
\code
namespace blaze {
void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info );
void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or }
A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:
\code
namespace blaze {
void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or }
A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:
\code
namespace blaze {
void potrf( char uplo, int n, float* A, int lda, int* info );
void potrf( char uplo, int n, double* A, int lda, int* info );
void potrf( char uplo, int n, complex<float>* A, int lda, int* info );
void potrf( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or }
A = L L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \a std::invalid_argument exception is thrown.
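//
// For illustration, the dense matrix overload might be used as follows (a minimal sketch; the
// positive definite matrix \a A is assumed to be initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
// ... Initialization of the positive definite matrix A
blaze::potrf( A, 'L' );  // In-place Cholesky decomposition of the lower part of A
\endcode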
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:
\code
namespace blaze {
void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot R, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition:
\code
namespace blaze {
void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
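// For illustration, a QR decomposition followed by the reconstruction of \c Q might look as
// follows (a minimal sketch; the contents of the matrix \a A are assumed to be initialized
// elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 8UL, 5UL );
// ... Initialization of the matrix A
double tau[5];           // Scalar factors of the elementary reflectors; min(m,n) elements
blaze::geqrf( A, tau );  // In-place QR decomposition of A
blaze::orgqr( A, tau );  // Reconstructs the matrix Q within A
\endcode
//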
// \n \subsection lapack_rq_decomposition RQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(),
// \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix:
\code
namespace blaze {
void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = R \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and
// <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>,
// and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray
// <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case
// \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n
// upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau
// represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(),
// \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition:
\code
namespace blaze {
void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_ql_decomposition QL Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(),
// \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix:
\code
namespace blaze {
void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot L, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and
// <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>,
// and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray
// A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n,
// the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower
// trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent
// the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(),
// \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition:
\code
namespace blaze {
void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_lq_decomposition LQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(),
// \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix:
\code
namespace blaze {
void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = L \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the
// \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n);
// the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q
// as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(),
// \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition:
\code
namespace blaze {
void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// \n \section lapack_inversion Matrix Inversion
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to invert
// the matrix in-place.
//
//
// \n \subsection lapack_lu_inversion LU-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(),
// \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by
// an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info );
void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info );
void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info );
void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void getri( DenseMatrix<MT,SO>& A, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
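//
// For illustration, a complete LU-based inversion might look as follows (a minimal sketch; the
// contents of the matrix \a A are assumed to be initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
// ... Initialization of the matrix A
int ipiv[4];              // Pivot indices; n elements
blaze::getrf( A, ipiv );  // LU decomposition of A
blaze::getri( A, ipiv );  // In-place inversion based on the LU decomposition
\endcode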
//
//
// \n \subsection lapack_ldlt_inversion LDLT-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(),
// \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been
// decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info );
void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info );
void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_inversion LDLH-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c chetri() and
// \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by
// an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potri( char uplo, int n, float* A, int lda, int* info );
void potri( char uplo, int n, double* A, int lda, int* info );
void potri( char uplo, int n, complex<float>* A, int lda, int* info );
void potri( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potri( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
\code
namespace blaze {
void trtri( char uplo, char diag, int n, float* A, int lda, int* info );
void trtri( char uplo, char diag, int n, double* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
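//
// For illustration, the dense matrix overload might be used as follows (a minimal sketch; the
// lower triangular matrix \a A is assumed to be initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
// ... Initialization of the lower triangular matrix A
blaze::trtri( A, 'L', 'N' );  // In-place inversion of the lower triangular, non-unit diagonal matrix
\endcode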
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is an n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_substitution LU-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(),
// \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has
// already been decomposed by an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The function fails if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
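//
// For illustration, a decomposition followed by the substitution step might look as follows (a
// minimal sketch; the system matrix \a A and the right-hand side \a b are assumed to be
// initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
blaze::DynamicVector<double> b( 4UL );
// ... Initialization of the system matrix A and the right-hand side b
int ipiv[4];                      // Pivot indices; n elements
blaze::getrf( A, ipiv );          // LU decomposition of A
blaze::getrs( A, b, 'N', ipiv );  // Substitution step; b contains the solution afterwards
\endcode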
//
//
// \n \subsection lapack_ldlt_substitution LDLT-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(),
// \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite
// matrix that has already been decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The function fails if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_substitution LDLH-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(),
// which perform the substitution step for an Hermitian indefinite matrix that has already been
// decomposed by an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The function fails if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The function fails if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
\code
namespace blaze {
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The function fails if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is an n-by-n matrix that is decomposed by the
// function itself via the according decomposition step, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
\code
namespace blaze {
void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info );
void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
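//
// For illustration, the dense matrix/dense vector overload might be used as follows (a minimal
// sketch; the system matrix \a A and the right-hand side \a b are assumed to be initialized
// elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
blaze::DynamicVector<double> b( 4UL );
// ... Initialization of the system matrix A and the right-hand side b
int ipiv[4];                // Pivot indices; n elements
blaze::gesv( A, b, ipiv );  // Decomposition and substitution in one step; b contains the solution
\endcode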
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
\code
namespace blaze {
void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv(),
// which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
\code
namespace blaze {
void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
\code
namespace blaze {
void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
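//
// For illustration, the dense matrix/dense vector overload might be used as follows (a minimal
// sketch; the positive definite matrix \a A and the right-hand side \a b are assumed to be
// initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
blaze::DynamicVector<double> b( 4UL );
// ... Initialization of the positive definite matrix A and the right-hand side b
blaze::posv( A, b, 'L' );  // Cholesky decomposition and substitution; b contains the solution
\endcode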
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(),
// \c ctrsv(), and \c ztrsv():
\code
namespace blaze {
void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b contains the solution of the linear
// system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \a std::invalid_argument exception in case of an error. Note that
// none of the functions performs any test for singularity or near-singularity. Such tests
// must be performed prior to calling these functions!
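//
// For illustration, the dense matrix/dense vector overload might be used as follows (a minimal
// sketch; the lower triangular matrix \a A and the right-hand side \a b are assumed to be
// initialized elsewhere):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
blaze::DynamicVector<double> b( 4UL );
// ... Initialization of the lower triangular matrix A and the right-hand side b
blaze::trsv( A, b, 'L', 'N', 'N' );  // Forward substitution; b contains the solution afterwards
\endcode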
//
//
// \n Previous: \ref blas_functions Next: \ref configuration_files \n
*/
//*************************************************************************************************
//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it might be necessary to adapt \b Blaze to specific requirements. For this purpose
// \b Blaze provides several configuration files in the <em>./blaze/config/</em> subdirectory,
// which provide ample opportunity to customize internal settings, behavior, and thresholds.
// This chapter explains the most important of these configuration files.
//
//
// \n \section transpose_flag Default Vector Storage
// <hr>
//
// The \b Blaze default is that all vectors are created as column vectors (if not specified
// explicitly):
\code
blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector
\endcode
// The header file <em>./blaze/config/TransposeFlag.h</em> allows the configuration of the default
// vector storage (i.e. the default transpose flag of the vectors). Via the \c defaultTransposeFlag
// value the default transpose flag for all vectors of the \b Blaze library can be specified:
\code
const bool defaultTransposeFlag = columnVector;
\endcode
// Valid settings for the \c defaultTransposeFlag are blaze::rowVector and blaze::columnVector.
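//
// Note that independent of this default, the transpose flag can always be specified explicitly
// for each individual vector:
\code
blaze::StaticVector<double,3UL,blaze::columnVector> a;  // Explicitly declared column vector
blaze::StaticVector<double,3UL,blaze::rowVector>    b;  // Explicitly declared row vector
\endcode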
//
//
// \n \section storage_order Default Matrix Storage
// <hr>
//
// Matrices are by default created as row-major matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix
\endcode
// The header file <em>./blaze/config/StorageOrder.h</em> allows the configuration of the default
// matrix storage order. Via the \c defaultStorageOrder value the default storage order for all
// matrices of the \b Blaze library can be specified.
\code
const bool defaultStorageOrder = rowMajor;
\endcode
// Valid settings for the \c defaultStorageOrder are blaze::rowMajor and blaze::columnMajor.
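//
// As with vectors, the storage order can always be specified explicitly for each individual
// matrix, independent of this default:
\code
blaze::StaticMatrix<double,3UL,3UL,blaze::rowMajor>    A;  // Explicitly declared row-major matrix
blaze::StaticMatrix<double,3UL,3UL,blaze::columnMajor> B;  // Explicitly declared column-major matrix
\endcode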
//
//
// \n \section vectorization Vectorization
//
// In order to achieve maximum performance and to exploit the compute power of a target platform
// the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or
// MIC intrinsics, depending on which instruction set is available. However, it is possible to
// disable the vectorization entirely by the compile time switch in the configuration file
// <em>./blaze/config/Vectorization.h</em>:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is
// disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for
// the operations. Note that deactivating the vectorization may pose a severe performance
// limitation for a large number of operations!
//
//
// \n \section thresholds Thresholds
//
// \b Blaze provides several thresholds that can be adapted to the characteristics of the target
// platform. For instance, the \c DMATDVECMULT_THRESHOLD specifies the threshold between the
// application of the custom \b Blaze kernels for small dense matrix/dense vector multiplications
// and the BLAS kernels for large multiplications. All thresholds, including the thresholds for
// the OpenMP-based parallelization, are contained within the configuration file
// <em>./blaze/config/Thresholds.h</em>.
//
//
// \n \section padding Padding
//
// By default the \b Blaze library uses padding for all dense vectors and matrices in order to
// achieve maximum performance in all operations. Due to padding, the proper alignment of data
// elements can be guaranteed and the need for remainder loops is minimized. However, on the
// downside padding introduces an additional memory overhead, which can be large depending on
// the used data type.
//
// The configuration file <em>./blaze/config/Optimizations.h</em> provides a compile time switch
// that can be used to (de-)activate padding:
\code
const bool usePadding = true;
\endcode
// If \c usePadding is set to \c true padding is enabled for all dense vectors and matrices, if
// it is set to \c false padding is disabled. Note however that disabling padding can considerably
// reduce the performance of all dense vector and matrix operations!
//
//
// \n \section streaming Streaming (Non-Temporal Stores)
//
// For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide
// a significant performance advantage of about 20%. However, this advantage is only in effect in
// case the memory bandwidth of the target architecture is maxed out. If the target architecture's
// memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance
// instead of increasing it.
//
// The configuration file <em>./blaze/config/Optimizations.h</em> provides a compile time switch
// that can be used to (de-)activate streaming:
\code
const bool useStreaming = true;
\endcode
// If \c useStreaming is set to \c true streaming is enabled, if it is set to \c false streaming
// is disabled. It is recommended to consult the target architecture's white papers to decide
// whether streaming is beneficial or hurtful for performance.
//
//
// \n Previous: \ref lapack_functions Next: \ref custom_data_types \n
*/
//*************************************************************************************************
//**Custom Data Types******************************************************************************
/*!\page custom_data_types Custom Data Types
//
//
// The \b Blaze library tries hard to make the use of custom data types as convenient, easy and
// intuitive as possible. However, unfortunately it is not possible to meet the requirements of
// all possible data types. Thus it might be necessary to provide \b Blaze with some additional
// information about the data type. The following sections give an overview of the necessary steps
// to enable the use of the hypothetical custom data type \c custom::double_t for vector and
// matrix operations. For example:
\code
blaze::DynamicVector<custom::double_t> a, b, c;
// ... Resizing and initialization
c = a + b;
\endcode
// The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+()
// for additions, \c operator-() for subtractions, \c operator*() for multiplications and
// \c operator/() for divisions. If any of these functions is missing it is necessary to implement
// the operator to perform the according operation. For this example we assume that the custom
// data type provides the four following functions instead of operators:
\code
namespace custom {
double_t add ( const double_t& a, const double_t& b );
double_t sub ( const double_t& a, const double_t& b );
double_t mult( const double_t& a, const double_t& b );
double_t div ( const double_t& a, const double_t& b );
} // namespace custom
\endcode
// The following implementations will satisfy the requirements of the \b Blaze library:
\code
inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b )
{
return add( a, b );
}
inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b )
{
return sub( a, b );
}
inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b )
{
return mult( a, b );
}
inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b )
{
return div( a, b );
}
\endcode
// \b Blaze will use all the information provided with these functions (for instance the return
// type) to properly handle the operations. In the rare case that the return type cannot be
// automatically determined from the operator it might be additionally necessary to provide a
// specialization of the following four \b Blaze class templates:
\code
namespace blaze {
template<>
struct AddTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct SubTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct MultTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct DivTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
} // namespace blaze
\endcode
// The same steps are necessary if several custom data types need to be combined (as for instance
// \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to
// be taken into account:
\code
custom::double_t operator+( const custom::double_t& a, const custom::float_t& b );
custom::double_t operator+( const custom::float_t& a, const custom::double_t& b );
// ...
\endcode
// Please note that only built-in data types are eligible for vectorization, and thus custom data
// types cannot achieve maximum performance!
//
//
// \n Previous: \ref configuration_files Next: \ref error_reporting_customization \n
*/
//*************************************************************************************************
//**Customization of the Error Reporting Mechanism*************************************************
/*!\page error_reporting_customization Customization of the Error Reporting Mechanism
//
// \tableofcontents
//
//
// \n \section error_reporting_background Background
//
// The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under
// special circumstances exceptions may not be the mechanism of choice and a different error
// reporting mechanism may be desirable. For this reason, \b Blaze provides several macros,
// which enable the customization of the error reporting mechanism. Via these macros it is
// possible to replace the standard exceptions by some other exception type or a completely
// different approach to report errors.
//
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to
// replace it by some other means to signal failure. The primary macro for this purpose is the
// \c BLAZE_THROW macro:
\code
#define BLAZE_THROW( EXCEPTION ) \
throw EXCEPTION
\endcode
// This macro represents the default mechanism of the \b Blaze library to report errors of any
// kind. In order to customize the error reporting mechanism, all that needs to be done is to
// define the macro prior to including any \b Blaze header file. This will cause the \b Blaze
// specific mechanism to be overridden. The following example demonstrates this by replacing
// exceptions by a call to a \c log() function and a direct call to abort:
\code
#define BLAZE_THROW( EXCEPTION ) \
log( "..." ); \
abort()
#include <blaze/Blaze.h>
\endcode
// Doing this will trigger a call to \c log() and an abort instead of throwing an exception
// whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to
// throw an exception. Also note that it is recommended to define the macro such that a subsequent
// semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special
// conditions and environments. However, the customization of the error reporting mechanism via
// this macro can have a significant effect on the library. Thus be advised to use the macro
// with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
//
// In addition to the customization of the entire error reporting mechanism it is also possible
// to customize the type of exceptions being thrown. This can be achieved by customizing any
// number of the following macros:
\code
#define BLAZE_THROW_BAD_ALLOC \
BLAZE_THROW( std::bad_alloc() )
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( std::invalid_argument( MESSAGE ) )
#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
BLAZE_THROW( std::length_error( MESSAGE ) )
#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
BLAZE_THROW( std::logic_error( MESSAGE ) )
#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode
// In order to customize the type of exception the according macro has to be defined prior to
// including any \b Blaze header file. This will override the \b Blaze default behavior. The
// following example demonstrates this by replacing \c std::invalid_argument by a custom
// exception type:
\code
class InvalidArgument
{
public:
InvalidArgument();
explicit InvalidArgument( const std::string& message );
// ...
};
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( InvalidArgument( MESSAGE ) )
#include <blaze/Blaze.h>
\endcode
// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a
// \c std::invalid_argument exception. Note that it is recommended to define the macro such
// that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to
// special conditions and environments. However, the customization of the type of an exception
// via this macro may have an effect on the library. Thus be advised to use the macro with due
// care!
//
//
// \n Previous: \ref custom_data_types Next: \ref intra_statement_optimization \n
*/
//*************************************************************************************************
//**Intra-Statement Optimization*******************************************************************
/*!\page intra_statement_optimization Intra-Statement Optimization
//
// One of the prime features of the \b Blaze library is the automatic intra-statement optimization.
// In order to optimize the overall performance of every single statement \b Blaze attempts to
// rearrange the operands based on their types. For instance, the following addition of dense and
// sparse vectors
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1 + d2;
\endcode
// is automatically rearranged and evaluated as
\code
// ...
d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged
\endcode
// This order of operands is highly favorable for the overall performance since the addition of
// the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized
// fashion.
//
// This intra-statement optimization can have a tremendous effect on the performance of a statement.
// Consider for instance the following computation:
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = A * B * x;
\endcode
// Since multiplications are evaluated from left to right, this statement would result in a
// matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the
// right subexpression is evaluated first, the performance can be dramatically improved since the
// matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication.
// The \b Blaze library exploits this by automatically restructuring the expression such that the
// right multiplication is evaluated first:
\code
// ...
y = A * ( B * x );
\endcode
// Note however that although this intra-statement optimization may result in a measurable or
// even significant performance improvement, this behavior may be undesirable for several reasons,
// for instance because of numerical stability. Therefore, in case the order of evaluation matters,
// the best solution is to be explicit and to separate a statement into several statements:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ...
d3 += d2; // ... and afterwards add the second dense vector
\endcode
\code
// ...
blaze::DynamicMatrix<double> A, B, C;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
C = A * B; // Compute the left-hand side matrix-matrix multiplication first ...
y = C * x; // ... before the right-hand side matrix-vector multiplication
\endcode
// Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + eval( s1 + d2 );
\endcode
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = eval( A * B ) * x;
\endcode
// \n Previous: \ref error_reporting_customization
*/
//*************************************************************************************************
#endif
parallel_master_taskloop_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp parallel master taskloop simd
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd foo
void test_no_clause() {
int i;
#pragma omp parallel master taskloop simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel master taskloop simd' must be a for loop}}
#pragma omp parallel master taskloop simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp parallel master taskloop simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i, a;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp parallel master taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp parallel master taskloop simd nogroup nogroup
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{unexpected OpenMP clause 'in_reduction' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd in_reduction(+:a)
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp parallel master taskloop simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel master taskloop simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel master taskloop simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel master taskloop simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp parallel master taskloop simd
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
void test_nontemporal() {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel master taskloop simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel master taskloop simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel master taskloop simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp parallel master taskloop simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp parallel master taskloop simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp parallel master taskloop simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
}
|
GB_unop__asinh_fp64_fp64.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asinh_fp64_fp64)
// op(A') function: GB (_unop_tran__asinh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = asinh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asinh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = asinh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASINH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asinh_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asinh (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asinh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__asinh_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
r_direct_o1.c
|
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <[email protected]>
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "optimizer.h"
#include "nr_direct.h"
#include "time_rev.h"
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#define DECLARE_ALL \
const int *atm = envs->atm; \
const int *bas = envs->bas; \
const double *env = envs->env; \
const int natm = envs->natm; \
const int nbas = envs->nbas; \
const int *ao_loc = envs->ao_loc; \
const int *shls_slice = envs->shls_slice; \
const int *tao = envs->tao; \
const CINTOpt *cintopt = envs->cintopt; \
const int nao = ao_loc[nbas]; \
const int di = ao_loc[ish+1] - ao_loc[ish]; \
const int dj = ao_loc[jsh+1] - ao_loc[jsh]; \
const int dim = GTOmax_shell_dim(ao_loc, shls_slice+4, 2); \
double *cache = (double *)(buf + di * dj * dim * dim * ncomp); \
int (*fprescreen)(); \
int (*r_vkscreen)(); \
if (vhfopt) { \
fprescreen = vhfopt->fprescreen; \
r_vkscreen = vhfopt->r_vkscreen; \
} else { \
fprescreen = CVHFnoscreen; \
r_vkscreen = CVHFr_vknoscreen; \
}
static void transpose01324(double complex * __restrict__ a,
double complex * __restrict__ at,
int di, int dj, int dk, int dl, int ncomp)
{
int i, j, k, l, m, ic;
int dij = di * dj;
int dijk = dij * dk;
double complex *pa;
m = 0;
for (ic = 0; ic < ncomp; ic++) {
for (l = 0; l < dl; l++) {
for (j = 0; j < dj; j++) {
pa = a + j*di;
for (k = 0; k < dk; k++) {
for (i = 0; i < di; i++) {
at[m] = pa[i];
m++;
}
pa += dij;
}
}
a += dijk;
}
}
}
/*
* for given ksh, lsh, loop all ish, jsh
*/
void CVHFdot_rs1(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
// to make fjk compatible with the C-contiguous dm array, put ksh, lsh in the inner loop
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < nbas; ksh++) {
for (lsh = 0; lsh < nbas; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
// append buf.transpose(0,2,1,3) to eris, to reduce the cost of r_direct_dot
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
/*
* for given ish, jsh, loop all ksh > lsh
*/
static void dot_rs2sub(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh, int ksh_count,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < ksh_count; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
void CVHFdot_rs2ij(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish >= jsh) {
CVHFdot_rs1(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, vhfopt, envs);
}
}
void CVHFdot_rs2kl(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
dot_rs2sub(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, envs->nbas, vhfopt, envs);
}
void CVHFdot_rs4(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish >= jsh) {
dot_rs2sub(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, envs->nbas, vhfopt, envs);
}
}
void CVHFdot_rs8(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish < jsh) {
return;
}
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
// to make fjk compatible with the C-contiguous dm array, put ksh, lsh in the inner loop
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh <= ish; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
/* when ksh==ish, (lsh<jsh) misses some integrals (e.g. k<i && l>j).
 * These integrals are calculated in the next (ish,jsh) pair. To show
 * that, we just need to prove that every element in shell^4 appears
 * only once in fjk_s8. */
if ((ksh == ish) && (lsh > jsh)) {
break;
}
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
/*
* drv loop over ij, generate eris of kl for given ij, call fjk to
* calculate vj, vk.
*
* n_dm is the number of dms for one [array(ij|kl)],
* ncomp is the number of components that produced by intor
*/
void CVHFr_direct_drv(int (*intor)(), void (*fdot)(), void (**fjk)(),
double complex **dms, double complex *vjk,
int n_dm, int ncomp, int *shls_slice, int *ao_loc,
CINTOpt *cintopt, CVHFOpt *vhfopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const size_t nao = ao_loc[nbas];
int *tao = malloc(sizeof(int)*nao);
CVHFtimerev_map(tao, bas, nbas);
IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, ao_loc, tao,
cintopt, ncomp};
memset(vjk, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
shared(intor, fdot, fjk, \
dms, vjk, n_dm, ncomp, nbas, cintopt, vhfopt, envs)
{
int i, j, ij;
double complex *v_priv = malloc(sizeof(double complex)*nao*nao*n_dm*ncomp);
memset(v_priv, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
int bufsize = di*di*di*di*ncomp;
bufsize = bufsize + MAX(bufsize, cache_size/2);
double complex *buf = malloc(sizeof(double complex) * bufsize);
#pragma omp for nowait schedule(dynamic)
for (ij = 0; ij < nbas*nbas; ij++) {
i = ij / nbas;
j = ij - i * nbas;
(*fdot)(intor, fjk, dms, v_priv, buf, n_dm, ncomp, i, j,
vhfopt, &envs);
}
#pragma omp critical
{
for (i = 0; i < nao*nao*n_dm*ncomp; i++) {
vjk[i] += v_priv[i];
}
}
free(v_priv);
free(buf);
}
free(tao);
}
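/*
 * The driver above accumulates per-thread partial results in v_priv and then
 * adds them into the shared vjk array inside a critical section. A minimal
 * standalone sketch of that pattern (illustrative only; reduce_example and
 * its shapes are assumptions, not part of PySCF):
 */
static void reduce_example(double *out, long n)
{
#pragma omp parallel
{
        long i;
        /* private accumulation buffer per thread */
        double *priv = calloc(n, sizeof(double));
#pragma omp for
        for (i = 0; i < n; i++) {
                priv[i] += 1.0;   /* stand-in for the real per-shell work */
        }
        /* serialize only the final reduction into the shared output */
#pragma omp critical
        {
                for (i = 0; i < n; i++) {
                        out[i] += priv[i];
                }
        }
        free(priv);
}
}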
|
master.c
|
#include<stdio.h>
#include<omp.h>
int main(){
int id;
#pragma omp parallel private(id) /* each thread gets its own id to avoid a data race */
{
#pragma omp master
{
id = omp_get_thread_num();
printf("Master block thread %d.\n", id);
}
id = omp_get_thread_num();
printf("Parallel block thread %d.\n", id);
}
}
|
cython_dL_update_omp.c
|
/* cython_dL_update_hmc.c
*
* Rutger van Haasteren, December 12 2015, Pasadena
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* The aggregated algorithm for use in the Hamiltonian Sampler */
void dL_update_hmc2(const double *pdL, const double *pdLi, const double *pdp,
double *pdM, double *pdtj, const int N) {
/*
Formal derivative of rank-one update of Cholesky decomposition,
adjusted to perform all rank-one updates at once for the derivative
L'L'^{T} = LL^{T} + diag(B)
dL' = L Phi(L^{-1} dB L^{-T}) With Phi the utril function
B = B(x)
We need: dot(d_L_d_x, p), and trace(L^{-1} d_L_d_x)
Assuming we know dB/dx, we can get d_L_d_x from the chain-rule, using
d_L_d_B. The output of this function lets us do that:
dot(d_L_d_x, p) = dot(M, d_B_d_x)
trace(L^{-1} d_L_d_x) = dot(tj, d_B_d_x)
Re-parameterized: also works in the limit where a->0
:param pdL: Current updated Cholesky decomposition (L-prime)
:param pdLi: Inverse of Cholesky decomposition (L^{-1})
:param pdp: Vector we'll need to multiply dL with
:param pdM: The return matrix M (output)
:param pdtj: The return vector tj (output)
:param N: Size of all the objects
*/
double *pdLdot, *pdU, *pdLtrans;
double r, drdot, dcdot, ds, temp;
int i, j, k, index;
//const int maxthreads = omp_get_max_threads();
/* Allocate memory for dL transpose */
pdLtrans = malloc(N*N*sizeof(double));
/* Set the input matrices to zero (is quick), and transpose L */
for(i=0; i<N; ++i) {
for(j=0; j<N; ++j) {
pdM[j+N*i] = 0.0;
pdLtrans[j+N*i] = pdL[i+N*j];
} /* for j */
pdtj[i] = 0.0;
} /* for i */
#pragma omp parallel private(i, j, k, index, pdLdot, pdU, r, drdot, dcdot, ds, temp) shared(pdL, pdLtrans, pdLi, pdp, pdM, pdtj, N) default(none)
{
//const int nthreads = omp_get_num_threads();
//const int ithread = omp_get_thread_num();
double *pdMlocal, dtjlocal;
pdMlocal = calloc(N, sizeof(double));
//printf("In thread %i of %i\n", ithread, nthreads);
/* The index i represents the basis vector we are working with */
#pragma omp for nowait // schedule(dynamic)
for(i=0; i<N; ++i) {
/* Allocate memory inside the parallel region */
pdLdot = calloc(N, sizeof(double)); /* columns of Ldot are stored only */
pdU = calloc(N, sizeof(double)); /* basis vector we are updating */
/* Initialize all our quantities */
pdU[i] = 1.0;
temp = 0.0;
dtjlocal = 0.0;
/* The index k represents the row of Ldot we are working with */
for(k=0; k<N; ++k) {
r = pdL[k+N*k];
/* Initialize the vector quantities */
drdot = 0.5*pdU[k]*pdU[k] / r;
dcdot = drdot/pdL[k+N*k];
ds = pdU[k] / pdL[k+N*k];
/* Clear Ldot data */
if(k > 0) {
pdLdot[k-1] = 0.0;
} /* if k */
pdLdot[k] = drdot;
/* Update Ldot */
/* The index j represents the column of Ldot we are working with */
for(j=k+1; j<N; ++j) {
/* Using the transpose of pdL is faster */
//pdLdot[j] = ds*pdU[j] - dcdot * pdL[k+N*j];
pdLdot[j] = ds*pdU[j] - dcdot * pdLtrans[j+N*k];
} /* for j */
/* Update U */
for(j=k+1; j<N; ++j) {
/* Using the transpose of pdL is faster */
//pdU[j] = pdU[j] - ds*pdL[k+N*j];
pdU[j] = pdU[j] - ds*pdLtrans[j+N*k];
} /* for j */
/* Update M */
temp = 0;
for(j=k; j<N; ++j) {
temp += pdLdot[j]*pdp[j];
} /* for j */
//pdM[i+N*k] += temp;
pdMlocal[k] = temp;
/* Update tj */
temp = 0;
for(j=0; j<N; ++j) {
temp += pdLi[j+N*k]*pdLdot[j];
} /* for j */
//pdtj[i] += temp;
dtjlocal += temp;
} /* for k */
/* Updating pdM and pdtj here can be slow; how fast this section runs
 * depends strongly on the compiler flags. */
#pragma omp critical
{
for(k=0; k<N; ++k) {
index = i+N*k;
/* Writing a constant here is fast: */
/* pdM[index] = 1.337; */
/* but copying from pdMlocal is slow: */
pdM[index] = pdMlocal[k];
//pdM[index] = 1.337;
} /* for k */
/* Adding a constant here is fast: */
/* pdtj[i] += 1.445; */
/* but accumulating dtjlocal is slow: */
pdtj[i] += dtjlocal;
//pdtj[i] += 1.445;
}
/* Free memory of parallel regions */
free(pdLdot);
free(pdU);
} /* for i */
free(pdMlocal);
} /* pragma omp parallel */
free(pdLtrans);
return;
} /* dL_update_hmc */
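/*
 * Hypothetical usage sketch (not part of the original file): given the
 * outputs M and tj of dL_update_hmc2 and the diagonal derivative dB/dx of
 * one parameter x, the chain rule quoted in the docstring gives
 * dot(dL/dx, p) and trace(L^{-1} dL/dx) as plain dot products. The helper
 * name and the pdM[i + N*k] storage layout assumed here simply mirror the
 * indexing used above.
 */
void dL_chain_rule_example(const double *pdM, const double *pdtj,
                           const double *pdBdx, double *pdLdx_dot_p,
                           double *ptrace, const int N) {
    int i, k;
    *ptrace = 0.0;
    for(k=0; k<N; ++k) {
        pdLdx_dot_p[k] = 0.0;
        for(i=0; i<N; ++i) {
            /* row k of M times dB/dx: dot(dL/dx, p)[k] = sum_i M[k,i] dB_i/dx */
            pdLdx_dot_p[k] += pdM[i + N*k] * pdBdx[i];
        }
    }
    for(i=0; i<N; ++i) {
        /* trace(L^{-1} dL/dx) = sum_i tj[i] dB_i/dx */
        *ptrace += pdtj[i] * pdBdx[i];
    }
}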
|
gbdt.h
|
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation, including training, prediction, and bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param out_result Used to store the prediction result; memory should be allocated before calling this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_pred_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, max_iteration);
if (num_iteration > 0) {
num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
} else {
num_pred_in_one_row *= (max_iteration - start_iteration);
}
} else if (is_pred_contrib) {
num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_pred_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, num_iteration_for_pred_);
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
} else {
num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
}
start_iteration_for_pred_ = start_iteration;
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
bool IsLinear() const override { return linear_tree_; }
inline std::string ParserConfigStr() const override {return parser_config_str_;}
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current iteration
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief Parser config file content */
std::string parser_config_str_ = "";
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
kt_sbucket.c
|
#include "kt_sbucket.h"
#include "yche/log.h"
void sbucket_update_edge(
support_bucket_t * const sbucket,
int64_t const edge_id,
int32_t const support,
int32_t const ktruss)
{
slist_s * const slist = sbucket->slist;
/* no-op if edge has already been deleted or updated */
if(support < 0 || support == slist[edge_id].support) {
return;
}
/* peel starting at (ktruss - 3) */
int32_t const min_sup = ktruss - 3;
ssize_t * shead = sbucket->list_head;
/*
* NOTE: We clamp the new/old support to min_sup instead of using the value
* as given because we are ultimately interested in the smallest bucket
* holding ALL edges which need to be peeled. The bottom-most occupied list
* therefore contains every to-be-peeled edge.
*/
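/* Worked example (added for illustration): with ktruss = 5, min_sup = 2; an
 * edge whose support drops from 7 to 1 is unlinked from bucket 7 and pushed
 * onto bucket 2, the bottom bucket currently being peeled, while its stored
 * support field still records the true value 1. */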
/* remove edge_id from current support-bucket */
int32_t const old_sup = gk_max(slist[edge_id].support, min_sup);
/* if edge_id is the head of the list */
if(shead[old_sup] == edge_id) {
shead[old_sup] = slist[edge_id].next_eid;
slist[slist[edge_id].next_eid].prev_eid = -1;
} else {
slist[slist[edge_id].prev_eid].next_eid = slist[edge_id].next_eid;
slist[slist[edge_id].next_eid].prev_eid = slist[edge_id].prev_eid;
}
/* now add edge_id to the head of the new list */
int32_t const new_sup = gk_max(support, min_sup);
slist[edge_id].support = support;
slist[edge_id].prev_eid = -1;
slist[edge_id].next_eid = shead[new_sup];
slist[shead[new_sup]].prev_eid = edge_id;
shead[new_sup] = edge_id;
}
int64_t sbucket_count_support_size(
support_bucket_t const * const sbucket,
int32_t const support)
{
if(support >= sbucket->nsupports) {
return 0;
}
/* traverse linked list to count edges */
int64_t nedges = 0;
ssize_t e_id = sbucket->list_head[support];
while(e_id != -1) {
++nedges;
e_id = sbucket->slist[e_id].next_eid;
}
return nedges;
}
int64_t sbucket_get_frontier(
support_bucket_t * const sbuckets,
int32_t const support,
int64_t * frontier)
{
int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
int64_t bucket_sizes[1 + KT_MAX_THREADS * KT_BUCKETS_PER_THREAD];
#pragma omp parallel
{
/* first get size of each bucket */
#pragma omp for schedule(dynamic, 1)
for(int b=0; b < nbuckets; ++b) {
bucket_sizes[b] = sbucket_count_support_size(&(sbuckets[b]), support);
}
/* prefix sum to allow parallel writes */
#pragma omp single
{
int b;
MAKECSR(b, nbuckets, bucket_sizes);
}
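/* e.g. bucket sizes {3, 1, 2} become offsets {0, 3, 4, 6}: bucket b then
 * writes frontier[bucket_sizes[b] .. bucket_sizes[b+1]) and the final entry
 * holds the total frontier length returned below. */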
/* now copy data into frontier buffer */
#pragma omp for schedule(dynamic, 1)
for(int b=0; b < nbuckets; ++b) {
/* traverse list and fill buffer */
int64_t * buffer = &(frontier[bucket_sizes[b]]);
int64_t edge_ptr = 0;
ssize_t e_id = sbuckets[b].list_head[support];
while(e_id != -1) {
buffer[edge_ptr++] = e_id;
e_id = sbuckets[b].slist[e_id].next_eid;
}
/* We are deleting all edges in bucket, so update head of list. */
sbuckets[b].list_head[support] = -1;
sbuckets[b].slist[-1].prev_eid = -1;
sbuckets[b].slist[-1].next_eid = -1;
} /* foreach bucket */
} /* end omp parallel */
return bucket_sizes[nbuckets];
}
void sbucket_fill_edges(
support_bucket_t const * const sbucket,
int32_t const support,
int64_t * const restrict edge_ids)
{
if(support >= sbucket->nsupports) {
return;
}
/* traverse linked list and fill buffer */
int64_t edge_ptr = 0;
ssize_t e_id = sbucket->list_head[support];
while(e_id != -1) {
edge_ids[edge_ptr++] = e_id;
e_id = sbucket->slist[e_id].next_eid;
}
}
support_bucket_t * sbucket_alloc(
edge_t const * const edges,
int32_t const * const supports,
int64_t const global_nedges,
thread_ws * * thd_ws)
{
/* allocate buckets */
int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
support_bucket_t * sbuckets = gk_malloc(nbuckets * sizeof(*sbuckets),
"sbuckets");
int32_t const nsupports = max_elem(supports, global_nedges) + 1;
log_debug("nsupport: %d", nsupports);
/*
* It is easier to have a single global slist that the various buckets
* point into. This allows us to avoid any local <-> global mappings of
* edge IDs.
*/
slist_s * big_slist = gk_malloc((global_nedges+1)*sizeof(*big_slist),
"big_slist");
par_memset(big_slist, 0, (global_nedges+1) * sizeof(*big_slist));
++big_slist; /* +1 to allow slist[-1] to be valid */
/* allocate each thread-bucket */
#pragma omp parallel for schedule(static, 1)
for(int bucket=0; bucket < nbuckets; ++bucket) {
support_bucket_t * sbucket = &(sbuckets[bucket]);
sbucket->nsupports = nsupports;
sbucket->nowned_edges = 0;
sbucket->slist = big_slist;
sbucket->list_head =
gk_malloc(sbucket->nsupports * sizeof(*sbucket->list_head), "list_head");
ssize_t * const shead = sbucket->list_head;
for(int32_t s=0; s < sbucket->nsupports; ++s) {
shead[s] = -1;
}
}
/* go over all edges and assign to support-buckets */
for(int64_t e=0; e < global_nedges; ++e) {
int64_t const bucket = map_edge_to_bucket(e, thd_ws[0]);
support_bucket_t * sbucket = &(sbuckets[bucket]);
slist_s * slist = sbucket->slist;
ssize_t * const shead = sbucket->list_head;
int32_t const sup = supports[e];
/* fill data */
slist[e].prev_eid = -1;
slist[e].next_eid = shead[sup];
slist[e].support = sup;
/* update doubly-linked list */
if(shead[sup] != -1) {
slist[shead[sup]].prev_eid = e;
}
shead[sup] = e;
++sbucket->nowned_edges;
} /* foreach edge */
return sbuckets;
}
void sbucket_free(
support_bucket_t * sbucket)
{
--(sbucket->slist);
gk_free((void **) &(sbucket->slist), LTERM);
/* XXX this is all wrong and does not account for multi buckets... */
gk_free((void **) &sbucket->list_head, LTERM);
gk_free((void **) &sbucket, LTERM);
}
|
MergeSortOMP.c
|
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
FILE * fp;
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
void merge(int *arr, int l, int m, int r)
{
int i, j, k;
int n1 = m - l + 1;
int n2 = r - m;
int *L = (int *)malloc(n1*sizeof(int));
int *R = (int *)malloc(n2*sizeof(int));
/* Copy data to temp arrays L[] and R[] */
for (i = 0; i < n1; i++)
L[i] = arr[l + i];
for (j = 0; j < n2; j++)
R[j] = arr[m + 1+ j];
/* Merge the temp arrays back into arr[l..r]*/
i = 0; // Initial index of first subarray
j = 0; // Initial index of second subarray
k = l; // Initial index of merged subarray
while (i < n1 && j < n2)
{
if (L[i] <= R[j])
{
arr[k] = L[i];
i++;
}
else
{
arr[k] = R[j];
j++;
}
k++;
}
/* Copy the remaining elements of L[], if there
are any */
while (i < n1)
{
arr[k] = L[i];
i++;
k++;
}
/* Copy the remaining elements of R[], if there
are any */
while (j < n2)
{
arr[k] = R[j];
j++;
k++;
}
free(R);
free(L);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
void mergeSort(int arr[], int l, int r)
{
if (l < r)
{
// Same as (l+r)/2, but avoids overflow for
// large l and r
int m = l+(r-l)/2;
// Sort first and second halves
#pragma omp parallel sections num_threads(48)
{
#pragma omp section
{
mergeSort(arr,l,m); //call 1
}
#pragma omp section
{
mergeSort(arr,m+1,r); //call 2
}
}
merge(arr, l, m, r);
}
}
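/* A minimal task-based sketch (not part of the original benchmark): opening a
   new parallel region with 48 threads at every recursion level either
   oversubscribes the machine or, with nested parallelism disabled, serializes
   the inner levels. A common alternative is a single parallel region plus
   OpenMP tasks with a size cutoff; mergeSortTask is a hypothetical name and
   the 4096-element cutoff is an assumed tuning value. */
void mergeSortTask(int arr[], int l, int r)
{
	if (l < r)
	{
		int m = l+(r-l)/2;
		if (r - l < 4096)
		{
			// small range: recurse serially to avoid task overhead
			mergeSortTask(arr, l, m);
			mergeSortTask(arr, m+1, r);
		}
		else
		{
			#pragma omp task shared(arr)
			mergeSortTask(arr, l, m);
			#pragma omp task shared(arr)
			mergeSortTask(arr, m+1, r);
			#pragma omp taskwait // both halves must finish before merging
		}
		merge(arr, l, m, r);
	}
}
// Hypothetical driver: create the team once, start the recursion on one thread.
// #pragma omp parallel
// #pragma omp single
// mergeSortTask(arr, 0, max - 1);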
/* UTILITY FUNCTIONS */
/* Function to print an array */
void printArray(int A[], int size)
{
int i;
for (i=0; i < size; i++)
printf("%d ", A[i]);
printf("\n");
}
void benchMark(int max){
int *arr = (int *)malloc(max*sizeof(int));
for(int i = 0; i < max; i++){
arr[i] = rand()%1000;
}
// int arr_size = sizeof(arr)/sizeof(arr[0]);
// printf("Given array is \n");
// printArray(arr, max);
// clock() sums CPU time over all threads, which overstates a parallel sort,
// so time the sort with OpenMP's wall clock instead
double begin = omp_get_wtime();
mergeSort(arr, 0, max - 1);
double end = omp_get_wtime();
double time_spent = end - begin;
// printf("\nSorted array is \n");
// printArray(arr, max);
fprintf (fp, "%d , %f\n", max, time_spent);
// printf("Done with %d\n", max);
free(arr);
}
/* Driver program to test above functions */
int main(){
int data = 1000;
/* open the file for writing*/
fp = fopen ("MergeSortOMP48Threads.txt","w");
while(data <= 100000000){
benchMark(data);
data = data * 10;
}
/* close the file*/
fclose (fp);
// printf("Done!\n");
return 0;
}
|
3d7pt_var.c
|
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* default to 0 so missing command-line arguments do not leave them uninitialized */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 1024;
tile_size[4] = -1;
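// tile_size is never read in this untiled reference version; it is kept only as
// input for the source-to-source tiling toolchain mentioned above.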
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
// initialize both time buffers over the full domain so that the boundary
// cells read by the stencil are defined
for (m = 0; m < 2; m++) {
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
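// Double buffering: A[t%2] holds the current time step and A[(t+1)%2] receives
// the update, so the two 3D grids are ping-ponged across time steps.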
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
backprop.ref.c
|
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#endif
static unsigned long long current_time_ns() {
#ifdef __MACH__
clock_serv_t cclock;
mach_timespec_t mts;
host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
clock_get_time(cclock, &mts);
mach_port_deallocate(mach_task_self(), cclock);
unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec;
return (unsigned long long)mts.tv_nsec + s;
#else
struct timespec t ={0,0};
clock_gettime(CLOCK_MONOTONIC, &t);
unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec;
return (((unsigned long long)t.tv_nsec)) + s;
#endif
}
/*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <fcntl.h>
#include <unistd.h>
#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register int _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(float x)
{
float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of floats ***/
float *alloc_1d_dbl(int n)
{
float *new_alloc;
new_alloc = (float *) malloc ((unsigned) (n * sizeof (float)));
if (new_alloc == NULL) {
printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
return (NULL);
}
return (new_alloc);
}
/*** Allocate 2d array of floats ***/
float **alloc_2d_dbl(int m, int n)
{
int i;
float **new_alloc;
new_alloc = (float **) malloc ((unsigned) (m * sizeof (float *)));
if (new_alloc == NULL) {
printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
return (NULL);
}
for (i = 0; i < m; i++) {
new_alloc[i] = alloc_1d_dbl(n);
}
return (new_alloc);
}
void bpnn_randomize_weights(float **w, int m, int n)
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = (float) rand()/RAND_MAX;
// w[i][j] = dpn1();
}
}
}
void bpnn_randomize_row(float *w, int m)
{
int i;
for (i = 0; i <= m; i++) {
//w[i] = (float) rand()/RAND_MAX;
w[i] = 0.1;
}
}
void bpnn_zero_weights(float **w, int m, int n)
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = 0.0;
}
}
}
void bpnn_initialize(int seed)
{
printf("Random number generator seed: %d\n", seed);
srand(seed);
}
BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
BPNN *newnet;
newnet = (BPNN *) malloc (sizeof (BPNN));
if (newnet == NULL) {
printf("BPNN_CREATE: Couldn't allocate neural network\n");
return (NULL);
}
newnet->input_n = n_in;
newnet->hidden_n = n_hidden;
newnet->output_n = n_out;
newnet->input_units = alloc_1d_dbl(n_in + 1);
newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
newnet->output_units = alloc_1d_dbl(n_out + 1);
newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
newnet->output_delta = alloc_1d_dbl(n_out + 1);
newnet->target = alloc_1d_dbl(n_out + 1);
newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
return (newnet);
}
void bpnn_free(BPNN *net)
{
int n1, n2, i;
n1 = net->input_n;
n2 = net->hidden_n;
free((char *) net->input_units);
free((char *) net->hidden_units);
free((char *) net->output_units);
free((char *) net->hidden_delta);
free((char *) net->output_delta);
free((char *) net->target);
for (i = 0; i <= n1; i++) {
free((char *) net->input_weights[i]);
free((char *) net->input_prev_weights[i]);
}
free((char *) net->input_weights);
free((char *) net->input_prev_weights);
for (i = 0; i <= n2; i++) {
free((char *) net->hidden_weights[i]);
free((char *) net->hidden_prev_weights[i]);
}
free((char *) net->hidden_weights);
free((char *) net->hidden_prev_weights);
free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(int n_in, int n_hidden, int n_out)
{
BPNN *newnet;
newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
bpnn_randomize_row(newnet->target, n_out);
return (newnet);
}
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2)
{
float sum;
int j, k;
/*** Set up thresholding unit ***/
l1[0] = 1.0;
{ const unsigned long long parallel_for_start = current_time_ns();
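// Each iteration resets and consumes its own copy of sum, so reduction(+: sum)
// mainly serves to give every thread a private sum; the reduced value itself is
// not used after the loop.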
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
for (j = 1; j <= n2; j++) {
/*** Compute weighted sum of its inputs ***/
sum = 0.0;
for (k = 0; k <= n1; k++) {
sum += conn[k][j] * l1[k];
}
l2[j] = squash(sum);
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma232_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
}
//extern "C"
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err)
{
int j;
float o, t, errsum;
errsum = 0.0;
for (j = 1; j <= nj; j++) {
o = output[j];
t = target[j];
delta[j] = o * (1.0 - o) * (t - o);
errsum += ABS(delta[j]);
}
*err = errsum;
}
void bpnn_hidden_error(float *delta_h,
int nh,
float *delta_o,
int no,
float **who,
float *hidden,
float *err)
{
int j, k;
float h, sum, errsum;
errsum = 0.0;
for (j = 1; j <= nh; j++) {
h = hidden[j];
sum = 0.0;
for (k = 1; k <= no; k++) {
sum += delta_o[k] * who[j][k];
}
delta_h[j] = h * (1.0 - h) * sum;
errsum += ABS(delta_h[j]);
}
*err = errsum;
}
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw)
{
float new_dw;
int k, j;
ly[0] = 1.0;
//eta = 0.3;
//momentum = 0.3;
{ const unsigned long long parallel_for_start = current_time_ns();
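// Standard momentum update: new_dw = ETA * delta[j] * ly[k] + MOMENTUM * oldw[k][j],
// applied to w and remembered in oldw; the loop over delta units j is parallelized.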
#pragma omp parallel for shared(oldw, w, delta) private(j, k, new_dw) firstprivate(ndelta, nly)
for (j = 1; j <= ndelta; j++) {
for (k = 0; k <= nly; k++) {
new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
w[k][j] += new_dw;
oldw[k][j] = new_dw;
}
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma297_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
}
void bpnn_feedforward(BPNN *net)
{
int in, hid, out;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
}
void bpnn_train(BPNN *net, float *eo, float *eh)
{
const unsigned long long full_program_start = current_time_ns();
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
/*** Compute error on output and hidden units. ***/
bpnn_output_error(net->output_delta, net->target, net->output_units,
out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
net->hidden_weights, net->hidden_units, &hid_err);
*eo = out_err;
*eh = hid_err;
/*** Adjust input and hidden weights. ***/
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
net->hidden_weights, net->hidden_prev_weights);
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
net->input_weights, net->input_prev_weights);
} ;
const unsigned long long full_program_end = current_time_ns();
printf("full_program %llu ns\n", full_program_end - full_program_start);
}
void bpnn_save(BPNN *net, char *filename)
{
int n1, n2, n3, i, j, memcnt;
float dvalue, **w;
char *mem;
///add//
FILE *pFile;
pFile = fopen( filename, "w+" );
///////
/*
if ((fd = creat(filename, 0644)) == -1) {
printf("BPNN_SAVE: Cannot create '%s'\n", filename);
return;
}
*/
n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n;
printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);
//fflush(stdout);
//write(fd, (char *) &n1, sizeof(int));
//write(fd, (char *) &n2, sizeof(int));
//write(fd, (char *) &n3, sizeof(int));
/* write the layer sizes as full ints so that bpnn_read() can read them back */
fwrite( (char *) &n1 , sizeof(int), 1, pFile);
fwrite( (char *) &n2 , sizeof(int), 1, pFile);
fwrite( (char *) &n3 , sizeof(int), 1, pFile);
memcnt = 0;
w = net->input_weights;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n1+1) * (n2+1) * sizeof(float));
fwrite( mem , sizeof(float), (size_t) ((n1+1) * (n2+1)) , pFile); /* count is the number of floats, not bytes */
free(mem);
memcnt = 0;
w = net->hidden_weights;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n2+1) * (n3+1) * sizeof(float));
fwrite( mem , sizeof(float), (size_t) ((n2+1) * (n3+1)) , pFile); /* count is the number of floats, not bytes */
free(mem);
fclose(pFile);
return;
}
BPNN *bpnn_read(char *filename)
{
char *mem;
BPNN *new_alloc;
int fd, n1, n2, n3, i, j, memcnt;
if ((fd = open(filename, 0, 0644)) == -1) {
return (NULL);
}
printf("Reading '%s'\n", filename); //fflush(stdout);
read(fd, (char *) &n1, sizeof(int));
read(fd, (char *) &n2, sizeof(int));
read(fd, (char *) &n3, sizeof(int));
new_alloc = bpnn_internal_create(n1, n2, n3);
printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
printf("Reading input weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
read(fd, mem, (n1+1) * (n2+1) * sizeof(float));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
fastcopy(&(new_alloc->input_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
printf("Done\nReading hidden weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
read(fd, mem, (n2+1) * (n3+1) * sizeof(float));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
fastcopy(&(new_alloc->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
close(fd);
printf("Done\n"); //fflush(stdout);
bpnn_zero_weights(new_alloc->input_prev_weights, n1, n2);
bpnn_zero_weights(new_alloc->hidden_prev_weights, n2, n3);
return (new_alloc);
}
|
SplineC2CAdoptor.h
|
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, [email protected], Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
/** @file SplineC2CSoA.h
*
* Adoptor classes to handle complex-to-(real,complex) with arbitrary precision
*/
#ifndef QMCPLUSPLUS_EINSPLINE_C2C_SOA_ADOPTOR_H
#define QMCPLUSPLUS_EINSPLINE_C2C_SOA_ADOPTOR_H
#include <OhmmsSoA/Container.h>
#include <spline2/MultiBspline.hpp>
#include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h"
//#define USE_VECTOR_ML 1
namespace qmcplusplus
{
/** adoptor class to match std::complex<ST> spline with TT real SPOs
* @tparam ST precision of spline
* @tparam TT precision of SPOs
* @tparam D dimension
*
* Requires temporary storage and multiplication of phase vectors
*/
template<typename ST, typename TT>
struct SplineC2CSoA: public SplineAdoptorBase<ST,3>
{
static const int D=3;
using BaseType=SplineAdoptorBase<ST,3>;
using SplineType=typename bspline_traits<ST,3>::SplineType;
using BCType=typename bspline_traits<ST,3>::BCType;
using DataType=ST;
using PointType=typename BaseType::PointType;
using SingleSplineType=typename BaseType::SingleSplineType;
using vContainer_type=Vector<ST,aligned_allocator<ST> >;
using gContainer_type=VectorSoaContainer<ST,3>;
using hContainer_type=VectorSoaContainer<ST,6>;
using BaseType::first_spo;
using BaseType::last_spo;
using BaseType::GGt;
using BaseType::PrimLattice;
using BaseType::kPoints;
using BaseType::MakeTwoCopies;
using BaseType::offset_cplx;
using BaseType::offset_real;
///number of points of the original grid
int BaseN[3];
///offset of the original grid, always 0
int BaseOffset[3];
///multi bspline set
MultiBspline<ST>* SplineInst;
///expose the pointer to reuse the reader and only assigned with create_spline
SplineType* MultiSpline;
vContainer_type mKK;
VectorSoaContainer<ST,3> myKcart;
VectorSoaContainer<std::complex<ST>,3> myGL;
#if defined(USE_VECTOR_ML)
vContainer_type KdotR;
vContainer_type CosV;
vContainer_type SinV;
#endif
vContainer_type myV;
vContainer_type myL;
gContainer_type myG;
hContainer_type myH;
SplineC2CSoA(): BaseType(),SplineInst(nullptr), MultiSpline(nullptr)
{
this->is_complex=true;
this->is_soa_ready=true;
this->AdoptorName="SplineC2CSoAAdoptor";
this->KeyWord="SplineC2CSoA";
}
///** copy the base property */
//SplineC2CSoA(BaseType& rhs): BaseType(rhs)
//{
// this->is_complex=true;
// this->AdoptorName="SplineC2CSoA";
// this->KeyWord="C2RSoA";
//}
SplineC2CSoA(const SplineC2CSoA& a):
SplineAdoptorBase<ST,3>(a),SplineInst(a.SplineInst),MultiSpline(nullptr),
mKK(a.mKK), myKcart(a.myKcart)
{
const size_t n=a.myL.size();
myV.resize(n); myG.resize(n); myL.resize(n); myH.resize(n);
}
~SplineC2CSoA()
{
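// The copy constructor leaves MultiSpline as nullptr, so only the instance that
// created SplineInst (and therefore owns it) deletes it here.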
if(MultiSpline != nullptr) delete SplineInst;
}
inline void resizeStorage(size_t n, size_t nvals)
{
BaseType::init_base(n);
size_t npad=getAlignedSize<ST>(2*n);
myV.resize(npad);
myG.resize(npad);
myL.resize(npad);
myH.resize(npad);
#if defined(USE_VECTOR_ML)
KdotR.resize(n);
CosV.resize(n);
SinV.resize(n);
#endif
}
void bcast_tables(Communicate* comm)
{
chunked_bcast(comm, MultiSpline);
}
void gather_tables(Communicate* comm)
{
if(comm->size()==1) return;
const int Nbands = kPoints.size();
const int Nbandgroups = comm->size();
offset_cplx.resize(Nbandgroups+1,0);
FairDivideLow(Nbands,Nbandgroups,offset_cplx);
for(size_t ib=0; ib<offset_cplx.size(); ib++)
offset_cplx[ib]*=2;
gatherv(comm, MultiSpline, MultiSpline->z_stride, offset_cplx);
}
template<typename GT, typename BCT>
void create_spline(GT& xyz_g, BCT& xyz_bc)
{
resize_kpoints();
SplineInst=new MultiBspline<ST>();
SplineInst->create(xyz_g,xyz_bc,myV.size());
MultiSpline=SplineInst->spline_m;
for(size_t i=0; i<D; ++i)
{
BaseOffset[i]=0;
BaseN[i]=xyz_g[i].num+3;
}
qmc_common.memory_allocated += SplineInst->sizeInByte();
}
inline void flush_zero()
{
SplineInst->flush_zero();
}
/** remap kPoints to pack the double copy */
inline void resize_kpoints()
{
const size_t nk=kPoints.size();
mKK.resize(nk);
myKcart.resize(nk);
for(size_t i=0; i<nk; ++i)
{
mKK[i]=-dot(kPoints[i],kPoints[i]);
myKcart(i)=kPoints[i];
}
}
inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level)
{
SplineInst->copy_spline(spline_r,2*ispline ,BaseOffset, BaseN);
SplineInst->copy_spline(spline_i,2*ispline+1,BaseOffset, BaseN);
}
void set_spline(ST* restrict psi_r, ST* restrict psi_i, int twist, int ispline, int level)
{
Vector<ST> v_r(psi_r,0), v_i(psi_i,0);
SplineInst->set(2*ispline ,v_r);
SplineInst->set(2*ispline+1,v_i);
}
inline void set_spline_domain(SingleSplineType* spline_r, SingleSplineType* spline_i,
int twist, int ispline, const int* offset_l, const int* mesh_l)
{
}
bool read_splines(hdf_archive& h5f)
{
std::ostringstream o;
o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
einspline_engine<SplineType> bigtable(SplineInst->spline_m);
return h5f.read(bigtable,o.str().c_str());//"spline_0");
}
bool write_splines(hdf_archive& h5f)
{
std::ostringstream o;
o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
einspline_engine<SplineType> bigtable(SplineInst->spline_m);
return h5f.write(bigtable,o.str().c_str());//"spline_0");
}
inline std::complex<TT>
evaluate_dot(const ParticleSet& P, const int iat, const std::complex<TT>* restrict arow, ST* scratch,
bool compute_spline=true)
{
Vector<ST> vtmp(scratch,myV.size());
const PointType& r=P.activeR(iat);
if(compute_spline)
{
PointType ru(PrimLattice.toUnit_floor(r));
SplineInst->evaluate(ru,vtmp);
}
const size_t N=kPoints.size();
const ST x=r[0], y=r[1], z=r[2];
const ST* restrict kx=myKcart.data(0);
const ST* restrict ky=myKcart.data(1);
const ST* restrict kz=myKcart.data(2);
const TT* restrict psi0=reinterpret_cast<const TT*>(arow+first_spo);
const ST* restrict psi1=vtmp.data();
TT sum_r=TT();
TT sum_i=TT();
#pragma omp simd reduction(+:sum_r,sum_i)
for (size_t j=0; j<N; ++j)
{
ST s, c;
sincos(-(x*kx[j]+y*ky[j]+z*kz[j]),&s,&c);
const size_t jr=j<<1;
const size_t ji=jr+1;
const ST val_r=psi1[jr];
const ST val_i=psi1[ji];
const ST psi_r=val_r*c-val_i*s;
const ST psi_i=val_i*c+val_r*s;
sum_r+=psi0[jr]*psi_r-psi0[ji]*psi_i;
sum_i+=psi0[ji]*psi_r+psi0[jr]*psi_i;
}
return std::complex<TT>(sum_r,sum_i);
}
template<typename VV>
inline void assign_v(const PointType& r, const vContainer_type& myV, VV& psi)
{
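// Combine the interleaved real/imaginary spline values in myV with the phase
// factor e^{-i k.r} of each band to form the complex orbital values.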
typedef std::complex<TT> ComplexT;
const size_t N=kPoints.size();
const ST x=r[0], y=r[1], z=r[2];
const ST* restrict kx=myKcart.data(0);
const ST* restrict ky=myKcart.data(1);
const ST* restrict kz=myKcart.data(2);
#if defined(USE_VECTOR_ML)
{
for(size_t j=0; j<N; ++j)
KdotR[j]=-(x*kx[j]+y*ky[j]+z*kz[j]);
eval_e2iphi(N,KdotR.data(),CosV.data(),SinV.data());
}
for (size_t j=0, psiIndex=first_spo; psiIndex<last_spo; j++,psiIndex++)
{
const ST val_r=myV[2*j ];
const ST val_i=myV[2*j+1];
psi[psiIndex]=ComplexT( val_r*CosV[j]-val_i*SinV[j], val_i*CosV[j]+val_r*SinV[j]);
}
#else
#pragma omp simd
for (size_t j=0; j<N; ++j)
{
ST s, c;
const ST val_r=myV[2*j ];
const ST val_i=myV[2*j+1];
sincos(-(x*kx[j]+y*ky[j]+z*kz[j]),&s,&c);
psi[j+first_spo] = ComplexT(val_r*c-val_i*s,val_i*c+val_r*s);
}
#endif
}
template<typename VV>
inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi)
{
const PointType& r=P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
SplineInst->evaluate(ru,myV);
assign_v(r,myV,psi);
}
template<typename VM>
inline void evaluateValues(const VirtualParticleSet& VP, VM& psiM)
{
const size_t m=psiM.cols();
for(int iat=0; iat<VP.getTotalNum(); ++iat)
{
Vector<std::complex<TT> > psi(psiM[iat],m);
evaluate_v(VP,iat,psi);
}
}
inline size_t estimateMemory(const int nP) { return 0; }
/** assign_vgl
*/
template<typename VV, typename GV>
inline void assign_vgl(const PointType& r, VV& psi, GV& dpsi, VV& d2psi)
{
typedef std::complex<TT> ComplexT;
CONSTEXPR ST zero(0);
CONSTEXPR ST two(2);
const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
const ST x=r[0], y=r[1], z=r[2];
const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]};
const ST* restrict k0=myKcart.data(0);
const ST* restrict k1=myKcart.data(1);
const ST* restrict k2=myKcart.data(2);
const ST* restrict g0=myG.data(0);
const ST* restrict g1=myG.data(1);
const ST* restrict g2=myG.data(2);
const ST* restrict h00=myH.data(0);
const ST* restrict h01=myH.data(1);
const ST* restrict h02=myH.data(2);
const ST* restrict h11=myH.data(3);
const ST* restrict h12=myH.data(4);
const ST* restrict h22=myH.data(5);
const size_t N=kPoints.size();
#if defined(PRECOMPUTE_L)
for(size_t j=0,nsplines=myL.size(); j<nsplines; ++j)
{
myL[j]=SymTrace(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],symGG);
}
#endif
#pragma omp simd
for (size_t j=0; j<N; ++j)
{
const size_t jr=j<<1;
const size_t ji=jr+1;
const ST kX=k0[j];
const ST kY=k1[j];
const ST kZ=k2[j];
const ST val_r=myV[jr];
const ST val_i=myV[ji];
//phase
ST s, c;
sincos(-(x*kX+y*kY+z*kZ),&s,&c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];
const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r=dX_r+val_i*kX;
const ST gY_r=dY_r+val_i*kY;
const ST gZ_r=dZ_r+val_i*kZ;
const ST gX_i=dX_i-val_r*kX;
const ST gY_i=dY_i-val_r*kY;
const ST gZ_i=dZ_i-val_r*kZ;
#if defined(PRECOMPUTE_L)
const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
#else
const ST lcart_r=SymTrace(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],symGG);
const ST lcart_i=SymTrace(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],symGG);
const ST lap_r=lcart_r+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
const ST lap_i=lcart_i+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
#endif
const size_t psiIndex=j+first_spo;
psi[psiIndex ] = ComplexT(c*val_r-s*val_i,c*val_i+s*val_r);
dpsi[psiIndex][0]= ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r);
dpsi[psiIndex][1]= ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r);
dpsi[psiIndex][2]= ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r);
d2psi[psiIndex] = ComplexT(c*lap_r-s*lap_i,c*lap_i+s*lap_r);
}
}
/** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian
*/
template<typename VV, typename GV>
inline void assign_vgl_from_l(const PointType& r, VV& psi, GV& dpsi, VV& d2psi)
{
typedef std::complex<TT> ComplexT;
CONSTEXPR ST two(2);
const ST x=r[0], y=r[1], z=r[2];
const ST* restrict k0=myKcart.data(0);
const ST* restrict k1=myKcart.data(1);
const ST* restrict k2=myKcart.data(2);
const ST* restrict g0=myG.data(0);
const ST* restrict g1=myG.data(1);
const ST* restrict g2=myG.data(2);
const size_t N=last_spo-first_spo;
#pragma omp simd
for (size_t j=0; j<N; ++j)
{
const size_t jr=j<<1;
const size_t ji=jr+1;
const ST kX=k0[j];
const ST kY=k1[j];
const ST kZ=k2[j];
const ST val_r=myV[jr];
const ST val_i=myV[ji];
//phase
ST s, c;
sincos(-(x*kX+y*kY+z*kZ),&s,&c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g0[jr];
const ST dY_r = g1[jr];
const ST dZ_r = g2[jr];
const ST dX_i = g0[ji];
const ST dY_i = g1[ji];
const ST dZ_i = g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r=dX_r+val_i*kX;
const ST gY_r=dY_r+val_i*kY;
const ST gZ_r=dZ_r+val_i*kZ;
const ST gX_i=dX_i-val_r*kX;
const ST gY_i=dY_i-val_r*kY;
const ST gZ_i=dZ_i-val_r*kZ;
const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
const size_t psiIndex=j+first_spo;
psi[psiIndex ] = ComplexT(c*val_r-s*val_i,c*val_i+s*val_r);
dpsi[psiIndex][0]= ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r);
dpsi[psiIndex][1]= ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r);
dpsi[psiIndex][2]= ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r);
d2psi[psiIndex] = ComplexT(c*lap_r-s*lap_i,c*lap_i+s*lap_r);
}
}
template<typename VV, typename GV>
inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi)
{
const PointType& r=P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
SplineInst->evaluate_vgh(ru,myV,myG,myH);
assign_vgl(r,psi,dpsi,d2psi);
}
/** identical to assign_vgl but the output container is SoA container
*/
template<typename VGL>
inline void assign_vgl_soa(const PointType& r, VGL& vgl)
{
typedef std::complex<TT> ComplexT;
CONSTEXPR ST zero(0);
CONSTEXPR ST two(2);
const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
const ST x=r[0], y=r[1], z=r[2];
const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]};
const ST* restrict k0=myKcart.data(0);
const ST* restrict k1=myKcart.data(1);
const ST* restrict k2=myKcart.data(2);
const ST* restrict g0=myG.data(0);
const ST* restrict g1=myG.data(1);
const ST* restrict g2=myG.data(2);
const ST* restrict h00=myH.data(0);
const ST* restrict h01=myH.data(1);
const ST* restrict h02=myH.data(2);
const ST* restrict h11=myH.data(3);
const ST* restrict h12=myH.data(4);
const ST* restrict h22=myH.data(5);
const size_t N=kPoints.size();
const size_t nsplines=myL.size();
#if defined(PRECOMPUTE_L)
for(size_t j=0; j<nsplines; ++j)
{
myL[j]=SymTrace(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],symGG);
}
#endif
ComplexT* restrict psi =vgl.data(0)+first_spo; ASSUME_ALIGNED(psi);
ComplexT* restrict vg_x=vgl.data(1)+first_spo; ASSUME_ALIGNED(vg_x);
ComplexT* restrict vg_y=vgl.data(2)+first_spo; ASSUME_ALIGNED(vg_y);
ComplexT* restrict vg_z=vgl.data(3)+first_spo; ASSUME_ALIGNED(vg_z);
ComplexT* restrict vl_l=vgl.data(4)+first_spo; ASSUME_ALIGNED(vl_l);
#pragma omp simd
for (size_t j=0; j<N; ++j)
{
const size_t jr=j<<1;
const size_t ji=jr+1;
const ST kX=k0[j];
const ST kY=k1[j];
const ST kZ=k2[j];
const ST val_r=myV[jr];
const ST val_i=myV[ji];
//phase
ST s, c;
sincos(-(x*kX+y*kY+z*kZ),&s,&c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];
const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r=dX_r+val_i*kX;
const ST gY_r=dY_r+val_i*kY;
const ST gZ_r=dZ_r+val_i*kZ;
const ST gX_i=dX_i-val_r*kX;
const ST gY_i=dY_i-val_r*kY;
const ST gZ_i=dZ_i-val_r*kZ;
#if defined(PRECOMPUTE_L)
const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
#else
const ST lcart_r=SymTrace(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],symGG);
const ST lcart_i=SymTrace(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],symGG);
const ST lap_r=lcart_r+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
const ST lap_i=lcart_i+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
#endif
psi[j] =ComplexT(c*val_r-s*val_i,c*val_i+s*val_r);
vg_x[j]=ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r);
vg_y[j]=ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r);
vg_z[j]=ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r);
vl_l[j]=ComplexT(c*lap_r-s*lap_i,c*lap_i+s*lap_r);
}
}
/** evaluate VGL using VectorSoaContainer
* @param P particle set
* @param iat index of the active particle
* @param vgl VGL container holding value, gradient and laplacian
*/
template<typename VGL>
inline void evaluate_vgl_combo(const ParticleSet& P, const int iat, VGL& vgl)
{
const PointType& r=P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
SplineInst->evaluate_vgh(ru,myV,myG,myH);
assign_vgl_soa(r,vgl);
}
template<typename VV, typename GV, typename GGV>
void assign_vgh(const PointType& r, VV& psi, GV& dpsi, GGV& grad_grad_psi)
{
typedef std::complex<TT> ComplexT;
const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
const ST x=r[0], y=r[1], z=r[2];
const ST* restrict k0=myKcart.data(0);
const ST* restrict k1=myKcart.data(1);
const ST* restrict k2=myKcart.data(2);
const ST* restrict g0=myG.data(0);
const ST* restrict g1=myG.data(1);
const ST* restrict g2=myG.data(2);
const ST* restrict h00=myH.data(0);
const ST* restrict h01=myH.data(1);
const ST* restrict h02=myH.data(2);
const ST* restrict h11=myH.data(3);
const ST* restrict h12=myH.data(4);
const ST* restrict h22=myH.data(5);
const size_t N=kPoints.size();
#pragma omp simd
for (size_t j=0; j<N; ++j)
{
int jr=j<<1;
int ji=jr+1;
const ST kX=k0[j];
const ST kY=k1[j];
const ST kZ=k2[j];
const ST val_r=myV[jr];
const ST val_i=myV[ji];
//phase
ST s, c;
sincos(-(x*kX+y*kY+z*kZ),&s,&c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];
const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r=dX_r+val_i*kX;
const ST gY_r=dY_r+val_i*kY;
const ST gZ_r=dZ_r+val_i*kZ;
const ST gX_i=dX_i-val_r*kX;
const ST gY_i=dY_i-val_r*kY;
const ST gZ_i=dZ_i-val_r*kZ;
const size_t psiIndex=j+first_spo;
psi[psiIndex] =ComplexT(c*val_r-s*val_i,c*val_i+s*val_r);
dpsi[psiIndex][0]=ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r);
dpsi[psiIndex][1]=ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r);
dpsi[psiIndex][2]=ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r);
const ST h_xx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g00,g01,g02)+kX*(gX_i+dX_i);
const ST h_xy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g10,g11,g12)+kX*(gY_i+dY_i);
const ST h_xz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g20,g21,g22)+kX*(gZ_i+dZ_i);
const ST h_yx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g00,g01,g02)+kY*(gX_i+dX_i);
const ST h_yy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g10,g11,g12)+kY*(gY_i+dY_i);
const ST h_yz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g20,g21,g22)+kY*(gZ_i+dZ_i);
const ST h_zx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g00,g01,g02)+kZ*(gX_i+dX_i);
const ST h_zy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g10,g11,g12)+kZ*(gY_i+dY_i);
const ST h_zz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g20,g21,g22)+kZ*(gZ_i+dZ_i);
const ST h_xx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g00,g01,g02)-kX*(gX_r+dX_r);
const ST h_xy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g10,g11,g12)-kX*(gY_r+dY_r);
const ST h_xz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g20,g21,g22)-kX*(gZ_r+dZ_r);
const ST h_yx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g00,g01,g02)-kY*(gX_r+dX_r);
const ST h_yy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g10,g11,g12)-kY*(gY_r+dY_r);
const ST h_yz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g20,g21,g22)-kY*(gZ_r+dZ_r);
const ST h_zx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g00,g01,g02)-kZ*(gX_r+dX_r);
const ST h_zy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g10,g11,g12)-kZ*(gY_r+dY_r);
const ST h_zz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g20,g21,g22)-kZ*(gZ_r+dZ_r);
grad_grad_psi[psiIndex][0]=ComplexT(c*h_xx_r-s*h_xx_i, c*h_xx_i+s*h_xx_r);
grad_grad_psi[psiIndex][1]=ComplexT(c*h_xy_r-s*h_xy_i, c*h_xy_i+s*h_xy_r);
grad_grad_psi[psiIndex][2]=ComplexT(c*h_xz_r-s*h_xz_i, c*h_xz_i+s*h_xz_r);
grad_grad_psi[psiIndex][3]=ComplexT(c*h_yx_r-s*h_yx_i, c*h_yx_i+s*h_yx_r);
grad_grad_psi[psiIndex][4]=ComplexT(c*h_yy_r-s*h_yy_i, c*h_yy_i+s*h_yy_r);
grad_grad_psi[psiIndex][5]=ComplexT(c*h_yz_r-s*h_yz_i, c*h_yz_i+s*h_yz_r);
grad_grad_psi[psiIndex][6]=ComplexT(c*h_zx_r-s*h_zx_i, c*h_zx_i+s*h_zx_r);
grad_grad_psi[psiIndex][7]=ComplexT(c*h_zy_r-s*h_zy_i, c*h_zy_i+s*h_zy_r);
grad_grad_psi[psiIndex][8]=ComplexT(c*h_zz_r-s*h_zz_i, c*h_zz_i+s*h_zz_r);
}
}
template<typename VV, typename GV, typename GGV>
void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi)
{
const PointType& r=P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
SplineInst->evaluate_vgh(ru,myV,myG,myH);
assign_vgh(r,psi,dpsi,grad_grad_psi);
//missing
}
};
}
#endif
|
subsref_mex_openmp.c
|
/* DOES NOT WORK PROPERLY, AS GETTING OPENMP SUPPORT IN MATLAB IS DIFFICULT.
*
* Compile using:
* mex -lmwlapack -lmwblas -largeArrayDims subsref_mex_openmp.c
* calling (do NOT call directly. Only meant to be called through TTeMPS.subsref
* subsref_mex( n, r, transpose(ind), Cores)
*/
/*
* TTeMPS Toolbox.
* Michael Steinlechner, 2013-2014
* Questions and contact: [email protected]
* BSD 2-clause license, see LICENSE.txt
*/
#include "mex.h"
#include "blas.h"
#include <omp.h>
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] ) {
/* input variables */
double* n_raw;
double* r_raw;
double* ind_raw;
double** C;
/* output variables */
double* result;
/* internal variables */
double* P;
double* current;
mwSignedIndex* n;
mwSignedIndex* r;
mwSignedIndex* ind;
mwSignedIndex numSubsref;
mwSignedIndex d;
mwSignedIndex i;
mwSignedIndex j;
mwSignedIndex k;
mwSignedIndex maxrank = 1;
/* get sizes */
n_raw = mxGetPr( prhs[0] );
/* get ranks */
r_raw = mxGetPr( prhs[1] );
/* get indices */
ind_raw = mxGetPr( prhs[2] );
d = mxGetM( prhs[2] );
numSubsref = mxGetN( prhs[2] );
n = mxMalloc( d*sizeof(mwSignedIndex) );
r = mxMalloc( (d+1)*sizeof(mwSignedIndex) );
ind = mxMalloc( d*numSubsref*sizeof(mwSignedIndex) );
/* Convert index arrays to integer arrays as they get converted
* to double arrays when passing to mex.
* Converting beforehand allows to avoid multiple typecasts inside the inner loop */
for( i = 0; i < d; ++i ) {
n[i] = (mwSignedIndex) n_raw[i];
r[i] = (mwSignedIndex) r_raw[i];
if( r[i] > maxrank )
maxrank = r[i];
}
r[d] = (mwSignedIndex) r_raw[d];
for( i = 0; i < numSubsref*d; ++i ) {
ind[i] = (mwSignedIndex) ind_raw[i];
}
/* Get pointers to the matrices within the cell array */
C = mxMalloc( d*sizeof(double*) );
for( i = 0; i<d; ++i ) {
C[i] = mxGetPr( mxGetCell( prhs[3], i ) );
}
/* Allocate space for output */
plhs[0] = mxCreateDoubleMatrix( numSubsref, 1, mxREAL);
result = mxGetPr( plhs[0] );
/* helper variables for dgemv call */
char transa = 'T';
mwSignedIndex ONE_i = 1;
double ONE_d = 1.0;
double ZERO_d = 0.0;
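/* Each requested entry is evaluated as a chain of matrix-vector products over
 * the TT cores: the first dgemv contracts cores 1 and 2, and every further core
 * is folded in with another dgemv. P and current are per-thread scratch buffers. */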
#pragma omp parallel shared(n,r,ind,C,result) private(i,j,k,P,current)
{
/* Allocate enough space for internal intermediate results */
P = malloc( maxrank*sizeof(double) );
current = malloc( maxrank*sizeof(double) );
#pragma omp for
for( j = 0; j < numSubsref; ++j ) {
/* first two cores */
dgemv( &transa, &r[1], &r[2], &ONE_d,
&C[1][ (ind[d*j+1]-1)*r[1]*r[2] ],
&r[1],
&C[0][ (ind[d*j]-1)*r[0]*r[1] ],
&ONE_i, &ZERO_d, P, &ONE_i);
/* loop over remaining cores */
for( i = 2; i < d; ++i ) {
/* copy over the previous result to free space at P
* (necessary because dgemv does not work in-place) */
for( k = 0; k < r[i]; ++k )
current[k] = P[k];
dgemv( &transa, &r[i], &r[i+1], &ONE_d,
&C[i][ (ind[d*j+i]-1)*r[i]*r[i+1] ],
&r[i],
current,
&ONE_i, &ZERO_d, P, &ONE_i);
}
result[j] = P[0];
}
free( P );
free( current );
}
mxFree( n );
mxFree( r );
mxFree( ind );
mxFree( C );
}
|
YAKL_mem_transfers.h
|
#pragma once
// Included by YAKL.h
namespace yakl {
// Your one-stop shop for memory transfers to / from host / device
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_host_to_host(T1 *dst , T2 *src , index_t elems) {
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
}
inline void memcpy_host_to_host_void(void *dst , void *src , size_t bytes) {
memcpy( dst , src , bytes );
}
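// The device-related copies below are issued with the asynchronous memcpy of the
// CUDA, HIP, or SYCL backend on the default stream/queue; unless YAKL_AUTO_FENCE
// is defined, callers are expected to fence() before relying on the destination.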
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_device_to_host(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_host");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyDeviceToHost,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyDeviceToHost,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_host");
#endif
}
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_host_to_device(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_host_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyHostToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyHostToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_host_to_device");
#endif
}
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_device_to_device(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyDeviceToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyDeviceToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_device");
#endif
}
inline void memcpy_device_to_device_void(void *dst , void *src , size_t bytes) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,bytes,cudaMemcpyDeviceToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,bytes,hipMemcpyDeviceToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, bytes);
check_last_error();
#else
memcpy( dst , src , bytes );
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_device");
#endif
}
}
|
critical.c
|
/* Copyright (C) 2005 Free Software Foundation, Inc.
Contributed by Richard Henderson <[email protected]>.
This file is part of the GNU OpenMP Library (libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with libgomp; see the file COPYING.LIB. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/* As a special exception, if you link this library with other files, some
of which are compiled with GCC, to produce an executable, this library
does not by itself cause the resulting executable to be covered by the
GNU General Public License. This exception does not however invalidate
any other reasons why the executable file might be covered by the GNU
General Public License. */
/* This file handles the CRITICAL construct. */
#include "libgomp.h"
#include <stdlib.h>
static gomp_mutex_t default_lock;
void
GOMP_critical_start (void)
{
gomp_mutex_lock (&default_lock);
}
void
GOMP_critical_end (void)
{
gomp_mutex_unlock (&default_lock);
}
#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif
void
GOMP_critical_name_start (void **pptr)
{
gomp_mutex_t *plock;
/* If a mutex fits within the space for a pointer, and is zero initialized,
then use the pointer space directly. */
if (GOMP_MUTEX_INIT_0
&& sizeof (gomp_mutex_t) <= sizeof (void *)
&& __alignof (gomp_mutex_t) <= sizeof (void *))
plock = (gomp_mutex_t *)pptr;
/* Otherwise we have to be prepared to malloc storage. */
else
{
plock = *pptr;
if (plock == NULL)
{
#ifdef HAVE_SYNC_BUILTINS
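/* Race to publish the new lock: the compare-and-swap installs nlock only
   if *pptr is still NULL; if another thread won the race, discard nlock
   and use the lock that thread installed.  */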
gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
gomp_mutex_init (nlock);
plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
if (plock != NULL)
{
gomp_mutex_destroy (nlock);
free (nlock);
}
else
plock = nlock;
#else
gomp_mutex_lock (&create_lock_lock);
plock = *pptr;
if (plock == NULL)
{
plock = gomp_malloc (sizeof (gomp_mutex_t));
gomp_mutex_init (plock);
__sync_synchronize ();
*pptr = plock;
}
gomp_mutex_unlock (&create_lock_lock);
#endif
}
}
gomp_mutex_lock (plock);
}
void
GOMP_critical_name_end (void **pptr)
{
gomp_mutex_t *plock;
/* If a mutex fits within the space for a pointer, and is zero initialized,
then use the pointer space directly. */
if (GOMP_MUTEX_INIT_0
&& sizeof (gomp_mutex_t) <= sizeof (void *)
&& __alignof (gomp_mutex_t) <= sizeof (void *))
plock = (gomp_mutex_t *)pptr;
else
plock = *pptr;
gomp_mutex_unlock (plock);
}
/* This mutex is used when atomic operations don't exist for the target
in the mode requested. The result is not globally atomic, but works so
long as all parallel references are within #pragma omp atomic directives.
According to responses received from [email protected], appears to be within
spec. Which makes sense, since that's how several other compilers
handle this situation as well. */
static gomp_mutex_t atomic_lock;
void
GOMP_atomic_start (void)
{
gomp_mutex_lock (&atomic_lock);
}
void
GOMP_atomic_end (void)
{
gomp_mutex_unlock (&atomic_lock);
}
#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
gomp_mutex_init (&default_lock);
gomp_mutex_init (&atomic_lock);
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_init (&create_lock_lock);
#endif
}
#endif
|
GB_unaryop__ainv_uint8_fp64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_fp64
// op(A') function: GB_tran__ainv_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_fp64
(
uint8_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_colscale_meta.c
|
//------------------------------------------------------------------------------
// GB_AxB_colscale_meta: C=A*D where D is a square diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All entries in C=A*D are computed entirely in parallel.
// A and C can be jumbled. D cannot, but it is a diagonal matrix so it is
// never jumbled.
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Dx, j, and Ah are unused if the operator is FIRST or PAIR
#include "GB_unused.h"
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_JUMBLED (D)) ;
//--------------------------------------------------------------------------
// get C, A, and D
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) (A_is_pattern ? NULL : A->x) ;
const GB_BTYPE *GB_RESTRICT Dx = (GB_BTYPE *) (D_is_pattern ? NULL : D->x) ;
const int64_t avlen = A->vlen ;
//--------------------------------------------------------------------------
// C=A*D
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
//----------------------------------------------------------------------
// C(:,kfirst:klast) = A(:,kfirst:klast)*D(kfirst:klast,kfirst:klast)
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) and C(:,k) to be operated on by this task
//------------------------------------------------------------------
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_slice, Ap, avlen) ;
//------------------------------------------------------------------
// C(:,j) = A(:,j)*D(j,j)
//------------------------------------------------------------------
GB_GETB (djj, Dx, j) ; // djj = D (j,j)
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = pA_start ; p < pA_end ; p++)
{
GB_GETA (aij, Ax, p) ; // aij = A(i,j)
GB_BINOP (GB_CX (p), aij, djj, 0, 0) ; // C(i,j) = aij * djj
}
}
}
}
|
GB_unaryop__minv_uint32_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_int64
// op(A') function: GB_tran__minv_uint32_int64
// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint32_int64
(
uint32_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LinearStrainMapping.h
|
/******************************************************************************
* SOFA, Simulation Open-Framework Architecture, development version *
* (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH *
* *
* This program is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: [email protected] *
******************************************************************************/
#ifndef SOFA_COMPONENT_MAPPING_LinearStrainMAPPING_H
#define SOFA_COMPONENT_MAPPING_LinearStrainMAPPING_H
#include <Flexible/config.h>
#include <sofa/core/Mapping.h>
#include <sofa/defaulttype/Mat.h>
#include <sofa/defaulttype/Vec.h>
#include <sofa/simulation/Simulation.h>
#include <sofa/core/Mapping.h>
#include <SofaEigen2Solver/EigenSparseMatrix.h>
#include <sofa/helper/IndexOpenMP.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "../types/StrainTypes.h"
#include "LinearStrainJacobianBlock.h"
namespace sofa
{
namespace component
{
namespace mapping
{
/** Map strain positions as a linear combination of strains, for smoothing.
*/
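// Output strain i is assembled from the parent strains listed in d_index[i], weighted by
// d_w[i]: see the per-block addapply()/addmult() calls in apply() and applyJ() below.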
template <class TStrain>
class LinearStrainMapping : public core::Mapping<TStrain,TStrain>
{
public:
typedef defaulttype::LinearStrainJacobianBlock<TStrain> BlockType;
typedef core::Mapping<TStrain,TStrain> Inherit;
SOFA_CLASS(SOFA_TEMPLATE(LinearStrainMapping,TStrain), SOFA_TEMPLATE2(core::Mapping,TStrain,TStrain));
/** @name Input types */
//@{
typedef typename TStrain::Coord Coord;
typedef typename TStrain::Deriv Deriv;
typedef typename TStrain::VecCoord VecCoord;
typedef typename TStrain::VecDeriv VecDeriv;
typedef typename TStrain::MatrixDeriv MatrixDeriv;
typedef typename TStrain::Real Real;
//@}
/** @name Shape Function types */
//@{
typedef helper::vector<Real> VReal;
typedef helper::vector< helper::SVector<Real> > VecVReal;
typedef helper::vector<unsigned int> VRef;
typedef helper::vector< helper::SVector<unsigned int> > VecVRef;
//@}
/** @name Jacobian types */
//@{
typedef helper::vector<helper::vector<BlockType> > SparseMatrix;
typedef linearsolver::EigenSparseMatrix<TStrain,TStrain> SparseMatrixEigen;
//@}
virtual void resizeOut()
{
if(this->f_printLog.getValue()) std::cout<<this->getName()<<"::resizeOut()"<<std::endl;
const VecVRef& indices = this->d_index.getValue();
const VecVReal& w = this->d_w.getValue();
this->toModel->resize(indices.size());
// init jacobian blocks
jacobian.resize(indices.size());
for( size_t i=0 ; i<indices.size() ; ++i)
{
jacobian[i].resize(indices[i].size());
for(size_t j=0; j<indices[i].size(); j++)
jacobian[i][j].init( w[i][j]);
}
reinit();
}
/** @name Mapping functions */
//@{
virtual void init()
{
if( core::behavior::BaseMechanicalState* stateFrom = this->fromModel.get()->toBaseMechanicalState() )
maskFrom = &stateFrom->forceMask;
if( core::behavior::BaseMechanicalState* stateTo = this->toModel.get()->toBaseMechanicalState() )
maskTo = &stateTo->forceMask;
// init jacobians
baseMatrices.resize( 1 ); // just a wrapping for getJs()
baseMatrices[0] = &eigenJacobian;
resizeOut();
Inherit::init();
}
virtual void reinit()
{
if(this->d_assemble.getValue()) updateJ();
Inherit::reinit();
}
virtual void apply(const core::MechanicalParams * /*mparams*/ , Data<VecCoord>& dOut, const Data<VecCoord>& dIn)
{
if(this->f_printLog.getValue()) std::cout<<this->getName()<<":apply"<<std::endl;
VecCoord& out = *dOut.beginWriteOnly();
const VecCoord& in = dIn.getValue();
const VecVRef& indices = this->d_index.getValue();
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
for(helper::IndexOpenMP<unsigned int>::type i=0; i<jacobian.size(); i++)
{
out[i]=Coord();
for(size_t j=0; j<jacobian[i].size(); j++)
{
size_t index=indices[i][j];
jacobian[i][j].addapply(out[i],in[index]);
}
}
dOut.endEdit();
}
virtual void applyJ(const core::MechanicalParams * /*mparams*/ , Data<VecDeriv>& dOut, const Data<VecDeriv>& dIn)
{
if(this->d_assemble.getValue())
{
if( !eigenJacobian.rows() ) updateJ();
eigenJacobian.mult(dOut,dIn);
}
else
{
VecDeriv& out = *dOut.beginWriteOnly();
const VecDeriv& in = dIn.getValue();
const VecVRef& indices = this->d_index.getValue();
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
// for( size_t i=0 ; i<this->maskTo->size() ; ++i)
// if( !this->maskTo->isActivated() || this->maskTo->getEntry(i) )
for(helper::IndexOpenMP<unsigned int>::type i=0; i<jacobian.size(); i++)
{
out[i]=Deriv();
for(size_t j=0; j<jacobian[i].size(); j++)
{
size_t index=indices[i][j];
jacobian[i][j].addmult(out[i],in[index]);
}
}
dOut.endEdit();
}
}
virtual void applyJT(const core::MechanicalParams * /*mparams*/ , Data<VecDeriv>& dIn, const Data<VecDeriv>& dOut)
{
if(this->d_assemble.getValue())
{
if( !eigenJacobian.rows() ) updateJ();
eigenJacobian.addMultTranspose(dIn,dOut);
}
else
{
VecDeriv& in = *dIn.beginEdit();
const VecDeriv& out = dOut.getValue();
const VecVRef& indices = this->d_index.getValue();
//#ifdef _OPENMP
//#pragma omp parallel for if (this->d_parallel.getValue())
//#endif
// for( size_t i=0 ; i<this->maskTo->size() ; ++i)
// if( this->maskTo->getEntry(i) )
for(helper::IndexOpenMP<unsigned int>::type i=0; i<jacobian.size(); i++)
{
for(size_t j=0; j<jacobian[i].size(); j++)
{
size_t index=indices[i][j];
jacobian[i][j].addMultTranspose(in[index],out[i]);
}
}
dIn.endEdit();
}
}
virtual void applyJT(const core::ConstraintParams * /*cparams*/ , Data<MatrixDeriv>& /*out*/, const Data<MatrixDeriv>& /*in*/)
{
}
virtual void applyDJT(const core::MechanicalParams* /*mparams*/, core::MultiVecDerivId /*parentDfId*/, core::ConstMultiVecDerivId /*childForceId*/ )
{
}
const defaulttype::BaseMatrix* getJ(const core::MechanicalParams * /*mparams*/)
{
if(!this->d_assemble.getValue()) // J should have been updated in apply(), which is called before (when assemble==1)
{
updateJ();
serr<<"Please, with an assembled solver, set assemble=1\n";
}
return &eigenJacobian;
}
// Compliant plugin API
virtual const helper::vector<sofa::defaulttype::BaseMatrix*>* getJs()
{
if(!this->d_assemble.getValue()) // J should have been updated in apply(), which is called before (when assemble==1)
{
updateJ();
serr<<"Please, with an assembled solver, set assemble=1\n";
}
return &baseMatrices;
}
virtual const defaulttype::BaseMatrix* getK()
{
return NULL;
}
void draw(const core::visual::VisualParams* /*vparams*/)
{
}
//void updateForceMask()
//{
// const VecVRef& indices = this->d_index.getValue();
// for( size_t i=0 ; i<this->maskTo->size() ; ++i)
// {
// if( this->maskTo->getEntry(i) )
// {
// for(size_t j=0; j<jacobian[i].size(); j++)
// {
// size_t index = indices[i][j];
// this->maskFrom->insertEntry( index );
// }
// }
// }
// // serr<<"updateForceMask "<<this->maskTo->nbActiveDofs()<<" "<<this->maskFrom->nbActiveDofs()<<sendl;
//}
//@}
Data<bool> d_assemble;
Data< bool > d_parallel; ///< use openmp ?
protected:
LinearStrainMapping (core::State<TStrain>* from = NULL, core::State<TStrain>* to= NULL)
: Inherit ( from, to )
, d_assemble ( initData ( &d_assemble,false, "assemble","Assemble the matrices (Jacobian and Geometric Stiffness) or use optimized matrix/vector multiplications" ) )
, d_parallel(initData(&d_parallel, false, "parallel", "use openmp parallelisation?"))
, d_index ( initData ( &d_index,"indices","parent indices for each child" ) )
, d_w ( initData ( &d_w,"weights","influence weights of the Dofs" ) )
, maskFrom(NULL)
, maskTo(NULL)
{
}
virtual ~LinearStrainMapping() { }
SparseMatrix jacobian; ///< Jacobian of the mapping
Data<VecVRef > d_index; ///< Store child to parent relationship. index[i][j] is the index of the j-th parent influencing child i.
Data<VecVReal > d_w; ///< Influence weights of the parent for each child
helper::StateMask* maskFrom; ///< Subset of master DOF, to cull out computations involving null forces or displacements
helper::StateMask* maskTo; ///< Subset of slave DOF, to cull out computations involving null forces or displacements
SparseMatrixEigen eigenJacobian; ///< Assembled Jacobian matrix
helper::vector<defaulttype::BaseMatrix*> baseMatrices; ///< Vector of jacobian matrices, for the Compliant plugin API
void updateJ()
{
unsigned int insize = this->fromModel->getSize();
// unsigned int outsize = this->toModel->getSize();
SparseMatrixEigen& J = eigenJacobian;
const VecVRef& indices = this->d_index.getValue();
J.resizeBlocks(jacobian.size(),insize);
for( size_t i=0 ; i<this->maskTo->size() ; ++i)
{
J.beginBlockRow(i);
for(size_t j=0; j<jacobian[i].size(); j++)
J.createBlock( indices[i][j], jacobian[i][j].getJ());
J.endBlockRow();
}
J.compress();
}
};
} // namespace mapping
} // namespace component
} // namespace sofa
#endif
|
taskwait-depend-1.c
|
void
foo (int *p)
{
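/* The iterator clause expands to 16 'in' dependences p[0] .. p[15]; together with the
   'out' dependence on p[32], this taskwait only waits for previously generated sibling
   tasks whose depend clauses conflict with these list items. */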
#pragma omp taskwait depend(iterator(i = 0:16) , in : p[i]) depend(out : p[32])
}
void
bar (int *p)
{
#pragma omp taskwait depend(mutexinoutset : p[0]) /* { dg-error "'mutexinoutset' kind in 'depend' clause on a 'taskwait' construct" } */
}
|
CPUMatrixImpl.h
|
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" triggers this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // conversion from 'double' to 'float', possible loss of data
#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons
#ifdef USE_MKL
// requires MKL 10.0 and above
#include <mkl.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
#define SWAP(a, b) \
{ \
(a) ^= (b); \
(b) ^= (a); \
(a) ^= (b); \
}
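// Note: this XOR swap zeroes both operands when (a) and (b) refer to the same object
// (e.g. SWAP(x, x) leaves x == 0), so the two arguments must not alias.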
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
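// e.g. for a column-major matrix with leading dimension ld == numRows, element
// (row 2, col 1) is at IDX2C(2, 1, ld) == 1 * ld + 2.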
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
enum class MatrixOrder
{
RowMajor = 101, // row-major arrays
ColMajor = 102 // column-major arrays
};
enum class MatrixTranspose : char
{
NoTrans = 'N', // trans='N'
Trans = 'T', // trans='T'
ConjTrans = 'C' // trans='C'
};
enum class SymMatrixType : char
{
Up = 'U', // symmetric matrix is stored in the upper part
Low = 'L', // symmetric matrix is stored in the lower part
Full = 'F', // fully populated
NotSymmetric = 'N' // not a symmetric matrix
};
enum class MatrixOpSide : char
{
Left = 'L', // left multiply
Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
ZeroInit();
}
// helper to allocate an array of ElemType
// Use this instead of new[] to get NaN initialization for debugging.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
// We need to allocate possibly one more element for the following reason.
// At some point we might want to fill a buffer with the result of a random
// number generator. The RNG is oblivious to whether the buffer is on the
// CPU or GPU but it needs to keep an accurate tally of how many numbers it
// has generated. The trouble stems from the fact that generating an odd
// number of gaussians on the GPU is not supported, so we must always
// generate an even number. So since we wouldn't know how to update the tally
// we are making this allocate one more element in the worst case.
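// (AsMultipleOf(n, 2) presumably rounds n up to the next even count, e.g. 5 -> 6,
// so the extra slot is only allocated when n is odd.)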
ElemType* p = new ElemType[AsMultipleOf(n, 2)]();
#if 0 // _DEBUG
ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
for (size_t i = 0; i < n; i++)
p[i] = nan;
#endif
return p;
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
ZeroInit();
m_numRows = numRows;
m_numCols = numCols;
SetSizeAllocated(GetNumElements());
if (GetNumElements() != 0)
{
SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType));
}
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
ZeroInit();
SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
ZeroInit();
SetValue(deepCopyFrom);
}
//assignment operator, deep copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
SetValue(deepCopyFrom);
return *this;
}
//move constructor, shallow copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
: Base(/* shallow */ true)
{
ShallowCopyFrom(moveFrom);
moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
: Base(shallow)
{
ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
if (this != &moveFrom)
{
ShallowCopyFrom(moveFrom);
// release the pointer from the source object so that the destructor won't release it twice
moveFrom.ZeroValues();
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
if (startColumn + numCols > m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);
CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
slice.m_numCols = numCols;
slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
return slice;
}
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > fromMatrix.m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);
Clear();
ShallowCopyFrom(fromMatrix);
m_numCols = numCols;
m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > m_numCols)
LogicError("The slice is out of range of the destination matrix.");
if (numCols > fromMatrix.GetNumCols())
InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the destination matrix.");
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
long n = (long) numCols, m = (long) m_numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
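// (m & ~3) clears the two low bits of m, i.e. rounds it down to a multiple of 4,
// so this loop covers full groups of four rows and the loop below handles the tail.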
for (size_t i = 0; i < (m & ~3); i += 4)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
}
// handle remaining
for (size_t i = m & ~3; i < m; i++)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
}
}
}
//for each column of a, we assign all rows of a to this starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) = a(i, j);
us(startRow + 1, j) = a(i + 1, j);
us(startRow + 2, j) = a(i + 2, j);
us(startRow + 3, j) = a(i + 3, j);
}
// handle remaining stuffs
for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) = a(i, j);
}
}
return *this;
}
//for each column of a, we assign numRows starting from startIndex to this
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (startIndex + numRows > a.GetNumRows())
LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
RequireSize(numRows, a.GetNumCols());
long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
long k = (long) a.GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// memory copy might be faster?
memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
// //four-way unrolling
// for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
// {
// us(i,j) = a(startRow,j);
// us(i+1,j) = a(startRow+1,j);
// us(i+2,j) = a(startRow+2,j);
// us(i+3,j) = a(startRow+3,j);
// }
// //handle remaining stuffs
// for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
// {
// us(i,j) = a(startRow,j);
// }
}
return *this;
}
//for the row slice of this starting from startIndex we add a to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) += a(i, j);
us(startRow + 1, j) += a(i + 1, j);
us(startRow + 2, j) += a(i + 2, j);
us(startRow + 3, j) += a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) += a(i, j);
}
}
return *this;
}
//for each column of this, we add row slice of a starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
if (GetNumRows() != numRows)
LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
if (startIndex + numRows > a.GetNumRows())
LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddWithRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(i, j) += a(startRow, j);
us(i + 1, j) += a(startRow + 1, j);
us(i + 2, j) += a(startRow + 2, j);
us(i + 3, j) += a(startRow + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(i, j) += a(startRow, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
if (m_numRows != m_numCols)
LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);
CPUMatrix<ElemType> diag(1, m_numCols);
auto& us = *this;
#pragma omp parallel for
for (long i = 0; i < m_numRows; i++)
{
diag(0, (size_t) i) = us(i, i);
}
return diag;
}
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
if (position < c.GetNumElements())
c.Data()[position] -= 1.0;
else
RuntimeError("MinusOneAt: position is out of CPU matrix size");
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
if (this == &a)
LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");
if (a.IsEmpty())
LogicError("AssignRepeatOf: Matrix a is empty.");
RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long q = 0; q < numColRepeats; q++)
{
for (long p = 0; p < numRowRepeats; p++)
{
long colOffset = q * n;
for (long j = 0; j < n; j++, colOffset++)
{
long rowOffset = p * m;
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
{
us(rowOffset, colOffset) = a(i, j);
us(rowOffset + 1, colOffset) = a(i + 1, j);
us(rowOffset + 2, colOffset) = a(i + 2, j);
us(rowOffset + 3, colOffset) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++, rowOffset++)
{
us(rowOffset, colOffset) = a(i, j);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
if (a.IsEmpty())
LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");
if (a.GetNumRows() != GetNumRows() * numRepeats)
LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");
long n = (long) a.GetNumCols(), m = (long) GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
us(i + 1, j) += a(k * m + i + 1, j);
us(i + 2, j) += a(k * m + i + 2, j);
us(i + 3, j) += a(k * m + i + 3, j);
}
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
if (IsEmpty())
LogicError("Transpose: Matrix is empty.");
CPUMatrix<ElemType> c;
c.AssignTransposeOf(*this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
if (this == &a)
LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");
if (a.IsEmpty())
LogicError("AssignTransposeOf: Matrix a is empty.");
RequireSize(a.GetNumCols(), a.GetNumRows());
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(j, i) = a(i, j);
us(j, i + 1) = a(i + 1, j);
us(j, i + 2) = a(i + 2, j);
us(j, i + 3) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(j, i) = a(i, j);
}
}
return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
if (alpha != 1) // rare case: just do the full thing
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + alpha * src[i];
else if (beta == 1) // used in backprop
for (size_t i = 0; i < numRows; i++)
dst[i] += src[i];
else if (beta == 0) // plain assignment
memcpy(dst, src, sizeof(ElemType) * numRows);
else // alpha=1, arbitrary beta: also rare case
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + src[i];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
if (beta)
VerifySize(a.GetNumRows(), idx.GetNumCols());
else
Resize(a.GetNumRows(), idx.GetNumCols());
auto& us = *this;
// race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending on circumstances, it may be more efficient to parallelize over rows.
foreach_column(jOut, us)
{
auto jInF = idx(0, jOut); // this is the column we need to get
if (std::isnan(jInF) || jInF < 0) // negative index means gap
continue;
size_t jIn = (size_t)jInF;
if (jIn >= a.GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
}
return *this;
}
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
if (idx.GetNumCols() != a.GetNumCols())
InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
if (a.GetNumRows() != GetNumRows())
InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
auto& us = *this;
// pre-scale with beta upfront
// Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
Scale(beta, us); // if beta is 0, then this will be a memset()
ScatterValues(idx.Data(), a.Data(), us.Data(), alpha, idx.GetNumCols(), a.GetNumRows(), GetNumCols(), idx.GetNumRows());
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
if (IsEmpty())
LogicError("SetValue: Matrix is empty.");
bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
if (isFinite && v == 0)
{
memset(Data(), 0, sizeof(ElemType) * GetNumElements());
}
else
{
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// 2-way thread parallelism is sufficient for the memory bound
// operation of just setting the values of an array.
const unsigned SETVALUE_NUM_THREADS = 2;
UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = v;
bufPtr[i + 1] = v;
bufPtr[i + 2] = v;
bufPtr[i + 3] = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
auto& us = *this;
long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
if (columnsMask(0, j) == 1)
continue;
for (long k = 0; k < numColsPerMaskEntry; ++k)
{
// four-way unrolling
for (size_t i = 0; i < (m & ~3); i += 4)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
us(i + 1, (j * numColsPerMaskEntry) + k) = val;
us(i + 2, (j * numColsPerMaskEntry) + k) = val;
us(i + 3, (j * numColsPerMaskEntry) + k) = val;
}
// handle remaining
for (size_t i = m & ~3; i < m; i++)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (colPointer == NULL)
return;
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = colPointer[i];
us(i + 1, j) = colPointer[i + 1];
us(i + 2, j) = colPointer[i + 2];
us(i + 3, j) = colPointer[i + 3];
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = colPointer[i];
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = val;
us(i + 1, j) = val;
us(i + 2, j) = val;
us(i + 3, j) = val;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = val;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
LogicError("The valMat matrix has incorrect number of rows or columns.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = valMat(i, 0);
us(i + 1, j) = valMat(i + 1, 0);
us(i + 2, j) = valMat(i + 2, 0);
us(i + 3, j) = valMat(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = valMat(i, 0);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
if (this == &deepCopyFrom)
return;
SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
#endif
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
if (pArray == nullptr && numRows * numCols > 0)
InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
SetFormat(matrixFormatDense);
SetComputeDeviceId(CPUDEVICE);
// if it's externally managed, then populate the structure
if (matrixFlags & matrixFlagDontOwnBuffer)
{
// free previous array allocation if any before overwriting
delete[] Buffer();
m_numRows = numRows;
m_numCols = numCols;
SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
SetSizeAllocated(GetNumElements());
}
else
{
RequireSize(numRows, numCols);
if (!IsEmpty())
{
if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
else // need to transpose
{
ElemType* bufPtr = Data();
auto& us = *this;
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, us)
{
cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, us)
{
{
#pragma warning(suppress : 4244)
cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
}
}
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = v;
us(i + 1, i + 1) = v;
us(i + 2, i + 2) = v;
us(i + 3, i + 3) = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = v;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
if (IsEmpty() || vector.IsEmpty())
LogicError("SetDiagonalValue: Matrix is empty.");
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
LogicError("SetDiagonalValue: input vector must be a vector.");
if (vector.GetNumElements() == 1) // reduce to simple form
SetDiagonalValue(vector(0, 0));
else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
else
{
auto& us = *this;
long m = (long) GetNumRows();
if (vector.GetNumRows() == 1) // row vector
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(0, i);
us(i + 1, i + 1) = vector(0, i + 1);
us(i + 2, i + 2) = vector(0, i + 2);
us(i + 3, i + 3) = vector(0, i + 3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(0, i);
}
}
else
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(i, 0);
us(i + 1, i + 1) = vector(i + 1, 0);
us(i + 2, i + 2) = vector(i + 2, 0);
us(i + 3, i + 3) = vector(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(i, 0);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::uniform_real_distribution<ElemType> r(low, high);
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = r(generator);
bufPtr[i + 1] = r(generator);
bufPtr[i + 2] = r(generator);
bufPtr[i + 3] = r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(RNGHandle& rngHandle, const ElemType low, const ElemType high)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::uniform_real_distribution<ElemType> r(low, high);
std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r]() {return r(cpuRNGHandle->Generator()); });
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(RNGHandle& rngHandle, const ElemType mean, const ElemType stdev)
{
if (IsEmpty())
LogicError("SetGaussianRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::normal_distribution<ElemType> r(mean, stdev);
auto n = AsMultipleOf(GetNumElements(), 2);
std::generate(Data(), Data() + n, [&cpuRNGHandle, &r]() {return r(cpuRNGHandle->Generator()); });
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGumbelRandomValue(RNGHandle& rngHandle, const ElemType loc, const ElemType scale)
{
if (IsEmpty())
LogicError("SetGumbelRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::uniform_real_distribution<ElemType> r(0, 1);
std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r, loc, scale]() {return loc - scale * log(-log1p(-r(cpuRNGHandle->Generator()))); });
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetGaussianRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<ElemType> r(mean, sigma);
// #pragma omp parallel for is not used here: the shared generator is not thread safe, and the results would not be deterministic
foreach_coord (i, j, us)
{
us(i, j) = r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetTruncatedNormalRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetTruncatedNormalRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetTruncatedNormalRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long)time(NULL) : seed);
boost::random::normal_distribution<ElemType> r(mean, sigma);
const ElemType high = mean + 2 * sigma;
const ElemType low = mean - 2 * sigma;
// #pragma omp parallel for is not used here: the shared generator is not thread safe, and the results would not be deterministic
foreach_coord(i, j, us)
{
ElemType tmp = 0;
do
tmp = r(generator);
while (tmp < low || tmp > high ); // Rejection sampling is fine here because the acceptance probability is about 0.9545
us(i, j) = tmp;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetUniformRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<ElemType> r(mean, sigma);
long m = (long) GetNumRows(), n = (long) GetNumCols();
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = r(generator);
us(i + 1, j) = r(generator);
us(i + 2, j) = r(generator);
us(i + 3, j) = r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = r(generator);
}
}
}
//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: the scale value assigned to the remaining (unmasked) items.
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
auto& us = *this;
boost::random::uniform_real_distribution<ElemType> r(0, 1);
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType v;
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
v = r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 1, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 2, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 3, j) = v <= maskRate ? 0 : scaleValue;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
v = r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? 0 : scaleValue;
}
}
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
ElemType aveMultiplier = 0;
if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
LogicError("The matrix gradients must have the same rows and columns as this matrix.");
ElemType *a = Data(), *d_v = gradients.Data();
size_t n = GetNumElements();
const ElemType floor = 1e-16f;
ElemType a0, a1, a2, a3;
// disable omp here because aveMultiplier needs to be added atomically. however, it seems the result is incorrect even if omp atomic and omp critical are used.
// #pragma omp parallel for
for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
{
a[i] += d_v[i] * d_v[i];
a[i + 1] += d_v[i + 1] * d_v[i + 1];
a[i + 2] += d_v[i + 2] * d_v[i + 2];
a[i + 3] += d_v[i + 3] * d_v[i + 3];
a0 = sqrt(a[i] + floor);
a1 = sqrt(a[i + 1] + floor);
a2 = sqrt(a[i + 2] + floor);
a3 = sqrt(a[i + 3] + floor);
d_v[i] /= a0;
d_v[i + 1] /= a1;
d_v[i + 2] /= a2;
d_v[i + 3] /= a3;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
}
}
// get the last few elements if any
for (long i = n & ~3; i < n; i++)
{
a[i] += d_v[i] * d_v[i];
a0 = sqrt(a[i] + floor);
d_v[i] /= a0;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0;
}
}
if (needAveMultiplier && n > 0)
return aveMultiplier / n;
else
return 1;
}
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
CPUMatrix<ElemType>& functionValues,
ElemType learnRatePerSample,
ElemType momentum,
ElemType adaWeight,
ElemType adaMul,
bool unitGainMomentum)
{
auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
if (adaSqr != 0.0f)
{
ElemType ada = sqrt(adaSqr);
ElemType w = adaMul * ((ElemType) 1.0 / ada);
if (w > 10.0f)
w = 10.0f;
g *= w;
}
if (momentum > 0.0f)
{
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
}
g *= learnRatePerSample;
val[i] -= g;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType epsilon, bool unitGainMomentum, bool adamax)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
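// Per element (as implemented below): smoothAda holds an exponential moving average of g^2
// with decay adaWeight (or, for adamax, the running max of |g|), smoothMom holds the
// momentum-smoothed gradient, and the parameter is updated by
// -learnRatePerSample * adaMul / (ada + epsilon) times that smoothed gradient.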
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType ada;
if (!adamax)
{
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
ada = sqrt(adaSqr);
}
else
ada = smoothAda[i] = std::max(adaWeight * smoothAda[i], abs(g));
ElemType w = adaMul * (ElemType)( 1.0 / (ada + epsilon));
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
val[i] -= g * w * learnRatePerSample;
}
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
ElemType RMS_GAMMA,
ElemType RMS_WGT_INC,
ElemType RMS_WGT_MAX,
ElemType RMS_WGT_DEC,
ElemType RMS_WGT_MIN,
const bool needAveMultiplier)
{
const ElemType floor = 1e-6f;
size_t n = gradients.GetNumElements();
ElemType* curr_grad = gradients.Data();
if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3)
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
SetValue(0.0);
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* steps = Data() + 2 * n; // current step size
// initialize moving average of gradient-squared
for (long i = 0; i < n; i++)
avars[i] = curr_grad[i] * curr_grad[i];
// initialize starting step size
for (long i = 0; i < n; i++)
steps[i] = ElemType(0.02);
}
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* signs = Data() + n; // sign of previous gradient
ElemType* steps = Data() + 2 * n; // current step size
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
LogicError("The matrix gradients does not have expected dimensions.");
ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
// int upd[] = {
// 2,2,0,
// 2,2,0,
// 1,1,1,
// 2,2,0,
// 1,2,1,
// 0,2,2,
// 1,1,1,
// 0,2,2,
// 0,2,2,
// };
// for (long i=0; i<n; i++)
// {
// avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
// // grad sign base 3: 0->neg, 1->zero, 2->pos
// const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
// // signs[i] contains three consecutive grad_sign
// signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
// switch(upd[int(signs[i])])
// {
// case 0:
// steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
// break;
// case 2:
// steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
// break;
// }
// curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
// }
ElemType aveMultiplier = 0, a;
for (long i = 0; i < n; i++)
{
avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
if (signs[i] * grad_sign > 0)
steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
else
steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
a = steps[i] / sqrt(avars[i] + floor);
curr_grad[i] *= a;
signs[i] = (ElemType) grad_sign;
if (needAveMultiplier)
aveMultiplier += a;
}
if (needAveMultiplier)
return aveMultiplier / n;
else
return 1;
}
template <class ElemType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothX2 = Data() + n;
ElemType* val = functionValues.Data();
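// AdaDelta recurrences, as implemented below:
//     smoothAda <- rho * smoothAda + (1 - rho) * g^2
//     deltaX = -sqrt(smoothX2 + epsilon) / sqrt(smoothAda + epsilon) * g
//     smoothX2 <- rho * smoothX2 + (1 - rho) * deltaX^2
//     parameter <- parameter + learningRate * deltaX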
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
smoothAda[i] = adaSqr;
ElemType x2 = smoothX2[i];
ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
val[i] += learningRate * deltaX;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
if (numRows * numCols != GetNumElements())
InvalidArgument("Reshape: Total number of elements does not match.");
m_numRows = numRows;
m_numCols = numCols;
}
// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() != numRows || GetNumCols() != numCols)
Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() == numRows && GetNumCols() == numCols)
return;
VerifyResizable(__func__);
size_t numElements = numRows * numCols;
if (numElements > GetSizeAllocated() || // grow allocation
(!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
{
// reallocate buffer
ElemType* pArray = nullptr;
if (numElements > 0)
{
pArray = NewArray<ElemType>(numElements);
}
// success: update the object
delete[] Buffer();
SetBuffer(pArray, numElements * sizeof(ElemType));
SetSizeAllocated(numElements);
}
// success
m_sliceViewOffset = 0;
m_numRows = numRows;
m_numCols = numCols;
}
// allocated by the callee but should be deleted by the caller
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
size_t numElements = GetNumElements();
if (numElements != 0)
{
ElemType* arrayCopyTo = NewArray<ElemType>(numElements);
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
return arrayCopyTo;
}
else
{
return nullptr;
}
}
//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done
//return number of elements copied
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
size_t numElements = GetNumElements();
if (numElements > currentArraySize)
{
delete arrayCopyTo;
arrayCopyTo = NewArray<ElemType>(numElements);
currentArraySize = numElements;
}
if (numElements != 0)
{
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
}
return numElements;
}
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
// REVIEW alexeyk: currently not used by CPU, but implement when possible.
RuntimeError("Not implemented.");
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
// For performance reason avoid extra validation in release.
assert(col == 0 || col < GetNumCols());
return col * m_numRows; // matrix in column-wise storage
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
// For performance reason avoid extra validation in release.
assert(row < m_numRows);
return LocateColumn(col) + row; // matrix in column-wise storage
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
return AssignSumOf(alpha, *this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignSumOf(alpha, *this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha + a(i, j);
us(i + 1, j) = alpha + a(i + 1, j);
us(i + 2, j) = alpha + a(i + 2, j);
us(i + 3, j) = alpha + a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha + a(i, j);
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
//if a is a scalar, add it to all elements.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
// if (a.GetNumElements() == 1)
// *this += a(0,0);
// else
ScaleAndAdd(1, a, *this);
return *this;
}
//if [this] and a have same dimension then OUTPUT=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c(a);
c += (*this)(0, 0);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c(*this);
c += a(0, 0);
return c;
}
else
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead, but reuses the existing code
c += a;
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.GetNumElements() == 1)
{
SetValue(b);
(*this) += a;
}
else
{
SetValue(a);
(*this) += b;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
return AssignDifferenceOf(*this, alpha);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignDifferenceOf(*this, alpha);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha - a(i, j);
us(i + 1, j) = alpha - a(i + 1, j);
us(i + 2, j) = alpha - a(i + 2, j);
us(i + 3, j) = alpha - a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha - a(i, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) - alpha;
us(i + 1, j) = a(i + 1, j) - alpha;
us(i + 2, j) = a(i + 2, j) - alpha;
us(i + 3, j) = a(i + 3, j) - alpha;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) - alpha;
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(-1, a, *this);
return *this;
}
//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, subtract it from all columns of [this]
//if a is a row vector, subtract it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead but reuses existing code
c -= a;
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (this != &a)
{
RequireSize(a.GetNumRows(), a.GetNumCols());
SetValue(a);
}
(*this) -= b;
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
Scale(alpha, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
Scale(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
Scale(alpha, a, *this);
return *this;
}
// [this]=a*b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
if (a.GetNumElements() == 1)
{
if (transposeB)
AssignTransposeOf(b);
(*this) *= a(0, 0);
}
else if (b.GetNumElements() == 1)
{
if (transposeA)
AssignTransposeOf(a);
(*this) *= b(0, 0);
}
else
Multiply(a, transposeA, b, transposeB, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
auto& us = *this;
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(us(0, 0), a);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(a(0, 0), us);
return c;
}
else
{
CPUMatrix<ElemType> c;
Multiply(*this, a, c);
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
(*this) *= 1 / alpha;
return (*this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
return ((*this) * (1 / alpha));
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
auto& us = *this;
ElementWisePower(alpha, us, us);
return us;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
ElementWisePower(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
ElementWisePower(power, a, *this);
return *this;
}
//[this]=[this] .* a (operator .* cannot be overloaded in C++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
return AssignElementProductOf(*this, a);
}
//[this]=[this] ./ a (operator ./ cannot be overloaded in C++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
return AssignElementDivisionOf(*this, a);
}
//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) * b(i, j);
us(i + 1, j) = a(i + 1, j) * b(i + 1, j);
us(i + 2, j) = a(i + 2, j) * b(i + 2, j);
us(i + 3, j) = a(i + 3, j) * b(i + 3, j);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) * b(i, j);
}
}
return *this;
}
//[this] +=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) += a(i, j) * b(i, j);
us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) += a(i, j) * b(i, j);
}
}
return *this;
}
//[this]=a ./ b
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementDivisionOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
foreach_coord (i, j, us)
{
ElemType v = b(i, j);
if (v >= 0 && v < smallValue)
us(i, j) = a(i, j) / smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) = a(i, j) / (-smallValue);
else
us(i, j) = a(i, j) / v;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= a(i, 0);
us(i + 1, j) *= a(i + 1, 0);
us(i + 2, j) *= a(i + 2, 0);
us(i + 3, j) *= a(i + 3, 0);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= a(i, 0);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= v;
us(i + 1, j) *= v;
us(i + 2, j) *= v;
us(i + 3, j) *= v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
if (v >= 0 && v < EPS_IN_INVERSE)
v = EPS_IN_INVERSE;
else if (v < 0 && v > -EPS_IN_INVERSE)
v = (-EPS_IN_INVERSE);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) /= v;
us(i + 1, j) /= v;
us(i + 2, j) /= v;
us(i + 3, j) /= v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) /= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
for (long i = 0; i < m; i++)
{
ElemType v = a(i, 0);
if (v >= 0 && v < smallValue)
us(i, j) /= smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) /= (-smallValue);
else
us(i, j) /= v;
}
}
return *this;
}
// element-wise reciprocal: [this] = 1 ./ [this] (in place), or [this] = 1 ./ a for AssignElementInverseOf
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
return AssignElementInverseOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
ElemType smallValue = EPS_IN_INVERSE;
if (a.IsEmpty())
LogicError("AssignElementInverseOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) < 0 && a(i, j) > -smallValue)
us(i, j) = 1 / (-smallValue);
else if (a(i, j) >= 0 && a(i, j) < smallValue)
us(i, j) = 1 / smallValue;
else
us(i, j) = 1 / a(i, j);
}
return *this;
}
//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
return AssignSigmoidOf(*this);
}
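// Numerically stable sigmoid: for a >= 0 we compute 1 / (1 + exp(-a)); for a < 0 we compute
// exp(a) / (1 + exp(a)), so exp() is only ever evaluated on non-positive arguments and cannot overflow.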
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) >= 0)
us(i, j) = 1 / (1 + exp(-a(i, j)));
else
{
ElemType v = exp(a(i, j));
us(i, j) = v / (1 + v);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
return AssignLinearRectifierDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f;
us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f;
us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
return AssignSigmoidDerivativeOf(*this);
}
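// Note: a is expected to already contain sigmoid outputs s = sigmoid(x); the derivative is then computed as s * (1 - s).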
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
ElemType v1 = a(i + 1, j);
us(i + 1, j) = v1 * (1 - v1);
ElemType v2 = a(i + 2, j);
us(i + 2, j) = v2 * (1 - v2);
ElemType v3 = a(i + 3, j);
us(i + 3, j) = v3 * (1 - v3);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
}
}
return *this;
}
//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
return AssignTanhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignTanhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = tanh(a(i, j));
us(i + 1, j) = tanh(a(i + 1, j));
us(i + 2, j) = tanh(a(i + 2, j));
us(i + 3, j) = tanh(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = tanh(a(i, j));
}
}
return *this;
}
//[this]=logsoftmax([this]), column-wise if isColWise, otherwise row-wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
return AssignLogSoftmaxOf(*this, isColWise);
}
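// Computes logsoftmax(x)_i = (x_i - m) - log(sum_k exp(x_k - m)) with m = max_k x_k per column (or row),
// shifting by the maximum so that exp() never overflows.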
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(0, j);
foreach_row (i, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_row (i, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_row (i, us)
us(i, j) -= sum;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(i, 0);
foreach_column (j, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_column (j, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_column (j, us)
us(i, j) -= sum;
}
}
return *this;
}
//[this]=hardmax([this])
//the maximal element in each column (or row) becomes 1, all other elements become 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
return AssignHardmaxOf(*this, isColWise);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignHardmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max
ElemType maxV = a(0, j);
long maxI = 0;
foreach_row (i, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxI = i;
}
}
foreach_row (i, us)
us(i, j) = (i == maxI) ? 1.0f : 0.0f;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max
ElemType maxV = a(i, 0);
long maxJ = 0;
foreach_column (j, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxJ = j;
}
}
foreach_column (j, us)
us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
}
}
return *this;
}
//[this]=sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
return AssignSqrtOf(*this);
}
//to prevent negative values caused by floating-point operations, we force inputs to be >= 0
//this may, however, hide problems in the caller.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSqrtOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
}
// remaining
for (long i = m & ~3; i < m; i++)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
}
}
return *this;
}
//[this]=exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
return AssignExpOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignExpOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = exp(a(i, j));
us(i + 1, j) = exp(a(i + 1, j));
us(i + 2, j) = exp(a(i + 2, j));
us(i + 3, j) = exp(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = exp(a(i, j));
}
}
return *this;
}
//[this]=abs([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
return AssignAbsOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAbsOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = abs(a(i, j));
us(i + 1, j) = abs(a(i + 1, j));
us(i + 2, j) = abs(a(i + 2, j));
us(i + 3, j) = abs(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = abs(a(i, j));
}
}
return *this;
}
//[this]=log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
return AssignLogOf(*this);
}
//[this]=log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
return AssignLog10Of(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v < EPS_IN_LOG)
{
us(i, j) = LOG_OF_EPS_IN_LOG;
}
else
us(i, j) = log(v);
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v <= 0)
LogicError("AssignLogOf: Log can only applied to numbers larger than 0.");
else if (v < EPS_IN_LOG)
{
us(i, j) = LOG10_OF_EPS_IN_LOG;
}
else
us(i, j) = log10(v);
}
return *this;
}
//[this]=cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
return AssignCosineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cos(v);
}
return *this;
}
//[this]=-sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
return AssignNegativeSineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = -sin(v);
}
return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateBottom: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
if (us(i + 1, j) < threshold)
us(i + 1, j) = threshold;
if (us(i + 2, j) < threshold)
us(i + 2, j) = threshold;
if (us(i + 3, j) < threshold)
us(i + 3, j) = threshold;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
}
}
return *this;
}
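// Clamp every element into the symmetric range [-|threshold|, |threshold|].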
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
auto& us = *this;
ElemType locThresholdPos = abs(threshold);
ElemType locThresholdNeg = -locThresholdPos;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
if (us(i + 1, j) > locThresholdPos)
us(i + 1, j) = locThresholdPos;
else if (us(i + 1, j) < locThresholdNeg)
us(i + 1, j) = locThresholdNeg;
if (us(i + 2, j) > locThresholdPos)
us(i + 2, j) = locThresholdPos;
else if (us(i + 2, j) < locThresholdNeg)
us(i + 2, j) = locThresholdNeg;
if (us(i + 3, j) > locThresholdPos)
us(i + 3, j) = locThresholdPos;
else if (us(i + 3, j) < locThresholdNeg)
us(i + 3, j) = locThresholdNeg;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
}
}
return *this;
}
//x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
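// (This is the soft-thresholding / shrinkage operator, i.e. the proximal map of threshold * |x|.)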
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
#pragma omp parallel for
for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
if (bufPtr[i + 1] > threshold)
bufPtr[i + 1] -= threshold;
else if (bufPtr[i + 1] < -threshold)
bufPtr[i + 1] += threshold;
else
bufPtr[i + 1] = 0;
if (bufPtr[i + 2] > threshold)
bufPtr[i + 2] -= threshold;
else if (bufPtr[i + 2] < -threshold)
bufPtr[i + 2] += threshold;
else
bufPtr[i + 2] = 0;
if (bufPtr[i + 3] > threshold)
bufPtr[i + 3] -= threshold;
else if (bufPtr[i + 3] < -threshold)
bufPtr[i + 3] += threshold;
else
bufPtr[i + 3] = 0;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
}
return *this;
}
//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateBottomOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) < threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateTop: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) > threshold)
us(i, j) = threshold;
}
return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateTopOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) > threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = 0 if abs(this[i]) < threshold.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
if (IsEmpty())
LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (abs(us(i, j)) < threshold)
us(i, j) = 0;
}
return *this;
}
//sum of all abs(elements)
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
if (IsEmpty())
LogicError("SumOfAbsElements: Matrix is empty.");
if (sizeof(ElemType) == sizeof(double))
{
return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
}
else
{
#pragma warning(suppress : 4244)
return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
}
}
//sum of all elements
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
if (IsEmpty())
LogicError("SumOfElements: Matrix is empty.");
ElemType sum = 0;
long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : sum)
for (long i = 0; i < (m & ~3); i += 4)
{
sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
sum += bufPtr[i];
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOfElements: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.SumOfElements();
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
if (a.IsEmpty())
LogicError("AssignOneHot: Matrix a is empty.");
if (axis >= shape.size())
LogicError("AssignOneHot: axis is not correct");
size_t item_size = 1;
for (size_t i = 0; i < shape.size() && i < axis; i++)
item_size *= shape[i];
size_t num_class = shape[axis];
auto& us = *this;
auto nCols = a.GetNumCols();
auto nRows = num_class * a.GetNumRows();
us.RequireSize(nRows, nCols);
ElemType* bufPtr = Data();
ElemType* aBufPtr = a.Data();
memset(bufPtr, 0, sizeof(ElemType) * nRows * nCols);
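// Layout: input element i belongs to block i / item_size at offset i % item_size; its one-hot expansion
// occupies num_class * item_size consecutive output entries, with the 1 written at offset
// item_id + item_size * (size_t) aBufPtr[i] inside that block. E.g. with item_size == 1, num_class == 4
// and input value 2, the block for that element becomes [0, 0, 1, 0].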
#pragma omp parallel for
for (long i = 0; i < a.GetNumElements(); i++)
{
if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class)
{
size_t block_id = i / item_size;
size_t item_id = i % item_size;
bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GatherFromTarget(const CPUMatrix<ElemType>& indices, const CPUMatrix<ElemType>& target, size_t row_elements)
{
if (indices.IsEmpty() || target.IsEmpty())
LogicError("GatherFromTarget: input matrix is empty.");
if (row_elements == 0)
LogicError("GatherFromTarget: target matrix at least need 1 dim.");
auto nCols = indices.GetNumCols();
auto nRows = indices.GetNumRows() * row_elements;
this->RequireSize(nRows, nCols);
ElemType* indicesBufPtr = indices.Data();
ElemType* targetBufPtr = target.Data();
ElemType* buffer = Data();
#pragma omp parallel for
for (int i = 0; i < indices.GetNumElements(); i++)
{
memcpy(buffer + i * row_elements, targetBufPtr + ((size_t)indicesBufPtr[i] * row_elements), sizeof(ElemType) * row_elements);
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ScatterToIndices(const CPUMatrix<ElemType>& values, const CPUMatrix<ElemType>& indices, size_t row_elements)
{
if (indices.IsEmpty() || values.IsEmpty())
LogicError("ScatterToIndices: input matrix is empty.");
ElemType* indicesBufPtr = indices.Data();
ElemType* valueBufPtr = values.Data();
ElemType* buffer = Data();
ScatterValues(indicesBufPtr, valueBufPtr, buffer, (ElemType)1, indices.GetNumElements(), row_elements, this->GetNumCols());
return *this;
}
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
return AreEqual(*this, a, threshold);
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty())
LogicError("VectorSum: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, a)
{
ElemType v = 0;
foreach_row (i, a)
{
#pragma omp atomic
v += a(i, j);
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, a)
{
ElemType v = 0;
foreach_column (j, a)
{
#pragma omp atomic
v += a(i, j);
}
c(i, 0) = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm1: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
#pragma omp atomic
v += abs(us(i, j));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
#pragma omp atomic
v += abs(us(i, j));
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm1(*this, isColWise);
return *this;
}
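// L2 norm per column (isColWise) or per row via BLAS nrm2; the row-wise case walks a row of the
// column-major buffer using stride m.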
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm2: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
ElemType* bufPtr = us.Data();
if (isColWise) // col-wise
{
c.RequireSize(1, n);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
}
}
}
else
{
c.RequireSize(m, 1);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
}
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm2(*this, isColWise);
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNormInf: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
// #pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
v = std::max(v, abs(us(i, j)));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
// #pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
v = std::max(v, abs(us(i, j)));
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNormInf(*this, isColWise);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
InnerProduct(a, b, *this, isColWise);
return *this;
}
//column-wise Khatri-Rao product (column-wise Kronecker product)
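// For each column k the result is the outer product a(:,k) * b(:,k)^T stored column-major, i.e.
// out(i + j * rowsA, k) = a(i, k) * b(j, k). E.g. for a(:,k) = [a0;a1] and b(:,k) = [b0;b1;b2] the output
// column is [a0*b0; a1*b0; a0*b1; a1*b1; a0*b2; a1*b2].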
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
for (long k = 0; k < cols; k++)
{
long jj = 0;
for (long j = 0; j < rowsB; j++)
{
for (long i = 0; i < rowsA; i++)
{
(*this)(jj++, k) = a(i, k) * b(j, k);
}
}
}
return *this;
}
//column-wise reshaped product. Used to compute the KhatriRaoProduct gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) block multiplies the corresponding column of b (K2, frames),
// and the output is a (K1, frames) matrix
// if each column of a is transposed, each (K1, K2)^T multiplies the corresponding column of b (K1, frames) and the output is (K2, frames)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddColumnReshapeProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
if (rowsA % rowsB != 0)
InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
long rowsC = rowsA / rowsB;
if (rowsC != GetNumRows() || cols != GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
auto& us = *this;
if (transposeAColumn)
{
// find nrows and ncols of the reshaped a
long nrows = rowsB;
long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++) // row and col is transposed
{
ElemType v = 0;
for (size_t i = 0; i < nrows; i++)
{
v += a(k, t) * b(i, t);
k++;
}
us(j, t) += v;
}
}
}
else
{
size_t ncols = rowsB;
size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++)
{
for (size_t i = 0; i < nrows; i++)
{
us(i, t) += a(k, t) * b(j, t);
k++;
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(alpha, a, *this);
return *this;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
if (IsEmpty())
LogicError("FrobeniusNorm: Matrix is empty.");
ElemType v = 0;
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : v)
for (long i = 0; i < (m & ~3); i += 4)
{
v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
v += bufPtr[i] * bufPtr[i];
}
return sqrt(v);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.FrobeniusNorm();
return us;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
if (IsEmpty())
LogicError("MatrixNormInf: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
#pragma omp critical
{
v = std::max(v, abs(us(i, j)));
}
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
if (IsEmpty())
LogicError("MatrixNorm0: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) != 0)
{
#pragma omp critical
{
++v;
}
}
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
if (IsEmpty())
LogicError("MatrixNorm1: Matrix is empty.");
auto& us = *this;
ElemType sum = 0;
#pragma omp parallel for reduction(+ : sum)
foreach_coord (i, j, us)
{
sum += abs(us(i, j));
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) = (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AddSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
// maxIndexes is a CPUMatrix<ElemType>& rather than an integer vector because the result may be used in further matrix calculations
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
if (IsEmpty())
LogicError("VectorMax: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
if (topK > m)
InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
maxValues.RequireSize(topK, n);
maxIndexes.RequireSize(topK, n);
if (topK == 1)
{
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v < us(i, j))
{
index = i;
v = us(i, j);
}
}
maxValues(0, j) = v;
maxIndexes(0, j) = (ElemType) index;
}
}
else
{
std::vector<int> indices(m);
int i = 0;
std::generate(indices.begin(), indices.end(), [&i]
{
return i++;
});
const ElemType* curVal = Data();
ElemType* curIdx = maxIndexes.Data();
ElemType* curMax = maxValues.Data();
for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
{
// Partition so that the topK largest values come first (their relative order within the top block is unspecified).
std::nth_element(indices.begin(), indices.begin() + topK, indices.end(),
[curVal](const int& a, const int& b)
{
return curVal[a] > curVal[b];
});
// REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
for (int i2 = 0; i2 < topK; i2++)
{
curIdx[i2] = static_cast<ElemType>(indices[i2]);
curMax[i2] = curVal[indices[i2]];
}
}
}
}
else
{
if (topK > 1)
RuntimeError("Row-wise TopK max is not supported.");
maxValues.RequireSize(m, 1);
maxIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v < us(i, j))
{
index = j;
v = us(i, j);
}
}
maxValues(i, 0) = v;
maxIndexes(i, 0) = (ElemType) index;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorMin: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
minValues.RequireSize(1, n);
minIndexes.RequireSize(1, n);
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v > us(i, j))
{
index = i;
v = us(i, j);
}
}
minValues(0, j) = v;
minIndexes(0, j) = (ElemType) index;
}
}
else
{
minValues.RequireSize(m, 1);
minIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v > us(i, j))
{
index = j;
v = us(i, j);
}
}
minValues(i, 0) = v;
minIndexes(i, 0) = (ElemType) index;
}
}
}
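// Counts differences between a and b. With searchInCol == false this is an element-wise count of mismatches;
// with searchInCol == true, a is treated as a single-row matrix and a column counts as a difference when
// a(0, col) does not occur anywhere in the corresponding column of b.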
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
if (a.GetNumCols() != b.GetNumCols())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns.");
if (!searchInCol && a.GetNumRows() != b.GetNumRows())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows.");
ElemType n = 0;
if (!searchInCol)
{
foreach_coord (i, j, a)
{
n += (a(i, j) != b(i, j));
}
}
else
{
size_t crow = b.GetNumRows();
const ElemType* curCol = b.Data();
for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow)
{
auto res = std::find(curCol, curCol + crow, a(0, icol));
if (res == curCol + crow)
n++;
}
}
RequireSize(1, 1); // result should be one element
(*this)(0, 0) = n;
return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
struct PrintRange
{
// print from begin to skipBegin, then from skipEnd to end
// skipBegin = end if no split
size_t begin;
size_t skipBegin;
size_t skipEnd;
size_t end;
bool IsEmpty() const { return end <= begin; }
// examples:
// * 3..10
// * -3..-3: include end-3..end and 0..3
PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
{
if (first >= 0 && last >= 0)
{
begin = (size_t)first;
end = (size_t)last + 1;
if (end > total) // allow INT_MAX, meaning 'to the end'
end = total;
skipBegin = end;
skipEnd = end;
}
else if (first < 0 && last < 0)
{
begin = 0;
skipBegin = (size_t)(-last);
skipEnd = (size_t)(total + first);
if (skipEnd <= skipBegin)
skipBegin = skipEnd = total;
end = total;
}
else // if other combinations are ever of interest then implement them here
LogicError("Print: Bounds must be either both positive or both negative.");
}
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
fprintf(stderr, "\n###### ");
if (matrixName != nullptr)
fprintf(stderr, "%s ", matrixName);
fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
fprintf(stderr, " ######\n\n");
if (IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
PrintRange rowRange(rowFirst, rowLast, GetNumRows());
PrintRange colRange(colFirst, colLast, GetNumCols());
if (rowRange.IsEmpty() || colRange.IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
const auto& us = *this;
if (rowRange.begin > 0)
fprintf(stderr, "...\n");
for (size_t i = rowRange.begin; i < rowRange.end; i++)
{
if (i == rowRange.skipBegin) // insert ... between the two blocks if any
{
fprintf(stderr, "...\n");
i = rowRange.skipEnd;
}
if (colRange.begin > 0) // ... at line start
fprintf(stderr, "...\t");
for (size_t j = colRange.begin; j < colRange.end; j++)
{
if (j == colRange.skipBegin)
{
fprintf(stderr, "...\t");
j = colRange.skipEnd;
}
fprintf(stderr, "%.10f\t", us(i, j));
}
if (colRange.end < GetNumCols()) // ... at line end
fprintf(stderr, "...");
fprintf(stderr, "\n");
}
if (rowRange.end < GetNumRows())
fprintf(stderr, "...\n");
}
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
Print(matrixName, 0, GetNumRows() - 1, 0, GetNumCols() - 1);
}
// file I/O
//matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
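// This is an im2col-style packing: every kernel-sized input patch is copied into a column of the packed
// matrix so the convolution can then be evaluated as a single matrix product.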
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding)
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
if (zeroPadding)
SetValue((ElemType) 0);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
(*this)(packRow, packCol) = currentInputValue;
}
packColBase += (long) outputHeight;
}
}
}
return *this;
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding) const
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
currentInputValue += (*this)(packRow, packCol);
}
packColBase += (long) outputHeight;
}
inputSubBatch(id, sample) = currentInputValue;
}
}
return inputSubBatch;
}
//assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType maxVal = -FLT_MAX;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels];
maxVal = std::max(maxVal, val);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = maxVal;
}
}
return *this;
}
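// Back-propagates the max-pooling gradient: each input position accumulates the output gradients of all
// pooling windows in which it equals the pooled maximum (detected by comparing the input value with the
// corresponding pooled output).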
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = inputBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < (long) inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels
const long x = (long) (nXC / channels); // row in input
const long c = (long) (nXC % channels); // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
if (inputValue == outputBatch(outputIndex, sample))
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
const size_t windowSize = windowWidth * windowHeight;
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < (long) outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType sum = 0;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
sum += inputBatch(rowInInput, sample);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = sum / windowSize;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = outputGradientBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const long windowSize = (long) (windowWidth * windowHeight);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < (long) inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels
const long x = nXC / (long) channels; // row in input
const long c = nXC % (long) channels; // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
}
}
}
}
return *this;
}
#pragma endregion Other Helper Functions
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
}
output(row, sample) = sum;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
// Do NOT parallelize these loops!
for (size_t sample = 0; sample < GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < in.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
if (mpRowCol.GetNumRows() % mapOutCount != 0)
InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount;
size_t batchSize = GetNumCols();
size_t kernelSize = runs(1, 0);
if (kernelSize % mapInCount != 0)
InvalidArgument("kernelSize must be multiple of mapInCount.");
size_t kernelMapSize = kernelSize / mapInCount;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
size_t isrc = row;
size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
{
assert(isrc < GetNumElements());
assert(idst + outMap < output.GetNumElements());
output.Data()[idst + outMap] = (*this)(isrc, sample);
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
size_t unrollCols = mapOutSize * batchSize;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
assert(idst < output.GetNumElements());
output.Data()[idst] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
assert(std::numeric_limits<ElemType>::has_infinity);
ElemType res = -std::numeric_limits<ElemType>::infinity();
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
res = std::max(res, (*this)(colBase + dcol, sample));
}
output(row, sample) = res;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType g = (*this)(row, sample);
ElemType m = out(row, sample);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
if (in(colBase + dcol, sample) >= m)
{
#pragma omp atomic
grad(colBase + dcol, sample) += g;
break;
}
}
}
}
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
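// Worked example (hypothetical sizes): with pooledWidth = pooledHeight = 2 and channels = 3,
// roiOutputSize below is 2 * 2 * 3 = 12, so ROI r of an image occupies output rows
// [12 * r, 12 * r + 12) of that image's column, and within that range the element for
// (outw, outh, c) sits at offset outw + outh * 2 + c * 4, matching the [PW x PH x C] layout above.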
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
CPUMatrix<ElemType>& argmax) const
{
size_t roiOutputSize = pooledHeight * pooledWidth * channels;
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
auto img = ColumnSlice(imgIdx, 1);
auto rois = roiData.ColumnSlice(imgIdx, 1);
#pragma omp parallel for
for (int roiIdx = 0; roiIdx < numRois; roiIdx++)
{
// each ROI is 4 elements: (x, y, w, h).
int base = roiIdx * 4;
// scaled ROI coordinates, stored relative to the original image size
ElemType scX = rois(base, 0);
ElemType scY = rois(base + 1, 0);
ElemType scW = rois(base + 2, 0);
ElemType scH = rois(base + 3, 0);
// compute actual spatial location of the ROI in our featuremap.
size_t x = (size_t)round(scX * width);
size_t y = (size_t)round(scY * height);
ElemType roiW = (ElemType)max(round(scW * width), (ElemType)1);
ElemType roiH = (ElemType)max(round(scH * height), (ElemType)1);
const ElemType winW = roiW / (ElemType)pooledWidth;
const ElemType winH = roiH / (ElemType)pooledHeight;
// inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
// loop over spatial locations in output.
#pragma omp parallel for
for (int outw = 0; outw < pooledWidth; outw++)
{
for (int outh = 0; outh < pooledHeight; outh++)
{
// compute the top left corner of the input
// spatial window corresponding to this output unit
size_t hstart = (size_t)floor(outh * winH);
size_t wstart = (size_t)floor(outw * winW);
// compute bottom right corner (not included)
size_t hend = (size_t)ceil((outh + 1) * winH);
size_t wend = (size_t)ceil((outw + 1) * winW);
// offset window based on ROI top left corner.
// these indices are into the input slice.
hstart = min(max(hstart + y, (size_t)0), height);
wstart = min(max(wstart + x, (size_t)0), width);
hend = min(max(hend + y, (size_t)0), height);
wend = min(max(wend + x, (size_t)0), width);
bool isempty = (hend <= hstart) || (wend <= wstart);
for (size_t c = 0; c < channels; c++)
{
// [PW x PH x C x R x N]; R = ROIs per image
size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
size_t maxidx = 0;
ElemType maxval = isempty ? (ElemType)0 : -FLT_MAX;
size_t baseIdx = c * height * width;
for (size_t h = hstart; h < hend; h++)
{
for (size_t w = wstart; w < wend; w++)
{
// stored argmax indices are relative to the current channel.
size_t dataIdx = w + h * width;
if (img(baseIdx + dataIdx, 0) > maxval)
{
maxval = img(baseIdx + dataIdx, 0);
maxidx = dataIdx;
}
}
}
output(outputIdx, imgIdx) = maxval;
argmax(outputIdx, imgIdx) = maxidx;
}
}
}
}
}
}
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
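// Worked example (hypothetical values): for winH = 2.5 and an input row h sitting 6 rows below
// roiStartH, the candidate pooled rows computed below are phstart = (size_t)(6 / 2.5) = 2 and
// phend = (size_t)ceil(7 / 2.5) = 3 (exclusive), so only pooled row 2 could have pooled this
// pixel; the argmax comparison then decides whether it actually did.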
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
CPUMatrix<ElemType>& argmax) const
{
// loop over images in the batch.
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
// ROIs for this image. length 4*numRois;
auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
// gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();
// loop over spatial locations in the image.
#pragma omp parallel for
for (int w = 0; w < width; w++)
{
#pragma omp parallel for
for (int h = 0; h < height; h++)
{
// loop over the ROIs seeing which ones contain this location.
for (int roiN = 0; roiN < numRois; roiN++)
{
// each ROI is 4 elements: (x, y, w, h).
int roiOffset = roiN * 4;
// ROI data is relative to original image size
size_t roiStartW = (size_t)round(rois[roiOffset + 0] * width);
size_t roiStartH = (size_t)round(rois[roiOffset + 1] * height);
size_t roiWidth = max((size_t)round(rois[roiOffset + 2] * width), (size_t)1);
size_t roiHeight = max((size_t)round(rois[roiOffset + 3] * height), (size_t)1);
// skip this ROI if it doesn't contain the current input location.
const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
h >= roiStartH && h < roiStartH + roiHeight);
if (!inROI)
continue;
ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight;
ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;
// what pooled nodes in the output for this ROI could have pooled this input location?
size_t phstart = (size_t)((h - roiStartH) / winH);
size_t pwstart = (size_t)((w - roiStartW) / winW);
size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));
phstart = min(max(phstart, (size_t)0), pooledHeight);
phend = min(max(phend, (size_t)0), pooledHeight);
pwstart = min(max(pwstart, (size_t)0), pooledWidth);
pwend = min(max(pwend, (size_t)0), pooledWidth);
for (size_t c = 0; c < channels; c++)
{
ElemType gradient = 0;
// [W x H x C x N]
size_t index = w + h*width + c*height*width;
// go right up to channel c of the current ROI.
size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
const ElemType* offsetPoolGrad = pooledGrad + offset;
const ElemType* offsetArgmax = argmaxCol + offset;
for (size_t ph = phstart; ph < phend; ph++)
{
for (size_t pw = pwstart; pw < pwend; pw++)
{
if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
gradient += offsetPoolGrad[ph * pooledWidth + pw];
}
}
grad(index, imgIdx) = gradient;
}
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < input.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
ElemType prevMax = curMax;
int imax = 0;
for (int i = 1; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
curMax = std::max(curMax, poolInput(colBase + dcol, sample));
if (curMax > prevMax)
{
prevMax = curMax;
imax = i;
}
}
int dcol = indices(i0 + imax, 0);
assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
input(colBase + dcol, sample) = (*this)(row, sample);
//int i = (int)poolIn(row, sample);
//assert(0 <= i && i < size);
//int dcol = indices(i0 + i, 0);
//assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
//input(colBase + dcol, sample) = (*this)(row, sample);
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += (*this)(colBase + dcol, sample);
}
// Note that we divide by size which is the number of actual elements (does not include padding).
// if poolIncludePad == true, use avg_pool_include_pad
if (poolIncludePad)
size = indices(0, 0);
output(row, sample) = sum / size;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
int tmp = size;
if (poolIncludePad)
size = indices(0, 0);
assert(size > 0);
ElemType g = (*this)(row, sample) / size;
size = tmp;
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
#pragma omp atomic
grad(colBase + dcol, sample) += g;
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<ElemType>& scale, const CPUMatrix<ElemType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
CPUMatrix<ElemType>& runMean, CPUMatrix<ElemType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
CPUMatrix<ElemType>& saveMean, CPUMatrix<ElemType>& saveInvStdDev) const
{
if (GetNumRows() % scale.GetNumRows() != 0)
LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix.");
if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
RuntimeError("Batch normalization training on CPU is not yet implemented.");
saveMean.Resize(0, 0); // only doing inference: these two are not produced
saveInvStdDev.Resize(0, 0);
bool spatial = GetNumRows() != scale.GetNumRows();
if (spatial)
{
size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
size_t imap = irow / spatialSize;
ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
out(irow, icol) = scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0);
}
}
}
else
{
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon);
out(irow, icol) = scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<ElemType>& scale, double blendFactor,
const CPUMatrix<ElemType>& saveMean, const CPUMatrix<ElemType>& saveInvStdDev,
CPUMatrix<ElemType>& scaleGrad, CPUMatrix<ElemType>& biasGrad) const
{
UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor); UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad);
RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
if (a.IsEmpty() || b.IsEmpty())
return;
int m, n, k, l;
int lda, ldb, ldc;
CBLAS_TRANSPOSE mklTransA;
CBLAS_TRANSPOSE mklTransB;
if (transposeA)
{
m = (int) a.GetNumCols();
k = (int) a.GetNumRows();
lda = k;
mklTransA = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
m = (int) a.GetNumRows();
k = (int) a.GetNumCols();
lda = m;
mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
}
if (transposeB)
{
l = (int) b.GetNumCols();
n = (int) b.GetNumRows();
ldb = n;
mklTransB = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
l = (int) b.GetNumRows();
n = (int) b.GetNumCols();
ldb = l;
mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
}
assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
if (k != l)
InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");
if (beta == 0)
c.RequireSize(m, n);
else
c.VerifySize(m, n); // Can't resize if beta != 0
ldc = (int) c.GetNumRows();
if (pQuantizedMultiplier == nullptr)
{
if (sizeof(ElemType) == sizeof(double))
{
cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
}
else
{
#pragma warning(suppress : 4244)
cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
}
}
else
{
// TODO: support transpose product
if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
LogicError("Quantized multiplier currently doesn't support transpose.");
pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
}
}
template <class ElemType>
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
ElemType beta, CPUMatrix<ElemType>& c)
{
if (a.GetNumElements() != 1)
InvalidArgument("the argument a must be a scalar"); // a is a scalar
ElemType f = alpha * a.Get00Element();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f;
else
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f + c(i, j) * beta;
}
template <class ElemType>
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
InvalidArgument("the argument v must be a vector"); // v is a vector
if (beta == 0)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
else
c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0
const ElemType* vd = v.Data();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j];
else
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta;
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
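// Usage sketch (shapes only, all names hypothetical): for an m x n input A, U comes back
// m x m, SIGMA min(m, n) x 1 and VT n x n, so a caller might write:
// CPUMatrix<float> U, SIGMA, VT, W;
// CPUMatrix<float>::SVD(A, SIGMA, U, VT, W); // W is only scratch working memory
// Note that the underlying gesvd routines overwrite A's buffer, so pass a copy if A is still needed.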
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
if (A.IsEmpty())
LogicError("SVD: input matrix is empty.");
int info;
int m, n, lda, ldu, ldvt;
m = (int) A.GetNumRows();
n = (int) A.GetNumCols();
W.GetNumRows(); // W is used as temp working memory
lda = m;
ldu = m;
ldvt = n;
U.RequireSize(m, m);
SIGMA.RequireSize(std::min(m, n), 1);
VT.RequireSize(n, n);
if (sizeof(ElemType) == sizeof(double))
{
#ifdef USE_MKL
double wkopt;
int lwork = -1;
dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
lwork = (int) wkopt;
W.RequireSize(lwork, 1);
dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, reinterpret_cast<double*>(W.Data()), &lwork, &info);
#else
std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
}
else
{
#ifdef USE_MKL
float wkopt;
int lwork = -1;
sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
lwork = (int) wkopt;
W.RequireSize(lwork, 1);
sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, reinterpret_cast<float*>(W.Data()), &lwork, &info);
#else
std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
}
if (info > 0)
{
RuntimeError("The algorithm computing SVD failed to converge.\n");
}
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c);
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = (int) (*this)(0, instance_id);
log_likelihood += softmax(instance_id, sample);
}
c(0, 0) = -log_likelihood;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = -(int) (*this)(0, instance_id);
ElemType score = bias(sample, 0);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += b(dim, sample) * a(dim, instance_id);
log_likelihood += score;
}
c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
if (inputIndex == 1)
{
#pragma omp parallel for
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 2)
{
int i_blocks = omp_get_num_threads() * 16;
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
#pragma omp parallel for
for (int ib = 0; ib < i_blocks; ib++)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
if (sample % i_blocks == ib)
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 3)
{
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
c(0, sample) -= tmp(sample_id, instance_id);
}
}
else
InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
double log_likelihood = 0.0;
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
size_t num_noise_samples = sample_size - 1;
double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
double score = bias(0, sample);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += a(dim, instance_id) * b(dim, sample);
double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
if (sample_id == 0)
sample_prob = -sample_prob;
double score_noise = log_num_noise_samples + sample_prob;
double z = LogAdd(score, score_noise);
double logprob = score - z;
double logprob_noise = score_noise - z;
tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
if (sample_id == 0)
tmp(sample_id, instance_id) += 1;
log_likelihood += sample_id == 0 ? logprob : logprob_noise;
}
c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add to all elements of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty() || c.IsEmpty())
LogicError("ScaleAndAdd: one of the input matrices is empty.");
if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
{
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
const int incy = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");
if (sizeof(ElemType) == sizeof(double))
{
cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
}
else
{
#pragma warning(suppress : 4244)
cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
}
}
else if (a.GetNumElements() == 1) // scalar, add to all elements
{
ElemType v = alpha * a(0, 0);
long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
c(i, j) += v;
c(i + 1, j) += v;
c(i + 2, j) += v;
c(i + 3, j) += v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
c(i, j) += v;
}
}
}
else if (a.GetNumCols() == 1) // col vector, add it to all columns
{
int m = (int) c.GetNumRows();
if (m != (int) a.GetNumRows())
InvalidArgument("To add column vector, rows should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
}
else // row vector, add it to all rows
{
int m = (int) c.GetNumRows();
int n = (int) c.GetNumCols();
if (n != (int) a.GetNumCols())
InvalidArgument("To add row vector, cols should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
}
}
}
}
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()))
{
InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
}
if (a.IsEmpty())
LogicError("AddScaledDifference: Input matrix a is empty.");
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] += alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] += alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] += alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
{
InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
}
if (a.IsEmpty())
LogicError("AssignScaledDifference: Input matrix a is empty.");
if (&c != &a && &c != &b)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] = alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] = alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] = alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
// c[ci,cj] += a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AddElementToElement: index out of range.");
ElemType us = beta ? beta * c(ci, cj) : 0; // do not multiply if beta is 0, could be a NaN
us += a(ai, aj);
c(ci, cj) = us;
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AddScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
c.RequireSize(m, n);
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (alpha == 0)
{
memset(cBufPtr, 0, sizeof(ElemType) * c.GetNumElements());
return;
}
long size = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (size & ~3); i += 4)
{
cBufPtr[i] = alpha * aBufPtr[i];
cBufPtr[i + 1] = alpha * aBufPtr[i + 1];
cBufPtr[i + 2] = alpha * aBufPtr[i + 2];
cBufPtr[i + 3] = alpha * aBufPtr[i + 3];
}
// remaining elements
for (long i = size & ~3; i < size; i++)
{
cBufPtr[i] = alpha * aBufPtr[i];
}
}
/// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if (alpha == 0 && incx == 1)
{
memset(a.Data(), 0, sizeof(ElemType) * len);
}
else if (sizeof(ElemType) == sizeof(double))
{
cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx);
}
else
{
#pragma warning(suppress : 4244)
cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx);
}
}
/// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary>
/// <param name="alpha">1x1 matrix</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
if (alpha.GetNumElements() != 1)
LogicError("Matrix alpha must be 1x1");
CPUMatrix<ElemType>::Scale(alpha(0, 0), a);
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
if ((isColWise && m == 1) || (!isColWise && n == 1)) // in this case it's equivalent to element-wise product
{
c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(1, n);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
}
else
{
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
}
}
// treat matrices as vectors. do vec(a)^T vec(b)
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProductOfMatrices: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");
if (sizeof(ElemType) == sizeof(double))
{
return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
}
else
{
#pragma warning(suppress : 4244)
return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: The input matrix a is empty.");
c.RequireSize(a.GetNumRows(), a.GetNumCols());
if (alpha == 2)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j);
}
}
else if (alpha == 3)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j) * a(i, j);
}
}
else
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = pow(a(i, j), alpha);
}
}
}
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
return false;
bool result = true;
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (abs(a(i, j) - b(i, j)) > threshold)
{
result = false;
break;
}
}
return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
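// Worked example (hypothetical sizes D = 2, S = 3, M = 1, K = 4, T = 5), using the index
// formulas in the loop below: the input element at (d, s, m, k, t) = (0, 1, 0, 2, 0) has
// na = ((2 * 1 + 0) * 3 + 1) * 2 + 0 = 14 and is written to
// nb = ((1 * 1 + 0) * 4 + 2) * 2 + 0 = 12, i.e. the k and s positions trade places while
// d stays innermost.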
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
size_t N = D * S * M * K * T;
const auto pa = a.Data();
const auto pb = b.Data();
auto pc = c.Data();
// Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
for (size_t na = 0; na < N; na++) // loop over all elements
{
// recover the 5 indices from the loop counter
size_t d = na % D;
size_t s = (na / D) % S;
size_t m = (na / D / S) % M;
size_t k = (na / D / S / M) % K;
size_t t = (na / D / S / M / K) % T;
// compute index for the a and b/c tensors
assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
assert(nb < N);
// perform the computation
ElemType cval = keepWeight ? keepWeight * pb[nb] : 0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
cval += scaleFactor * pa[na];
pc[nb] = cval;
}
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(0);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
CPUMatrix<ElemType> c(rows, rows); // will initialize to 0
c.SetDiagonalValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetUniformRandomValue(low, high, seed);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetGaussianRandomValue(mean, sigma, seed);
return c;
}
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
bool bHas = false;
bool isvFinite = std::isfinite(v);
#pragma omp parallel for
for (long j = 0; j < mat.GetNumElements(); j++)
{
#pragma omp flush(bHas)
if (!bHas)
{
ElemType cur = mat.Data()[j];
if (isvFinite && std::isfinite(cur))
{
if (cur == v)
bHas = true;
}
else if (std::isnan(v) && std::isnan(cur))
bHas = true;
else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
bHas = true;
}
}
return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two row vectors
// the output is a matrix of size(neg+1, col)
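// Worked example (hypothetical sizes): with n = 4 columns, shift = 1 and negnumber = 2, row 0
// of the result holds the plain products a(0, j) * b(0, j), row 1 pairs a(0, j) with
// b(0, (j + 1) % 4), and row 2 with b(0, (j + 2) % 4), giving a (negnumber + 1) x 4 output.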
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(negnumber + 1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n)
// #pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, j);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < m; i++)
{
us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n);
}
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
if ((isColWise && m == 1) || (!isColWise && n == 1)) // in this case it's equivalent to element-wise product
{
InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors");
// c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(negnumber + 1, n); // this line is changed: the result has negnumber+1 rows
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
else
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
}
else
{
InvalidArgument("InnerProduct: Rowwise is not supported yet");
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
if (a.IsEmpty())
LogicError("GetARowByIndex: the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
if (index >= (size_t) m) // index is unsigned, so only the upper bound needs checking
LogicError("GetARowByIndex: the row index is out of range.");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
auto& us = *this;
RequireSize(1, n);
for (long j = 0; j < n; j++)
{
us(0, j) = a(index, j);
}
return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != 1 || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
c.RequireSize(k, l); // c must be the same size as b
if (bFirstmatrixfixed)
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, j) * b(i, (j + shift) % l);
}
}
}
else
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, (j + shift) % l) * b(i, j);
}
}
}
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
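// Illustrative sketch (assuming shift=1, n=3): for a = [a1 a2 a3], b = [b1 b2 b3] the result is
//   [a1*b2  a2*b3  a3*b1]
// i.e. column j of the output is a(0,j) * b(0, (j + shift) % n).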
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
// long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n)
long n = (long) GetNumCols(); // a and b are of size (1,n)
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, (j + shift) % n);
}
return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd
inline double LogAddD(double x, double y)
{
return LogAdd(x, y);
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
ElemType fAlpha = (ElemType) LZERO;
ElemType* bufPtr = Data();
for (int k = 0; k < (int) GetNumElements(); k++)
fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
return fAlpha;
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& pair_scores)
{
int iNumPos = (int) lbls.GetNumCols();
int iNumLab = (int) lbls.GetNumRows();
int lastLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, iNumPos - 1) != 0)
{
lastLbl = ik;
break;
}
beta.RequireSize(iNumLab, iNumPos);
for (int t = iNumPos - 1; t >= 0; t--)
{
#pragma omp parallel for
for (int k = 0; k < iNumLab; k++)
{
_rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
}
}
};
// Calculate alpha in forward-backward calculation. equation (6), (7) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the max number of phones between utterances
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
// Alpha and Beta scores outside of the delay boundary are set to zero.
// Setting this parameter smaller will result in shorter delay between label outputs during decoding.
// delayConstraint=-1 means no constraint
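// Sketch of the recursion computed below (log domain, s = phoneSeqId, (+) = LogAdd):
//   alpha_t(s) = [ alpha_{t-1}(s) (+) alpha_{t-1}(s-1) (+) alpha_{t-1}(s-2) ] + log p_t(label_s)
// where the (s-1) term is included only for s > 1, and the (s-2) term only when s > 2 and label_s
// is neither the blank token nor equal to label_{s-2}. At t == 0, only s == 1 and s == 2 are
// initialized directly from the network output; the delay constraint then resets to LZERO any
// entry whose frame index lies too far beyond the right-hand phone boundary.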
template<class ElemType>
void _assignAlphaScore(
const ElemType *prob,
ElemType *alphaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
size_t numChannels,
const size_t uttNum,
const size_t t,
const size_t maxPhoneNum, // Maximum length of utterance in this MB
const size_t totalPhoneNum, // Total number of phones
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0;uttId < uttNum;uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
// Index of the label in the sequence
// Current and previous phone indices in phoneSeq matrix
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
// Actual current phone label
size_t phoneId = (size_t)(phoneSeq[labelid]);
// Index of the current frame in minibatch
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
// Index of probability of observing phoneId at frame timeId
size_t probId = timeId*totalPhoneNum + phoneId;
size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)
if (t == 0)
{
// Initialize recursion
if (phoneSeqId == 1 || phoneSeqId == 2)
{
alphaScore[alphaId] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1)
size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1)
size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2)
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId > 2)
{
size_t labelid_2 = labelid - 2;
// if current label is not blank and not equal prev non-blank label
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, alphaScore[alphaId_2]);
}
}
if (phoneSeqId > 1)
{
x = LogAdd(x, alphaScore[alphaId_1]);
}
x = LogAdd(x, alphaScore[alphaId_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId]; // Probability of observing given label at given time
else
ascore = 0;
alphaScore[alphaId] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t labelid_r = labelid + 2;
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
if (phoneId == blankTokenId)
{
// only constraint right side
if (t > phoneBoundId_r + delayConstraint - 1)
alphaScore[alphaId] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
alphaScore[alphaId] = LZERO;
}
}
}
}
}
}
}
// Calculate beta in forward-backward calculation, equation (10), (11) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
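// Sketch of the recursion (the mirror image of the alpha pass, moving backwards in time):
//   beta_t(s) = [ beta_{t+1}(s) (+) beta_{t+1}(s+1) (+) beta_{t+1}(s+2) ] + log p_t(label_s)
// with the (s+1) and (s+2) terms subject to the same blank/repeated-label conditions as in the
// alpha pass, and the last frame initialized at s = phoneNum-3 and s = phoneNum-2.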
template<class ElemType>
void _assignBetaScore(
const ElemType *prob,
ElemType *betaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const size_t numChannels,
const size_t uttNum,
const long t,
const size_t maxPhoneNum,
const size_t totalPhoneNum,
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0;uttId < uttNum;uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
size_t labelid_2 = labelid + 2;
size_t phoneId = (size_t)(phoneSeq[labelid]);
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
size_t probId = timeId*totalPhoneNum + phoneId;
size_t betaid = maxPhoneNum* timeId + phoneSeqId;
size_t timeId_1 = timeId + numChannels;
size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId;
size_t betaid_1 = betaid_0 + 1;
size_t betaid_2 = betaid_0 + 2;
if (t == frameNum - 1)
{
if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
{
betaScore[betaid] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId < phoneNum - 3)
{
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, betaScore[betaid_2]);
}
}
if (phoneSeqId < phoneNum - 2)
{
x = LogAdd(x, betaScore[betaid_1]);
}
x = LogAdd(x, betaScore[betaid_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId];
else
ascore = 0;
betaScore[betaid] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
if (phoneId == blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint - 1)
betaScore[betaid] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
betaScore[betaid] = LZERO;
}
}
}
}
}
}
}
// Calculate CTC score. equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
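// Sketch: per utterance, the total log-likelihood is read off at the first frame as
//   log P(l|x) = beta_0(s=1) (+) beta_0(s=2)
// ((+) = LogAdd); the caller (AssignCTCScore below) negates and sums these values over all
// utterances to form the loss.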
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
std::vector<ElemType>& totalScore,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const size_t numChannels,
const size_t maxPhoneNum)
{
#pragma omp parallel for
for (int uttId = 0; uttId < uttNum; uttId++) {
if (uttId < uttNum)
{
LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
totalScore[uttId] = betaScore[alphaId_0];
}
}
}
// Calculate derivative, equation (15) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
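// Sketch of the per-frame quantity accumulated below (log domain):
//   log gamma_t(s) = alpha_t(s) + beta_t(s) - log p_t(label_s) - log P(l|x)
// Occupancies of positions that share the same phone id at frame t are merged with LogAdd into
// CTCscore, which is then exponentiated (or clamped to 0 below LZERO) to give the soft alignment.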
template<class ElemType>
void _assignCTCScore(
ElemType *CTCscore,
ElemType *prob,
ElemType *alphaScore,
ElemType *betaScore,
ElemType *phoneSeq,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const std::vector<size_t>& uttFrameNum,
const size_t numChannels,
const size_t maxPhoneNum,
const size_t totalPhoneNum)
{
for (size_t uttId = 0;uttId < uttNum;uttId++) {
#pragma omp parallel for
for (int t = 0; t < uttFrameNum[uttId]; t++) {
size_t phoneNum = uttPhoneNum[uttId];
size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
ElemType P_lx = betaScore[alphaId_0];
for (int s = 1; s < phoneNum - 1; s++)
{
long phoneId = phoneSeq[uttId*maxPhoneNum + s];
size_t alphaId = maxPhoneNum* timeId + s;
size_t probId = timeId*totalPhoneNum + phoneId;
if (phoneId != SIZE_MAX)
{
ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
}
}
for (int s = 0; s < totalPhoneNum; s++)
{
size_t probId = timeId*totalPhoneNum + s;
ElemType logoccu = CTCscore[probId];
if (logoccu < LZERO)
CTCscore[probId] = 0.0f;
else
CTCscore[probId] = exp(logoccu);
}
}
}
}
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, CPUMatrix<ElemType> & totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
// Column wise representation of sequences in input matrices (each column is one sequence/utterance)
if (isColWise)
{
// Total number of phones
size_t totalPhoneNum = prob.GetNumRows();
size_t uttNum = uttFrameNum.size();
// Max number of phones in utterances in this minibatch
size_t maxPhoneNum = phoneSeq.GetNumRows();
for (size_t t = 0; t < maxFrameNum; t++)
{
_assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
{
_assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
std::vector<ElemType> scores(uttNum);
_assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);
_assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);
totalScore(0, 0) = 0.0;
for (size_t utt = 0; utt < uttNum; utt++)
{
totalScore(0,0) -= scores[utt];
}
return *this;
}
else {
LogicError("Only ColWise minibatch layout is supported.");
}
return *this;
}
/// the kernel function for RCRF backward computation
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores)
{
size_t iNumLab = alpha.GetNumRows();
size_t iNumPos = alpha.GetNumCols();
ElemType fSum;
ElemType fTmp = (ElemType) LZERO;
if (t == iNumPos - 1)
{
fSum = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LogAddD(fSum, alpha(j, t));
}
fTmp = alpha(k, t) - fSum;
beta(k, t) = fTmp;
}
else
{
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LZERO;
for (int m = 0; m < iNumLab; m++)
{
fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
}
fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
}
beta(k, t) = fTmp;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd)
{
int iNumPos = (int) alpha.GetNumCols();
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
for (size_t tPos = 0; tPos < iNumPos; tPos++)
{
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
#pragma omp parallel for
for (int i = 0; i < iNumLab; i++)
{
_rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
}
// transition score
int i = -1;
if (tPos == 0)
i = firstLbl;
else
{
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, tPos - 1) != 0)
{
i = ik;
break;
}
}
int j = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
{
if (lbls(ik, tPos) != 0)
{
j = ik;
break;
}
}
grd(j, i) -= 1.0;
}
};
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd,
const size_t tPos // position
)
{
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
{
ElemType fTmp = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
if (tPos == 0)
{
if (i == firstLbl)
{
fTmp = 0;
}
else
{
fTmp = (ElemType) LZERO;
}
}
else
{
fTmp = a(i, 0);
}
fTmp += pair_scores(j, i);
ElemType fSum = (ElemType) LZERO;
for (int k = 0; k < iNumLab; k++)
{
ElemType fTmp2;
if (tPos == 0)
{
if (k == firstLbl)
{
fTmp2 = 0;
}
else
{
fTmp2 = (ElemType) LZERO;
}
}
else
{
fTmp2 = a(k, 0);
}
fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
}
fTmp -= fSum;
fTmp += b(j, 0);
grd(j, i) += exp(fTmp);
}
}
};
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold)
{
auto& us = *this;
if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
LogicError("DropFrame: target matrix is not in the same size as gamm matrix.");
#pragma omp parallel for
foreach_column (j, label)
{
bool dropframe = false;
foreach_row (i, label)
{
if (fabs(label(i, j) - 1.0f) < 0.1)
{
if (gamma(i, j) < threshhold)
dropframe = true;
break;
}
}
if (dropframe) // only zero out the frames (columns) that were flagged for dropping
{
foreach_row (i, label)
{
us(i, j) = 0.0f;
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
auto& us = *this;
foreach_coord (i, j, us)
us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
return *this;
}
// note: this function does not depend on the <ElemType> parameter
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
if (numThreads == 0) // use default
return numThreads;
int mthreads = (int) std::thread::hardware_concurrency();
if (numThreads <= 0)
numThreads = std::max(1, mthreads + numThreads);
if (numThreads > mthreads)
numThreads = mthreads;
#ifdef _OPENMP
omp_set_num_threads(numThreads);
numThreads = omp_get_max_threads();
#ifdef USE_MKL
mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
openblas_set_num_threads(numThreads);
#endif
#endif
return numThreads;
}
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
int numThreads = (int)std::thread::hardware_concurrency();
#ifdef _OPENMP
numThreads = omp_get_max_threads();
#endif
return numThreads;
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
#ifdef USE_MKL
if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
RuntimeError("Could not set MKL compatible mode.");
#endif
}
// =======================================================================
// TensorView support
// =======================================================================
// To save time, this makes extensive use of templates and macros.
// -----------------------------------------------------------------------
// function to compute the value for a given output location (perform reduction if needed)
// -----------------------------------------------------------------------
// perform loop over reduction index m
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
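// Illustrative sketch of how the recursion unrolls for a two-dimensional reduction (m = 1):
// Loop<m=1> iterates over reducingOpDims[1], advancing each input pointer by reducingStrides[i][1];
// at every step it calls Loop<m=0>, which in turn iterates over reducingOpDims[0] with strides[i][0];
// the m = -1 specialization finally applies opfn to the current pointers, and the partial results
// are folded together with reductionOp (e.g. a sum or LogAdd) on the way back up.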
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m>
struct TensorOpReduction
{
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t) m];
double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
// need to descend into one loop deeper
aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides));
}
// Actually it would be nicer to return double, but we keep ElemType so that tests don't return different numbers than the previous implementation.
return static_cast<ElemType>(aggregate);
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&)
{
return opfn(pointers); // finally we are doing some work!!!
}
};
// perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices.
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, size_t N, int m>
struct TensorArgOpReduction
{
static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp)
{
size_t counter = 0;
size_t index = 0;
ElemType val = (ElemType)0;
switch (reducingOpDims.size())
{
case 3:
val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 2:
val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 1:
val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 0:
val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size());
}
return make_pair(val, index);
}
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp, size_t& counter, size_t& index)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t)m];
ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
bool update = false;
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
update = (aggregate > val);
break;
case ElementWiseOperator::opArgmax:
update = (aggregate < val);
break;
}
if (update)
{
aggregate = val;
index = counter - 1;
}
}
return aggregate;
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, size_t N>
struct TensorArgOpReduction<ElemType, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator reductionOp, size_t& counter, size_t& index)
{
counter++;
return *pointers[0]; // finally we are doing some work!!!
}
};
// -----------------------------------------------------------------------
// perform loop over regular index k for N-nary operations (N counting the output)
// -----------------------------------------------------------------------
// perform loop over regular index k and reducing index m for N operands (counting the output)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k>
struct TensorOpIteration
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t) k];
for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE.
// This is a very common case, e.g. adding vectors or computing the Sigmoid.
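// Conceptually, for a binary op such as element-wise addition this specialization reduces to
//   for (k = 0; k < K; k++) pc[k] = alpha * (pa[k] + pb[k]) + beta * pc[k];
// split into the three beta/alpha special cases below so the compiler sees simple, vectorizable loops.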
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
ElemType* pc = pointers[2];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default).
// TODO: The signedness of k (required for omp) causes an extra sign-extend.
// TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it?
}
};
// and unary
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
};
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1>
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// we are at element level for the result: perform the op (there may still be reduction)
ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
// scale
val *= alpha;
// combine with previous value in target matrix, then write it out
auto* pout = pointers.back();
if (beta != 0)
val += beta * *pout;
// save
*pout = val;
return;
}
};
// perform loop over regular index k and reducing index m for N operands (counting the output). The difference
// between TensorOpIteration and TensorArgOpIteration is that the latter stores the index of the result instead of
// the result itself. They are kept separate rather than combined for performance reasons.
template <class ElemType, size_t N, int k>
struct TensorArgOpIteration
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t)k];
for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
template <class ElemType, size_t N>
struct TensorArgOpIteration<ElemType, N, -1>
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// we are at element level for the result: perform the op (there may still be reduction)
auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp);
auto* pout = pointers.back();
*pout = (ElemType)val.second;
return;
}
};
// -----------------------------------------------------------------------
// map runtime parameters N to template parameters
// -----------------------------------------------------------------------
// tensor operation with k+1 dimensions (-1 means scalar)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k>
static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
size_t dims = reducingOpDims.size();
switch (dims)
{
case 2:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
{
// if all leading dimensions are 1, we can let the compiler do some unrolling
bool leadingAllOne = true;
for (size_t i = 0; i < N; i++)
leadingAllOne &= k >= 0 && regularStrides[i][0] == 1;
if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
default:
LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different k.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
pointers[i] += offsets[i];
size_t dims = regularOpDims.size();
switch (dims)
{
case 4:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 3:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 2:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different reductionOps
template <class ElemType, typename OPFN, size_t N>
static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// BUGBUG: Always using 'double' as the type of the aggregator, even for ElemType==float. Reason: otherwise some e2e tests would fail, as historically we
// used double for the sum aggregator. But:
//  * for min and max reductions this is meaningless.
//  * It is not consistent with what we do on GPU; there we aggregate on ElemType.
//  * It costs performance.
// TODO: adapt e2e tests to run with an aggregator of type ElemType.
#define CaseTensorOpWithFnAndReduction(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \
{ \
return Op##oper(a, b); \
}, \
offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
switch (reductionOp)
{
CaseTensorOpWithFnAndReduction(Sum);
CaseTensorOpWithFnAndReduction(LogSum);
CaseTensorOpWithFnAndReduction(Min);
CaseTensorOpWithFnAndReduction(Max);
CaseTensorOpWithFnAndReduction(ElementwiseProduct);
default:
LogicError("Specified ElementWiseOperator op %d not suported as reduction operation.", (int)reductionOp);
}
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; also map op to a lambda
// -----------------------------------------------------------------------
// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
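// Illustrative call pattern (an assumption for exposition, not taken verbatim from Matrix.cpp):
// a plain element-wise unary op over n elements with no reduction would pass offsets = {0, 0},
// regularOpDims = {n}, regularStrides = {{1}, {1}} and empty reducingOpDims/reducingStrides,
// so the dispatch below ends up in the vectorizable innermost-loop specialization above.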
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum &&
reductionOp != ElementWiseOperator::opLogSum &&
reductionOp != ElementWiseOperator::opMin &&
reductionOp != ElementWiseOperator::opMax &&
reductionOp != ElementWiseOperator::opElementwiseProduct)
InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");
// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
#define CaseUnaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
{ \
return Op##oper((*(pp[0]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 2> pointers = {a.Data(), Data()};
switch (op)
{
ForAllUnaryOps(CaseUnaryTensorOp);
default:
LogicError("TensorOp: Unknown unary op code %d.", (int) op);
}
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum.");
#define CaseBinaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 3> pointers = {a.Data(), b.Data(), Data()};
switch (op)
{
ForAllBinaryOps(CaseBinaryTensorOp);
default:
LogicError("TensorOp: Unknown op binary code %d.", (int) op);
}
}
// perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum.");
#define CaseTernaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), Data()};
switch (op)
{
ForAllTernaryOps(CaseTernaryTensorOp);
default:
LogicError("TensorOp: Unknown ternary op code %d.", (int) op);
}
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
int minArg = -1;
ElemType minValue = std::numeric_limits<ElemType>::max();
#pragma omp parallel
{
int localMinArg = -1;
ElemType localMinValue = std::numeric_limits<ElemType>::max();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMinValue > Data()[index])
{
localMinArg = index;
localMinValue = Data()[index];
}
// If we have more than one min value, select the one with the lower index.
else if ((localMinValue == Data()[index]) && (localMinArg > index))
{
localMinArg = index;
}
}
#pragma omp critical
{
if (minValue > localMinValue)
{
minArg = localMinArg;
minValue = localMinValue;
}
// If we have more than one min value, select the one with the lower index.
else if ((minValue == localMinValue) && (minArg > localMinArg))
{
minArg = localMinArg;
}
}
}
return minArg;
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
int maxArg = -1;
ElemType maxValue = std::numeric_limits<ElemType>::lowest(); // lowest(), not min(): for floating-point types min() is the smallest positive value
#pragma omp parallel
{
int localMaxArg = -1;
ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMaxValue < Data()[index])
{
localMaxArg = index;
localMaxValue = Data()[index];
}
// If we have more than one max value, select the one with the lower index.
else if ((localMaxValue == Data()[index]) && (localMaxArg > index))
{
localMaxArg = index;
}
}
#pragma omp critical
{
if (maxValue < localMaxValue)
{
maxArg = localMaxArg;
maxValue = localMaxValue;
}
// If we have more than one max value, select the one with the lower index.
else if ((maxValue == localMaxValue) && (maxArg > localMaxArg))
{
maxArg = localMaxArg;
}
}
}
return maxArg;
}
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
return Argmin();
break;
case ElementWiseOperator::opArgmax:
return Argmax();
break;
}
InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
return -1;
}
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opArgmin &&
reductionOp != ElementWiseOperator::opArgmax)
InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
if (GetNumElements() == 1)
{
Data()[0] = (ElemType) a.ArgOp(reductionOp);
}
else
{
const size_t N = 2;
array<ElemType*, N> pointers = { a.Data(), Data() };
for (size_t i = 0; i < N; i++)
pointers[i] += offsets[i];
switch (regularOpDims.size())
{
case 2:
TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 1:
TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 0:
TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size());
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, size_t indices_step)
{
if (!indices || !value || !data)
LogicError("ScatterValues: input data is null.");
#pragma omp parallel
{
int ithread = omp_get_thread_num();
int nthread = omp_get_num_threads();
for (auto i = 0; i < num_indices; i++)
{
auto col_r = indices[i * indices_step];
if (std::isnan(col_r) || col_r < 0)
continue;
auto col = (size_t)col_r;
// ignore the elements that are not partitioned into this thread
if (col % nthread != ithread)
continue;
if (col >= cols)
InvalidArgument("ScatterValues: Indices map out of bounds. %ld >= %ld", (long int)col, (long int)cols);
auto index = col * rows;
auto offset = i * rows;
for (auto j = 0; j < rows; j++)
data[index + j] = data[index + j] + alpha * value[offset + j];
}
}
}
// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);
// Support <short>
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
3d7pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
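/* Typical use in the timing loop below (a sketch; the exact accounting happens after the sweep):
 *   gettimeofday(&start, 0); ... stencil sweep ... gettimeofday(&end, 0);
 *   timeval_subtract(&result, &end, &start);
 *   tdiff = result.tv_sec + result.tv_usec * 1.0e-6;
 */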
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; // initialized so the sizes are well-defined when fewer arguments are given
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(16*t3+Nx+12,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),128*t4+126),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
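/* 7-point variable-coefficient stencil: the centre point and its six
axis neighbours, each scaled by its own coef[] array */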
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
blas.c
|
#include "blas.h"
#include "utils.h"
#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out)
{
int b,i,j,k;
int in_c = out_c/(stride*stride);
//printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
//printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride);
for(b = 0; b < batch; ++b){
for(k = 0; k < out_c; ++k){
for(j = 0; j < out_h; ++j){
for(i = 0; i < out_w; ++i){
int in_index = i + out_w*(j + out_h*(k + out_c*b));
int c2 = k % in_c;
int offset = k / in_c;
int w2 = i*stride + offset % stride;
int h2 = j*stride + offset / stride;
int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b));
if(forward) out[out_index] = x[in_index]; // used by default for forward (i.e. forward = 0)
else out[in_index] = x[out_index];
}
}
}
}
}
void flatten(float *x, int size, int layers, int batch, int forward)
{
float* swap = (float*)xcalloc(size * layers * batch, sizeof(float));
int i,c,b;
for(b = 0; b < batch; ++b){
for(c = 0; c < layers; ++c){
for(i = 0; i < size; ++i){
int i1 = b*layers*size + c*size + i;
int i2 = b*layers*size + i*layers + c;
if (forward) swap[i2] = x[i1];
else swap[i1] = x[i2];
}
}
}
memcpy(x, swap, size*layers*batch*sizeof(float));
free(swap);
}
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
int i;
for(i = 0; i < n; ++i){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
int i;
for(i = 0; i < n; ++i){
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
static float relu(float src) {
if (src > 0) return src;
return 0;
}
void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int id;
#pragma omp parallel for
for (id = 0; id < size; ++id) {
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float sum = 1, max_val = -FLT_MAX;
int i;
if (weights && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
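// sum now holds the normalization denominator: eps plus the sum of relu(w),
// or of expf(w - max_val) (max_val is subtracted for numerical stability)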
}
if (weights) {
float w = weights[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out[id] = in[id] * w; // [0 or c or (c, h ,w)]
}
else out[id] = in[id];
// layers
for (i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *add = layers_output[i];
if (weights) {
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)]
}
else out[out_index] += add[add_index];
}
}
}
}
void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers,
float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalization)
{
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int id;
#pragma omp parallel for
for (id = 0; id < size; ++id) {
int src_id = id;
int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float grad = 1, sum = 1, max_val = -FLT_MAX;
int i;
if (weights && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
/*
grad = 0;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float delta_w = delta_in[id] * in[id];
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum;
}
*/
}
if (weights) {
float w = weights[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
weight_updates[src_i / step] += delta_in[id] * in[id] * grad;
}
else delta_out[id] += delta_in[id];
// layers
for (i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *layer_delta = layers_delta[i];
if (weights) {
float *add = layers_output[i];
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)]
weight_updates[weights_index] += delta_in[id] * add[add_index] * grad;
}
else layer_delta[add_index] += delta_in[id];
}
}
}
}
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
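// stride > 1 when the incoming map (w1 x h1) is larger than the output (w2 x h2),
// sample > 1 when it is smaller; the index math below walks both at matching positions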
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int i,j,k,b;
for(b = 0; b < batch; ++b){
for(k = 0; k < minc; ++k){
for(j = 0; j < minh; ++j){
for(i = 0; i < minw; ++i){
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
}
}
}
}
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1./(batch * spatial);
int i,j,k;
for(i = 0; i < filters; ++i){
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
}
void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1./(batch * spatial - 1);
int i,j,k;
for(i = 0; i < filters; ++i){
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += pow((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
}
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .000001f));
}
}
}
}
void const_cpu(int N, float ALPHA, float *X, int INCX)
{
int i;
for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}
void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}
void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}
void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
int i;
for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}
void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX)
{
int i;
for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA;
}
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
int i;
if (INCX == 1 && ALPHA == 0) {
memset(X, 0, N * sizeof(float));
}
else {
for (i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}
}
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i, j;
int index = 0;
for(j = 0; j < B; ++j) {
for(i = 0; i < NX; ++i){
if(X) X[j*NX + i] += OUT[index];
++index;
}
for(i = 0; i < NY; ++i){
if(Y) Y[j*NY + i] += OUT[index];
++index;
}
}
}
void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i, j;
int index = 0;
for(j = 0; j < B; ++j) {
for(i = 0; i < NX; ++i){
OUT[index++] = X[j*NX + i];
}
for(i = 0; i < NY; ++i){
OUT[index++] = Y[j*NY + i];
}
}
}
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
{
int i;
for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
}
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float diff = truth[i] - pred[i];
float abs_val = fabs(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float diff = truth[i] - pred[i];
error[i] = fabs(diff);
delta[i] = diff > 0 ? 1 : -1;
}
}
void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p) - (1-t)*log(1-p);
delta[i] = t-p;
}
}
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float diff = truth[i] - pred[i];
error[i] = diff * diff;
delta[i] = diff;
}
}
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
float dot = 0;
for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
return dot;
}
void softmax(float *input, int n, float temp, float *output, int stride)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
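// subtracting largest/temp before exponentiating keeps exp() from overflowing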
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int g, b;
for(b = 0; b < batch; ++b){
for(g = 0; g < groups; ++g){
softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride);
}
}
}
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
int i, j, k, b;
for (b = 0; b < batch; ++b) {
for (k = 0; k < c; ++k) {
for (j = 0; j < h*stride; ++j) {
for (i = 0; i < w*stride; ++i) {
int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride;
int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
if (forward) out[out_index] = scale*in[in_index];
else in[in_index] += scale*out[out_index];
}
}
}
}
}
void constrain_cpu(int size, float ALPHA, float *X)
{
int i;
for (i = 0; i < size; ++i) {
X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i]));
}
}
void fix_nan_and_inf_cpu(float *input, size_t size)
{
int i;
for (i = 0; i < size; ++i) {
float val = input[i];
if (isnan(val) || isinf(val))
input[i] = 1.0f / (float)(i + 1); // pseudo random value (i + 1 avoids a division by zero at i == 0)
}
}
// Euclidean_norm
float math_vector_length(float *A, unsigned int feature_size)
{
float sum = 0;
int i;
for (i = 0; i < feature_size; ++i)
{
sum += A[i] * A[i];
}
float vector_length = sqrtf(sum);
return vector_length;
}
float cosine_similarity(float *A, float *B, unsigned int feature_size)
{
float mul = 0.0, d_a = 0.0, d_b = 0.0;
int i;
for(i = 0; i < feature_size; ++i)
{
mul += A[i] * B[i];
d_a += A[i] * A[i];
d_b += B[i] * B[i];
}
float similarity;
float divider = sqrtf(d_a) * sqrtf(d_b);
if (divider > 0) similarity = mul / divider;
else similarity = 0;
return similarity;
}
// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive(int i, int l, int *labels, int num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim)
{
if (i == l) {
fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %d, l = %d \n", i, l);
getchar();
}
const float sim = cos_sim[i*num_of_samples + l]; // cosine_similarity(z[i], z[l], feature_size);
const float numerator = expf(sim / temperature);
float denominator = 0;
int k;
for (k = 0; k < num_of_samples; ++k) {
//if (k != i && labels[k] != labels[i]) {
if (k != i) {
const float sim_den = cos_sim[k*num_of_samples + l]; // cosine_similarity(z[k], z[l], feature_size);
denominator += expf(sim_den / temperature);
}
}
float result = numerator / denominator;
return result;
}
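// In other words, for a fixed column l the value returned above is a softmax over
// temperature-scaled cosine similarities:
//   P(i, l) = expf(sim(i, l) / T) / sum_{k != i} expf(sim(k, l) / T)
// which is the pairwise probability used by the contrastive gradients below.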
// i - id of the current sample in mini_batch
// labels[num_of_samples] - array with class_id for each sample in the current mini_batch
// z[num_of_samples][feature_size] - array of arrays with contrastive features (output of conv-layer, e.g. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), e.g. temperature = 0.07: Supervised Contrastive Learning
void grad_contrastive_loss_positive(int i, int *labels, int num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta)
{
const float vec_len = math_vector_length(z[i], feature_size);
int j;
int N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j]) N++;
}
if (N == 0 || temperature == 0 || vec_len == 0) {
fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f \n", N, temperature, vec_len);
getchar();
}
const float mult = 1 / ((N - 1) * temperature * vec_len);
for (j = 0; j < num_of_samples; ++j) {
//if (i != j && (i/2) == (j/2)) {
if (i != j && labels[i] == labels[j]) {
const float sim = cos_sim[i*num_of_samples + j]; // cosine_similarity(z[i], z[j], feature_size);
const float P = p_constrastive[i*num_of_samples + j]; // P_constrastive(i, j, labels, num_of_samples, z, feature_size, temperature, cos_sim);
//const float custom_pos_mult = 1 - sim;
int m;
for (m = 0; m < feature_size; ++m) {
const float d = mult*(sim * z[i][m] - z[j][m]) * (1 - P); // good
//const float d = mult*(sim * z[j][m] - z[j][m]) * (1 - P); // bad
// printf(" pos: z[j][m] = %f, z[i][m] = %f, d = %f, sim = %f \n", z[j][m], z[i][m], d, sim);
delta[m] -= d;
}
}
}
}
// i - id of the current sample in mini_batch
// labels[num_of_samples] - array with class_id for each sample in the current mini_batch
// z[num_of_samples][feature_size] - array of arrays with contrastive features (output of conv-layer, e.g. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), e.g. temperature = 0.07: Supervised Contrastive Learning
void grad_contrastive_loss_negative(int i, int *labels, int num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta)
{
const float vec_len = math_vector_length(z[i], feature_size);
int j;
int N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j]) N++;
}
if (N == 0 || temperature == 0 || vec_len == 0) {
fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f \n", N, temperature, vec_len);
getchar();
}
const float mult = 1 / ((N - 1) * temperature * vec_len);
for (j = 0; j < num_of_samples; ++j) {
//if (i != j && (i/2) == (j/2)) {
if (i != j && labels[i] == labels[j]) {
int k;
for (k = 0; k < num_of_samples; ++k) {
//if (k != i && k != j && labels[k] != labels[i]) {
if (k != i && k != j) {
const float sim = cos_sim[i*num_of_samples + k]; // cosine_similarity(z[i], z[k], feature_size);
const float P = p_constrastive[i*num_of_samples + k]; // P_constrastive(i, k, labels, num_of_samples, z, feature_size, temperature, cos_sim);
//const float custom_pos_mult = 1 + sim;
int m;
for (m = 0; m < feature_size; ++m) {
const float d = mult*(z[k][m] - sim * z[i][m]) * P; // good
//const float d = mult*(z[k][m] - sim * z[k][m]) * P; // bad
//printf(" neg: z[k][m] = %f, z[i][m] = %f, d = %f, sim = %f \n", z[k][m], z[i][m], d, sim);
delta[m] -= d;
}
}
}
}
}
}
|
GB_unaryop__identity_uint16_uint8.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_uint8
// op(A') function: GB_tran__identity_uint16_uint8
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
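// For this identity operator, GB_CAST_OP (p, p) expands to just:
//   uint8_t aij = Ax [p] ; uint16_t x = (uint16_t) aij ; Cx [p] = x ;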
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint16_uint8
(
uint16_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "aux.h"
#include "omp.h"
#define MAX_THREADS 8
#define MAXIT 1000000
double sequential_minimization(double s, int p, double x0, double y0);
double parallel_minimization(double s, int p, double x0, double y0);
int main(int argc, char **argv){
long t_start, t_end;
double s, x0, y0, z;
int i, p;
// Command line argument: array length
if ( argc == 3 ) {
p = atoi(argv[1]); /* the number of points to be evaluated */
s = atof(argv[2]); /* the step length */
} else {
printf("Usage:\n\n ./main p s\n\nwhere p is the number of points around the current minimum where the function has to be evaluated\nand s is the step size.\n");
return 1;
}
/* No need to change these settings unless for debugging */
x0 = 10; y0 = 10;
t_start = usecs();
z = sequential_minimization(s, p, x0, y0);
t_end = usecs();
printf("Sequential time : %8.2f msec.\n",((double)t_end-t_start)/1000.0);
printf("\n\n");
t_start = usecs();
z = parallel_minimization(s, p, x0, y0);
t_end = usecs();
printf("Parallel time : %8.2f msec.\n",((double)t_end-t_start)/1000.0);
return 0;
}
double sequential_minimization(double s, int p, double x0, double y0){
int cnt, i;
double z, x, y, nx, ny, nz;
double xyz[MAX_THREADS][3];
xyz[0][0] = x0; xyz[0][1]=y0; xyz[0][2] = evaluate(xyz[0][0], xyz[0][1]);
for(cnt=0;cnt<MAXIT;cnt++){
x = xyz[0][0];
y = xyz[0][1];
z = xyz[0][2];
/* Evaluate the function at p points spaced evenly on a circle of
radius s around the current minimum */
for(i=0; i<p; i++){
nx = x+ s*cos(2.0*M_PI*i/((double)p));
ny = y+ s*sin(2.0*M_PI*i/((double)p));
nz = evaluate(nx,ny);
/* printf("%f %f %f\n",nx,ny,nz); */
/* If the evaluation at this point is lower than the current
minimum, set this point as the new minimum */
if(nz<xyz[0][2]){
xyz[0][2] = nz;
xyz[0][0] = nx;
xyz[0][1] = ny;
}
}
/* Uncomment the line below if you want to debug */
/* printf("%4d -- %5.2f %5.2f %10.4f\n",cnt,xyz[0][0], xyz[0][1], xyz[0][2]); */
/* If no improvement over the old minimum, terminate */
if(xyz[0][2]>=z) break;
}
printf("Minimum found is %.10f at x=%.4f, y=%.4f in %d steps\n",xyz[0][2],xyz[0][0],xyz[0][1],cnt);
return xyz[0][2];
}
double parallel_minimization(double s, int p, double x0, double y0){
int cnt, i;
double z, x, y, nx, ny, nz;
double xyz[MAX_THREADS][3];
xyz[0][0] = x0; xyz[0][1]=y0; xyz[0][2] = evaluate(xyz[0][0], xyz[0][1]);
{
for(cnt=0;cnt<MAXIT;cnt++){
x = xyz[0][0];
y = xyz[0][1];
z = xyz[0][2];
/* Evaluate the function at p points spaced evenly on a circle of
radius s around the current minimum */
#pragma omp parallel for private(nx, ny, nz)
for(i=0; i<p; i++){
{
nx = x+ s*cos(2.0*M_PI*i/((double)p));
ny = y+ s*sin(2.0*M_PI*i/((double)p));
nz = evaluate(nx,ny);
/* printf("%f %f %f\n",nx,ny,nz); */
/* If the evaluation at this point is lower than the current
minimum, set this point as the new minimum */
/* Protect the shared minimum with a critical section: without it,
concurrent threads race on xyz[0] */
#pragma omp critical
if(nz<xyz[0][2]){
xyz[0][2] = nz;
xyz[0][0] = nx;
xyz[0][1] = ny;
}
}
}
/* Uncomment the line below if you want to debug */
/* printf("%4d -- %5.2f %5.2f %10.4f\n",cnt,xyz[0][0], xyz[0][1], xyz[0][2]); */
/* If no improvement over the old minimum, terminate */
if(xyz[0][2]>=z) break;
}
}
printf("Minimum found is %.10f at x=%.4f, y=%.4f in %d steps\n",xyz[0][2],xyz[0][0],xyz[0][1],cnt);
return xyz[0][2];
}
|
lu_decompose.c
|
/**
* \file
* \brief [LU decomposition](https://en.wikipedia.org/wiki/LU_decompositon) of a
* square matrix
* \author [Krishna Vedala](https://github.com/kvedala)
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Perform LU decomposition on matrix
* \param[in] A matrix to decompose
* \param[out] L output L matrix
* \param[out] U output U matrix
* \param[in] mat_size input square matrix size
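*
* Doolittle scheme used below: for each row index i (0-based),
*   U[i][k] = A[i][k] - sum_{j<i} L[i][j] * U[j][k]               for k = i..n-1
*   L[k][i] = (A[k][i] - sum_{j<i} L[k][j] * U[j][i]) / U[i][i]   for k = i+1..n-1
* with L[i][i] = 1.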
*/
int lu_decomposition(double **A, double **L, double **U, int mat_size)
{
int row, col, j;
// regularize each row
for (row = 0; row < mat_size; row++)
{
// Upper triangular matrix
#ifdef _OPENMP
#pragma omp for
#endif
for (col = row; col < mat_size; col++)
{
// Summation of L[i,j] * U[j,k]
double lu_sum = 0.;
for (j = 0; j < row; j++) lu_sum += L[row][j] * U[j][col];
// Evaluate U[i,k]
U[row][col] = A[row][col] - lu_sum;
}
// Lower triangular matrix
#ifdef _OPENMP
#pragma omp for
#endif
for (col = row; col < mat_size; col++)
{
if (row == col)
{
L[row][col] = 1.;
continue;
}
// Summation of L[k,j] * U[j,i]
double lu_sum = 0.;
for (j = 0; j < row; j++) lu_sum += L[col][j] * U[j][row];
// Evaluate L[k,i]
L[col][row] = (A[col][row] - lu_sum) / U[row][row];
}
}
return 0;
}
/** Function to display square matrix */
void display(double **A, int N)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("% 3.3g \t", A[i][j]);
}
putchar('\n');
}
}
/** Main function */
int main(int argc, char **argv)
{
int mat_size = 3; // default matrix size
const int range = 10;
const int range2 = range >> 1;
if (argc == 2)
mat_size = atoi(argv[1]);
srand(time(NULL)); // random number initializer
/* Create a square matrix with random values */
double **A = (double **)malloc(mat_size * sizeof(double *));
double **L = (double **)malloc(mat_size * sizeof(double *)); // output
double **U = (double **)malloc(mat_size * sizeof(double *)); // output
for (int i = 0; i < mat_size; i++)
{
// calloc so that all values are '0' by default
A[i] = (double *)calloc(mat_size, sizeof(double));
L[i] = (double *)calloc(mat_size, sizeof(double));
U[i] = (double *)calloc(mat_size, sizeof(double));
for (int j = 0; j < mat_size; j++)
/* create random values in the limits [-range2, range - range2 - 1] */
A[i][j] = (double)(rand() % range - range2);
}
lu_decomposition(A, L, U, mat_size);
printf("A = \n");
display(A, mat_size);
printf("\nL = \n");
display(L, mat_size);
printf("\nU = \n");
display(U, mat_size);
/* Free dynamically allocated memory */
for (int i = 0; i < mat_size; i++)
{
free(A[i]);
free(L[i]);
free(U[i]);
}
free(A);
free(L);
free(U);
return 0;
}
|
rawMD5_fmt_plug.c
|
/*
* Raw-MD5 (thick) based on Raw-MD4 w/ mmx/sse/intrinsics
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* OMP added May 2013, JimF
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD5);
#else
#include <string.h>
#include "arch.h"
#include "md5.h"
#include "misc.h" // error()
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "base64_convert.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 256 // core i7
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD5"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$dynamic_0$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_TAG2 "{MD5}"
#define FORMAT_TAG2_LEN (sizeof(FORMAT_TAG2) - 1)
static struct fmt_tests tests[] = {
{"5a105e8b9d40e1329780d62ea2265d8a", "test1"},
{FORMAT_TAG "5a105e8b9d40e1329780d62ea2265d8a", "test1"},
{"098f6bcd4621d373cade4e832627b4f6", "test"},
{"098F6BCD4621D373CADE4E832627B4F6", "test"},
{FORMAT_TAG "378e2c4a07968da2eca692320136433d", "thatsworking"},
{FORMAT_TAG "8ad8757baa8564dc136c1e07507f4a98", "test3"},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
#ifdef DEBUG
{FORMAT_TAG "c9ccf168914a1bcfc3229f1948e67da0","1234567890123456789012345678901234567890123456789012345"},
#if PLAINTEXT_LENGTH >= 80
{FORMAT_TAG "57edf4a22be3c955ac49da2e2107b67a","12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
#endif
#endif
{"{MD5}CY9rzUYh03PK3k6DJie09g==", "test"},
{NULL}
};
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
static uint32_t (*saved_key)[MD5_BUF_SIZ*NBKEYS];
static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[4];
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#else
self->params.max_keys_per_crypt *= 10;
#endif
#ifndef SIMD_COEF_32
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
#else
saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
MEM_FREE(saved_len);
#endif
}
/* Convert {MD5}CY9rzUYh03PK3k6DJie09g== to 098f6bcd4621d373cade4e832627b4f6 */
static char *prepare(char *fields[10], struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
if (!strncmp(fields[1], FORMAT_TAG2, FORMAT_TAG2_LEN) && strlen(fields[1]) == FORMAT_TAG2_LEN+24) {
int res;
res = base64_convert(&fields[1][FORMAT_TAG2_LEN], e_b64_mime, 24,
out, e_b64_hex, sizeof(out),
flg_Base64_HEX_LOCASE, 0);
if (res >= 0)
return out;
}
return fields[1];
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
if (*p == '$' && !strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = p;
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q && q - p == CIPHERTEXT_LENGTH;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
if (ciphertext[0] == '$' &&
!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH);
strlwr(&out[TAG_LENGTH]);
return out;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned long dummy;
unsigned int i[DIGEST_SIZE/sizeof(unsigned int)];
} _out;
unsigned int *out = _out.i;
unsigned int i;
unsigned int temp;
ciphertext += TAG_LENGTH;
for (i=0; i<4; i++)
{
temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
#if ARCH_LITTLE_ENDIAN
out[i]=temp;
#else
out[i]=JOHNSWAP(temp);
#endif
}
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
md5_reverse(out);
#endif
return out;
}
static char *source(char *source, void *binary)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
uint32_t b[4];
char *p;
int i, j;
memcpy(b, binary, sizeof(b));
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
md5_unreverse(b);
#endif
#if ARCH_LITTLE_ENDIAN==0
alter_endianity(b, 16);
#endif
p = &out[TAG_LENGTH];
for (i = 0; i < 4; i++)
for (j = 0; j < 8; j++)
*p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf];
return out;
}
#ifdef SIMD_COEF_32
static void set_key(char *_key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
const uint32_t *key = (uint32_t*)_key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const uint32_t *key = (uint32_t*)(is_aligned(_key, sizeof(uint32_t)) ?
_key : strcpy(buf_aligned, _key));
#endif
uint32_t *keybuffer = &((uint32_t*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32];
uint32_t *keybuf_word = keybuffer;
unsigned int len;
uint32_t temp;
len = 0;
while((temp = *key++) & 0xff) {
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xff) | (0x80 << 8);
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xffff) | (0x80 << 16);
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = temp | (0x80U << 24);
len+=3;
goto key_cleaning;
}
*keybuf_word = temp;
len += 4;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
#ifdef DEBUG
/* This function is highly optimized and assumes that we are
never ever given a key longer than fmt_params.plaintext_length.
If we are, buffer overflows WILL happen */
if (len > PLAINTEXT_LENGTH) {
fprintf(stderr, "\n** Core bug: got len %u\n'%s'\n", len, _key);
error();
}
#endif
key_cleaning:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
keybuffer[14*SIMD_COEF_32] = len << 3;
}
#else
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
static char *get_key(int index)
{
static char out[PLAINTEXT_LENGTH + 1];
unsigned int i;
uint32_t len = ((uint32_t*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32] >> 3;
for (i=0;i<len;i++)
out[i] = ((char*)saved_key)[GETPOS(i, index)];
out[i] = 0;
return (char*)out;
}
#else
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
#endif
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < loops; index++) {
#if SIMD_COEF_32
SIMDmd5body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index], saved_len[index]);
MD5_Final((unsigned char *)crypt_key[index], &ctx);
#endif
}
return count;
}
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
unsigned int x, y;
#if 1
const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32;
#else
const unsigned int c = SIMD_PARA_MD5;
#endif
for (y = 0; y < c; y++)
for (x = 0; x < SIMD_COEF_32; x++)
{
if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] )
return 1;
}
return 0;
#else
unsigned int index = 0;
#if 1
for (index = 0; index < count; index++)
#endif
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
#endif
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int x = index&(SIMD_COEF_32-1);
unsigned int y = (unsigned int)index/SIMD_COEF_32;
return ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4];
#else
return !memcmp(binary, crypt_key[index], DIGEST_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_COEF_32
uint32_t crypt_key[DIGEST_SIZE / 4];
MD5_CTX ctx;
char *key = get_key(index);
MD5_Init(&ctx);
MD5_Update(&ctx, key, strlen(key));
MD5_Final((void*)crypt_key, &ctx);
#ifdef REVERSE_STEPS
md5_reverse(crypt_key);
#endif
return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE);
#else
return 1;
#endif
}
#ifdef SIMD_COEF_32
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif
struct fmt_main fmt_rawMD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG, FORMAT_TAG2 },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
private-clauseModificado.c
|
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main()
{
int i, n = 7;
int a[n], suma;
for (i=0; i<n; i++)
a[i] = i;
suma=10;
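/* Note: private(suma) gives every thread its own uninitialized copy of suma,
so the per-thread sums printed below start from an indeterminate value; the
suma=10 above is not copied in (firstprivate(suma) would do that). */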
#pragma omp parallel private(suma)
{
#pragma omp for
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
}
printf("\n thread %d suma= %d", omp_get_thread_num(), suma);
}
printf("\n");
}
|
convolution_1x1_pack4to16.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
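// r0 advances 8 floats (2 pack4 pixels) per output below, so one output row
// consumes 2*outw input pixels; tailstep skips the remaining (w - 2*outw)
// pixels of that row plus one full row of w pixels (vertical stride 2), in floats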
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m128 _v = _mm_load_ps(r0);
_mm_store_ps(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
3d25pt.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2; // allocated just below with the full Nz extent
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) {
for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(32*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(32*t3+Nx+19,64));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),16*t4+14);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
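/* 25-point stencil update: 2*u_curr - u_prev plus roc2 times a weighted sum of
the centre point (coef0) and the +-1..+-4 neighbours along each axis (coef1..coef4) */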
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
DRB065-pireduction-orig-no.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic PI calculation using reduction
*/
#define num_steps 2000000000
#include <stdio.h>
int main(int argc, char** argv)
{
double pi = 0.0;
long int i;
double x, interval_width;
interval_width = 1.0/(double)num_steps;
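/* Midpoint-rule approximation of the integral of 4/(1+x^2) over [0,1], which
   equals pi. The reduction(+:pi) clause gives every thread its own partial
   sum and combines them after the loop, so the shared accumulation is
   race-free. */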
#pragma omp parallel for private(i, x) reduction(+:pi)
for (i = 0; i < num_steps; i++) {
x = (i + 0.5) * interval_width;
pi += 1.0 / (x*x + 1.0);
}
pi = pi * 4.0 * interval_width;
printf ("PI=%f\n", pi);
return 0;
}
|
libperf.c
|
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
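/* Helper macros for the capability checks below: ATOMIC_OP_CONFIG maps the
 * message size (4 or 8 bytes) to the matching 32/64-bit atomic flag and fails
 * the test for any other size; ATOMIC_OP_CHECK verifies that the interface
 * actually advertises all of the required atomic capability bits. */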
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
"message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[_op], (_size)); \
return _status; \
}
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
#_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
typedef struct {
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t worker_addr_len;
size_t total_wireup_len;
} ucp;
};
size_t rkey_size;
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];
static const char *perf_iface_ops[] = {
[ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short",
[ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short",
[ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep",
[ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability",
[ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback",
[ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback",
[ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy"
};
static const char *perf_atomic_op[] = {
[UCT_ATOMIC_OP_ADD] = "add",
[UCT_ATOMIC_OP_AND] = "and",
[UCT_ATOMIC_OP_OR] = "or" ,
[UCT_ATOMIC_OP_XOR] = "xor"
};
static const char *perf_atomic_fop[] = {
[UCT_ATOMIC_OP_ADD] = "fetch-add",
[UCT_ATOMIC_OP_AND] = "fetch-and",
[UCT_ATOMIC_OP_OR] = "fetch-or",
[UCT_ATOMIC_OP_XOR] = "fetch-xor",
[UCT_ATOMIC_OP_SWAP] = "swap",
[UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem)
{
ucs_status_t status;
status = uct_iface_mem_alloc(perf->uct.iface, length,
flags, "perftest", alloc_mem);
if (status != UCS_OK) {
ucs_error("failed to allocate memory: %s", ucs_status_string(status));
return status;
}
ucs_assert(alloc_mem->md == perf->uct.md);
return UCS_OK;
}
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem)
{
uct_iface_mem_free(alloc_mem);
}
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count)
{
if ((dst_mem_type != UCS_MEMORY_TYPE_HOST) ||
(src_mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_error("wrong memory type passed src - %d, dst - %d",
src_mem_type, dst_mem_type);
} else {
memcpy(dst, src, count);
}
}
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
unsigned flags;
size_t buffer_size;
if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* TODO use params->alignment */
flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
UCT_MD_MEM_FLAG_NONBLOCK : 0;
flags |= UCT_MD_MEM_ACCESS_ALL;
/* Allocate send buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.send_mem);
if (status != UCS_OK) {
goto err;
}
perf->send_buffer = perf->uct.send_mem.address;
/* Allocate receive buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.recv_mem);
if (status != UCS_OK) {
goto err_free_send;
}
perf->recv_buffer = perf->uct.recv_mem.address;
/* Allocate IOV datatype memory */
perf->params.msg_size_cnt = params->msg_size_cnt;
perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
perf->params.msg_size_cnt *
params->thread_count);
if (NULL == perf->uct.iov) {
status = UCS_ERR_NO_MEMORY;
ucs_error("Failed allocate send IOV(%lu) buffer: %s",
perf->params.msg_size_cnt, ucs_status_string(status));
goto err_free_recv;
}
ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
perf->send_buffer, perf->recv_buffer);
return UCS_OK;
err_free_recv:
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
return status;
}
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
perf->allocator->uct_free(perf, &perf->uct.send_mem);
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
free(perf->uct.iov);
}
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
ucs_time_t start_time = ucs_get_time();
perf->start_time_acc = ucs_get_accurate_time();
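/* A max_time of 0 means "no time limit": the run is then bounded by max_iter only. */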
perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
ucs_time_from_sec(perf->params.max_time) + start_time;
perf->prev_time = start_time;
perf->prev.time = start_time;
perf->prev.time_acc = perf->start_time_acc;
perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned i;
perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
perf->params.max_iter;
perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
perf->current.time = 0;
perf->current.msgs = 0;
perf->current.bytes = 0;
perf->current.iters = 0;
perf->prev.msgs = 0;
perf->prev.bytes = 0;
perf->prev.iters = 0;
perf->timing_queue_head = 0;
for (i = 0; i < TIMING_QUEUE_SIZE; ++i) {
perf->timing_queue[i] = 0;
}
ucx_perf_test_start_clock(perf);
}
static void ucx_perf_test_init(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned group_index;
perf->params = *params;
group_index = rte_call(perf, group_index);
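/* The allocator is chosen per role: group index 0 allocates with the send
 * memory type, every other group member with the receive memory type. */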
if (0 == group_index) {
perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type];
} else {
perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type];
}
ucx_perf_test_prepare_new_run(perf, params);
}
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
ucs_time_t median;
double factor;
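/* Ping-pong iterations are full round trips: the reported latency is the
 * measured time divided by two, while bandwidth and message rate are doubled
 * to account for the reply traffic. All other test types use a factor of 1. */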
if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
factor = 2.0;
} else {
factor = 1.0;
}
result->iters = perf->current.iters;
result->bytes = perf->current.bytes;
result->elapsed_time = perf->current.time_acc - perf->start_time_acc;
/* Latency */
median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
result->latency.typical = ucs_time_to_sec(median) / factor;
result->latency.moment_average =
(perf->current.time_acc - perf->prev.time_acc)
/ (perf->current.iters - perf->prev.iters)
/ factor;
result->latency.total_average =
(perf->current.time_acc - perf->start_time_acc)
/ perf->current.iters
/ factor;
/* Bandwidth */
result->bandwidth.typical = 0.0; // Undefined
result->bandwidth.moment_average =
(perf->current.bytes - perf->prev.bytes) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->bandwidth.total_average =
perf->current.bytes /
(perf->current.time_acc - perf->start_time_acc) * factor;
/* Packet rate */
result->msgrate.typical = 0.0; // Undefined
result->msgrate.moment_average =
(perf->current.msgs - perf->prev.msgs) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->msgrate.total_average =
perf->current.msgs /
(perf->current.time_acc - perf->start_time_acc) * factor;
}
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
size_t it;
/* check if zero-size messages are requested and supported */
if ((/* they are not supported by: */
/* - UCT tests, except UCT AM Short/Bcopy */
(params->api == UCX_PERF_API_UCT) ||
(/* - UCP RMA and AMO tests */
(params->api == UCX_PERF_API_UCP) &&
(params->command != UCX_PERF_CMD_AM) &&
(params->command != UCX_PERF_CMD_TAG) &&
(params->command != UCX_PERF_CMD_TAG_SYNC) &&
(params->command != UCX_PERF_CMD_STREAM))) &&
ucx_perf_get_message_size(params) < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size too small, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->api == UCX_PERF_API_UCP) &&
((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
(params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
((params->command == UCX_PERF_CMD_PUT) ||
(params->command == UCX_PERF_CMD_GET) ||
(params->command == UCX_PERF_CMD_ADD) ||
(params->command == UCX_PERF_CMD_FADD) ||
(params->command == UCX_PERF_CMD_SWAP) ||
(params->command == UCX_PERF_CMD_CSWAP))) {
/* TODO: remove when support for non-HOST memory types will be added */
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->max_outstanding < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("max_outstanding, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* check if particular message size fit into stride size */
if (params->iov_stride) {
for (it = 0; it < params->msg_size_cnt; ++it) {
if (params->msg_size_list[it] > params->iov_stride) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Buffer size %lu bigger than stride %lu",
params->msg_size_list[it], params->iov_stride);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
uct_ep_h ep = perf->uct.peers[peer_index].ep;
uct_completion_t comp;
ucs_status_t status;
int started;
started = 0;
comp.func = NULL;
comp.count = 2;
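/* count starts at 2 and the loop below waits for it to drop to 1: either the
 * flush completes immediately and we decrement it ourselves, or the pending
 * flush decrements it asynchronously once it finishes. */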
do {
if (!started) {
status = uct_ep_flush(ep, 0, &comp);
if (status == UCS_OK) {
--comp.count;
} else if (status == UCS_INPROGRESS) {
started = 1;
} else if (status != UCS_ERR_NO_RESOURCE) {
ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
return;
}
}
uct_worker_progress(perf->uct.worker);
} while (comp.count > 1);
}
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
ucs_status_t status;
do {
status = uct_iface_flush(perf->uct.iface, 0, NULL);
uct_worker_progress(perf->uct.worker);
} while (status == UCS_INPROGRESS);
if (status != UCS_OK) {
ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
}
}
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
uint64_t bcopy_f, uint64_t zcopy_f)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f :
0;
}
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
uint64_t *op64, uint64_t op)
{
if (size == sizeof(uint32_t)) {
*op32 = UCS_BIT(op);
return UCS_OK;
} else if (size == sizeof(uint64_t)) {
*op64 = UCS_BIT(op);
return UCS_OK;
}
return UCS_ERR_UNSUPPORTED;
}
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
size_t bcopy_m, uint64_t zcopy_m)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
0;
}
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
ucs_memory_type_t mem_type,
uct_md_attr_t *md_attr)
{
if (!(md_attr->cap.access_mem_type == mem_type) &&
!(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
ucs_memory_type_names[mem_type],
UCT_PERF_TEST_PARAMS_ARG(params));
return UCS_ERR_INVALID_PARAM;
}
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
uct_iface_h iface, uct_md_h md)
{
uint64_t required_flags = 0;
uint64_t atomic_op32 = 0;
uint64_t atomic_op64 = 0;
uint64_t atomic_fop32 = 0;
uint64_t atomic_fop64 = 0;
uct_md_attr_t md_attr;
uct_iface_attr_t attr;
ucs_status_t status;
size_t min_size, max_size, max_iov, message_size;
status = uct_md_query(md, &md_attr);
if (status != UCS_OK) {
ucs_error("uct_md_query(%s) failed: %s",
params->uct.md_name, ucs_status_string(status));
return status;
}
status = uct_iface_query(iface, &attr);
if (status != UCS_OK) {
ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
UCT_PERF_TEST_PARAMS_ARG(params),
ucs_status_string(status));
return status;
}
min_size = 0;
max_iov = 1;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_AM:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
required_flags |= UCT_IFACE_FLAG_CB_SYNC;
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.am.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
max_iov = attr.cap.am.max_iov;
break;
case UCX_PERF_CMD_PUT:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.put.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
max_iov = attr.cap.put.max_iov;
break;
case UCX_PERF_CMD_GET:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.get.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
max_iov = attr.cap.get.max_iov;
break;
case UCX_PERF_CMD_ADD:
ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
perf_atomic_op, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_FADD:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_SWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_CSWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
/* check atomics first */
ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);
/* check iface flags */
if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
(!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
UCT_PERF_TEST_PARAMS_ARG(params),
perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size < min_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is smaller than min supported (%zu)",
message_size, min_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size > max_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is larger than max supported (%zu)",
message_size, max_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->command == UCX_PERF_CMD_AM) {
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
(params->am_hdr_size != sizeof(uint64_t)))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Short AM header size must be 8 bytes");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
(params->am_hdr_size > attr.cap.am.max_hdr))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than max supported (%zu)",
params->am_hdr_size, attr.cap.am.max_hdr);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->am_hdr_size > message_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than message size (%zu)",
params->am_hdr_size, message_size);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM flow-control window (%d) too large (should be <= %d)",
params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
(params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
{
ucs_warn("Running active-message test with on-sided progress");
}
}
if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
if (params->msg_size_cnt > max_iov) {
if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
!params->msg_size_cnt) {
ucs_error("Wrong number of IOV entries. Requested is %lu, "
"should be in the range 1...%lu", params->msg_size_cnt,
max_iov);
}
return UCS_ERR_UNSUPPORTED;
}
/* if msg_size_cnt == 1 the message size checked above */
if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
if (params->am_hdr_size > params->msg_size_list[0]) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%lu) larger than the first IOV "
"message size (%lu)", params->am_hdr_size,
params->msg_size_list[0]);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
const size_t buffer_size = ADDR_BUF_SIZE;
ucx_perf_ep_info_t info, *remote_info;
unsigned group_size, i, group_index;
uct_device_addr_t *dev_addr;
uct_iface_addr_t *iface_addr;
uct_ep_addr_t *ep_addr;
uct_iface_attr_t iface_attr;
uct_md_attr_t md_attr;
uct_ep_params_t ep_params;
void *rkey_buffer;
ucs_status_t status;
struct iovec vec[5];
void *buffer;
void *req;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("Failed to allocate RTE buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
status = uct_iface_query(perf->uct.iface, &iface_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
goto err_free;
}
status = uct_md_query(perf->uct.md, &md_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
goto err_free;
}
if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
info.rkey_size = md_attr.rkey_packed_size;
} else {
info.rkey_size = 0;
}
info.uct.dev_addr_len = iface_attr.device_addr_len;
info.uct.iface_addr_len = iface_attr.iface_addr_len;
info.uct.ep_addr_len = iface_attr.ep_addr_len;
info.recv_buffer = (uintptr_t)perf->recv_buffer;
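/* The exchange buffer is laid out as [packed rkey | device address |
 * interface address | endpoint address]; the pointers below carve it up. */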
rkey_buffer = buffer;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
UCS_PTR_BYTE_OFFSET(buffer, buffer_size));
status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_device_address: %s",
ucs_status_string(status));
goto err_free;
}
status = uct_iface_get_address(perf->uct.iface, iface_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
goto err_free;
}
if (info.rkey_size > 0) {
memset(rkey_buffer, 0, info.rkey_size);
status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
goto err_free;
}
}
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
if (perf->uct.peers == NULL) {
status = UCS_ERR_NO_MEMORY;
goto err_free;
}
ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
ep_params.iface = perf->uct.iface;
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
goto err_destroy_eps;
}
status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
UCT_EP_PARAM_FIELD_IFACE_ADDR;
}
vec[0].iov_base = &info;
vec[0].iov_len = sizeof(info);
vec[1].iov_base = buffer;
vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len +
info.uct.iface_addr_len + info.uct.ep_addr_len;
rte_call(perf, post_vec, vec, 2, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
rte_call(perf, recv, i, buffer, buffer_size, req);
remote_info = buffer;
rkey_buffer = remote_info + 1;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
perf->uct.peers[i].remote_addr = remote_info->recv_buffer;
if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
remote_info->uct.iface_addr_len ?
iface_addr : NULL)) {
ucs_error("Destination is unreachable");
status = UCS_ERR_UNREACHABLE;
goto err_destroy_eps;
}
if (remote_info->rkey_size > 0) {
status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
&perf->uct.peers[i].rkey);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
goto err_destroy_eps;
}
} else {
perf->uct.peers[i].rkey.handle = NULL;
perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY;
}
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.dev_addr = dev_addr;
ep_params.iface_addr = iface_addr;
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
} else {
status = UCS_ERR_UNSUPPORTED;
}
if (status != UCS_OK) {
ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
uct_perf_iface_flush_b(perf);
free(buffer);
uct_perf_barrier(perf);
return UCS_OK;
err_destroy_eps:
for (i = 0; i < group_size; ++i) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep != NULL) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
free(perf->uct.peers);
err_free:
free(buffer);
err:
return status;
}
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
unsigned group_size, group_index, i;
uct_perf_barrier(perf);
uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
for (i = 0; i < group_size; ++i) {
if (i != group_index) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
}
free(perf->uct.peers);
}
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
ucp_params_t *ucp_params)
{
ucs_status_t status;
size_t message_size;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_PUT:
case UCX_PERF_CMD_GET:
ucp_params->features |= UCP_FEATURE_RMA;
break;
case UCX_PERF_CMD_ADD:
case UCX_PERF_CMD_FADD:
case UCX_PERF_CMD_SWAP:
case UCX_PERF_CMD_CSWAP:
if (message_size == sizeof(uint32_t)) {
ucp_params->features |= UCP_FEATURE_AMO32;
} else if (message_size == sizeof(uint64_t)) {
ucp_params->features |= UCP_FEATURE_AMO64;
} else {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Atomic size should be either 32 or 64 bit");
}
return UCS_ERR_INVALID_PARAM;
}
break;
case UCX_PERF_CMD_TAG:
case UCX_PERF_CMD_TAG_SYNC:
ucp_params->features |= UCP_FEATURE_TAG;
break;
case UCX_PERF_CMD_STREAM:
ucp_params->features |= UCP_FEATURE_STREAM;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
size_t iovcnt, unsigned thread_count,
ucp_dt_iov_t **iov_p)
{
ucp_dt_iov_t *iov;
if (UCP_PERF_DATATYPE_IOV == datatype) {
iov = malloc(sizeof(*iov) * iovcnt * thread_count);
if (NULL == iov) {
ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
return UCS_ERR_NO_MEMORY;
}
*iov_p = iov;
}
return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
ucp_mem_map_params_t mem_map_params;
ucp_mem_attr_t mem_attr;
ucs_status_t status;
mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH |
UCP_MEM_MAP_PARAM_FIELD_FLAGS;
mem_map_params.address = *address_p;
mem_map_params.length = length;
mem_map_params.flags = UCP_MEM_MAP_ALLOCATE;
if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
mem_map_params.flags |= non_blk_flag;
}
status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
if (status != UCS_OK) {
goto err;
}
mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
status = ucp_mem_query(*memh, &mem_attr);
if (status != UCS_OK) {
goto err;
}
*address_p = mem_attr.address;
return UCS_OK;
err:
return status;
}
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
void *address, ucp_mem_h memh)
{
ucs_status_t status;
status = ucp_mem_unmap(perf->ucp.context, memh);
if (status != UCS_OK) {
ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
}
}
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
size_t buffer_size;
if (params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* Allocate send buffer memory */
perf->send_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->send_buffer, &perf->ucp.send_memh,
UCP_MEM_MAP_NONBLOCK);
if (status != UCS_OK) {
goto err;
}
/* Allocate receive buffer memory */
perf->recv_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->recv_buffer, &perf->ucp.recv_memh,
0);
if (status != UCS_OK) {
goto err_free_send_buffer;
}
/* Allocate IOV datatype memory */
perf->ucp.send_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.send_iov);
if (UCS_OK != status) {
goto err_free_buffers;
}
perf->ucp.recv_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.recv_iov);
if (UCS_OK != status) {
goto err_free_send_iov_buffers;
}
return UCS_OK;
err_free_send_iov_buffers:
free(perf->ucp.send_iov);
err_free_buffers:
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
return UCS_ERR_NO_MEMORY;
}
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
free(perf->ucp.recv_iov);
free(perf->ucp.send_iov);
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
unsigned i, thread_count = perf->params.thread_count;
ucs_status_ptr_t *req;
ucs_status_t status;
for (i = 0; i < thread_count; ++i) {
if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) {
ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey);
}
if (perf->ucp.tctx[i].perf.ucp.ep != NULL) {
req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep,
UCP_EP_CLOSE_MODE_FLUSH);
if (UCS_PTR_IS_PTR(req)) {
do {
ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker);
status = ucp_request_check_status(req);
} while (status == UCS_INPROGRESS);
ucp_request_release(req);
} else if (UCS_PTR_STATUS(req) != UCS_OK) {
ucs_warn("failed to close ep %p on thread %d: %s\n",
perf->ucp.tctx[i].perf.ucp.ep, i,
ucs_status_string(UCS_PTR_STATUS(req)));
}
}
}
}
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
ucs_status_t status)
{
unsigned group_size = rte_call(perf, group_size);
ucs_status_t collective_status = status;
struct iovec vec;
void *req = NULL;
unsigned i;
vec.iov_base = &status;
vec.iov_len = sizeof(status);
rte_call(perf, post_vec, &vec, 1, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
rte_call(perf, recv, i, &status, sizeof(status), req);
if (status != UCS_OK) {
collective_status = status;
}
}
return collective_status;
}
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
unsigned thread_count = perf->params.thread_count;
void *rkey_buffer = NULL;
void *req = NULL;
unsigned group_size, group_index, i;
ucx_perf_ep_info_t *remote_info;
ucp_ep_params_t ep_params;
ucp_address_t *address;
ucs_status_t status;
size_t buffer_size;
void *buffer;
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
if (group_size != 2) {
ucs_error("perftest requires group size to be exactly 2 "
"(actual group size: %u)", group_size);
return UCS_ERR_UNSUPPORTED;
}
buffer_size = ADDR_BUF_SIZE * thread_count;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("failed to allocate RTE receive buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
/* Initialize all endpoints and rkeys to NULL to handle error flow */
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].perf.ucp.ep = NULL;
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
/* receive the data from the remote peer, extract the address from it
* (along with additional wireup info) and create an endpoint to the peer */
rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);
remote_info = buffer;
for (i = 0; i < thread_count; i++) {
address = (ucp_address_t*)(remote_info + 1);
rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
remote_info->ucp.worker_addr_len);
perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;
ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ep_params.address = address;
status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
&perf->ucp.tctx[i].perf.ucp.ep);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
if (remote_info->rkey_size > 0) {
status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
&perf->ucp.tctx[i].perf.ucp.rkey);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
} else {
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
remote_info->ucp.total_wireup_len);
}
free(buffer);
return UCS_OK;
err_free_eps_buffer:
ucp_perf_test_destroy_eps(perf);
free(buffer);
err:
return status;
}
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
uint64_t features)
{
unsigned i, j, thread_count = perf->params.thread_count;
size_t address_length = 0;
void *rkey_buffer = NULL;
void *req = NULL;
ucx_perf_ep_info_t *info;
ucp_address_t *address;
ucs_status_t status;
struct iovec *vec;
size_t rkey_size;
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
&rkey_buffer, &rkey_size);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
}
goto err;
}
} else {
rkey_size = 0;
}
/* each thread has an iovec with 3 entries to send to the remote peer:
* ep_info, worker_address and rkey buffer */
vec = calloc(3 * thread_count, sizeof(struct iovec));
if (vec == NULL) {
ucs_error("failed to allocate iovec");
status = UCS_ERR_NO_MEMORY;
goto err_rkey_release;
}
/* get the worker address created for every thread and send it to the remote
* peer */
for (i = 0; i < thread_count; i++) {
status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
&address, &address_length);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_worker_get_address() failed: %s",
ucs_status_string(status));
}
goto err_free_workers_vec;
}
vec[i * 3].iov_base = malloc(sizeof(*info));
if (vec[i * 3].iov_base == NULL) {
ucs_error("failed to allocate vec entry for info");
status = UCS_ERR_NO_MEMORY;
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
goto err_free_workers_vec;
}
info = vec[i * 3].iov_base;
info->ucp.worker_addr_len = address_length;
info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
info->rkey_size = rkey_size;
info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;
vec[(i * 3) + 0].iov_len = sizeof(*info);
vec[(i * 3) + 1].iov_base = address;
vec[(i * 3) + 1].iov_len = address_length;
vec[(i * 3) + 2].iov_base = rkey_buffer;
vec[(i * 3) + 2].iov_len = info->rkey_size;
address_length = 0;
}
/* send to the remote peer */
rte_call(perf, post_vec, vec, 3 * thread_count, &req);
rte_call(perf, exchange_vec, req);
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
for (i = 0; i < thread_count; i++) {
free(vec[i * 3].iov_base);
ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
vec[(i * 3) + 1].iov_base);
}
free(vec);
return UCS_OK;
err_free_workers_vec:
for (j = 0; j < i; j++) {
ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
}
free(vec);
err_rkey_release:
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
err:
return status;
}
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
uint64_t features)
{
ucs_status_t status;
unsigned i;
/* pack the local endpoints data and send to the remote peer */
status = ucp_perf_test_send_local_data(perf, features);
if (status != UCS_OK) {
goto err;
}
/* receive remote peer's endpoints' data and connect to them */
status = ucp_perf_test_receive_remote_data(perf);
if (status != UCS_OK) {
goto err;
}
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, UCS_OK);
if (status != UCS_OK) {
goto err_destroy_eps;
}
/* force wireup completion */
for (i = 0; i < perf->params.thread_count; i++) {
status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
ucs_warn("ucp_worker_flush() failed on theread %d: %s",
i, ucs_status_string(status));
}
}
return status;
err_destroy_eps:
ucp_perf_test_destroy_eps(perf);
err:
(void)ucp_perf_test_exchange_status(perf, status);
return status;
}
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
ucp_perf_barrier(perf);
ucp_perf_test_destroy_eps(perf);
}
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
unsigned i;
for (i = 0; i < perf->params.thread_count; i++) {
if (perf->ucp.tctx[i].perf.ucp.worker != NULL) {
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
}
}
}
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
const ucx_perf_params_t* params)
{
perf->max_iter = ucs_min(params->warmup_iter,
ucs_div_round_up(params->max_iter, 10));
perf->report_interval = ULONG_MAX;
}
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
uct_component_h *uct_components;
uct_component_attr_t component_attr;
uct_tl_resource_desc_t *tl_resources;
unsigned md_index, num_components;
unsigned tl_index, num_tl_resources;
unsigned cmpt_index;
ucs_status_t status;
uct_md_h md;
uct_md_config_t *md_config;
status = uct_query_components(&uct_components, &num_components);
if (status != UCS_OK) {
goto out;
}
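/* Walk all components, their memory domains and each MD's transport
 * resources until the requested tl_name/dev_name pair is found; keep that
 * MD open and release everything else. */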
for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
component_attr.md_resource_count);
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
&md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
ucs_strncpy_zero(perf->params.uct.md_name,
component_attr.md_resources[md_index].md_name,
UCT_MD_NAME_MAX);
status = uct_md_open(uct_components[cmpt_index],
component_attr.md_resources[md_index].md_name,
md_config, &md);
uct_config_release(md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
if (status != UCS_OK) {
uct_md_close(md);
goto out_release_components_list;
}
for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
!strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
{
uct_release_tl_resource_list(tl_resources);
perf->uct.cmpt = uct_components[cmpt_index];
perf->uct.md = md;
status = UCS_OK;
goto out_release_components_list;
}
}
uct_md_close(md);
uct_release_tl_resource_list(tl_resources);
}
}
ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
UCT_PERF_TEST_PARAMS_ARG(&perf->params));
status = UCS_ERR_NO_DEVICE;
out_release_components_list:
uct_release_component_list(uct_components);
out:
return status;
}
void uct_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
(void*)perf->uct.worker);
}
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
(void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
(void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
uct_iface_config_t *iface_config;
ucs_status_t status;
uct_iface_params_t iface_params = {
.field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
UCT_IFACE_PARAM_FIELD_STATS_ROOT |
UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
UCT_IFACE_PARAM_FIELD_CPU_MASK,
.open_mode = UCT_IFACE_OPEN_MODE_DEVICE,
.mode.device.tl_name = params->uct.tl_name,
.mode.device.dev_name = params->uct.dev_name,
.stats_root = ucs_stats_get_root(),
.rx_headroom = 0
};
UCS_CPU_ZERO(&iface_params.cpu_mask);
status = ucs_async_context_init(&perf->uct.async, params->async_mode);
if (status != UCS_OK) {
goto out;
}
status = uct_worker_create(&perf->uct.async, params->thread_mode,
&perf->uct.worker);
if (status != UCS_OK) {
goto out_cleanup_async;
}
status = uct_perf_create_md(perf);
if (status != UCS_OK) {
goto out_destroy_worker;
}
status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
NULL, &iface_config);
if (status != UCS_OK) {
goto out_destroy_md;
}
status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
iface_config, &perf->uct.iface);
uct_config_release(iface_config);
if (status != UCS_OK) {
ucs_error("Failed to open iface: %s", ucs_status_string(status));
goto out_destroy_md;
}
status = uct_perf_test_check_capabilities(params, perf->uct.iface,
perf->uct.md);
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, status);
if (status != UCS_OK) {
goto out_iface_close;
}
status = uct_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
goto out_iface_close;
}
/* Enable progress before `uct_iface_flush` and `uct_worker_progress` are
 * called, to give some transports (ib/ud, tcp) a chance to finish their
 * connection setup. They may return UCS_INPROGRESS from `uct_iface_flush`
 * while connections are still in progress */
uct_iface_progress_enable(perf->uct.iface,
UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
status = uct_perf_test_setup_endpoints(perf);
if (status != UCS_OK) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
goto out_free_mem;
}
return UCS_OK;
out_free_mem:
uct_perf_test_free_mem(perf);
out_iface_close:
uct_iface_close(perf->uct.iface);
out_destroy_md:
uct_md_close(perf->uct.md);
out_destroy_worker:
uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
ucs_async_context_cleanup(&perf->uct.async);
out:
return status;
}
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
uct_perf_test_cleanup_endpoints(perf);
uct_perf_test_free_mem(perf);
uct_iface_close(perf->uct.iface);
uct_md_close(perf->uct.md);
uct_worker_destroy(perf->uct.worker);
ucs_async_context_cleanup(&perf->uct.async);
}
static void ucp_perf_request_init(void *req)
{
ucp_perf_request_t *request = req;
request->context = NULL;
}
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
ucp_params_t ucp_params;
ucp_worker_params_t worker_params;
ucp_config_t *config;
ucs_status_t status;
unsigned i, thread_count;
size_t message_size;
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES |
UCP_PARAM_FIELD_REQUEST_SIZE |
UCP_PARAM_FIELD_REQUEST_INIT;
ucp_params.features = 0;
ucp_params.request_size = sizeof(ucp_perf_request_t);
ucp_params.request_init = ucp_perf_request_init;
if (perf->params.thread_count > 1) {
/* when there is more than one thread, a ucp_worker is created for each of
 * them; all workers share the same ucp_context */
ucp_params.field_mask |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
ucp_params.mt_workers_shared = 1;
}
status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
if (status != UCS_OK) {
goto err;
}
status = ucp_config_read(NULL, NULL, &config);
if (status != UCS_OK) {
goto err;
}
status = ucp_init(&ucp_params, config, &perf->ucp.context);
ucp_config_release(config);
if (status != UCS_OK) {
goto err;
}
thread_count = perf->params.thread_count;
message_size = ucx_perf_get_message_size(&perf->params);
status = ucp_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
ucs_warn("ucp test failed to allocate memory");
goto err_cleanup;
}
perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
if (perf->ucp.tctx == NULL) {
ucs_warn("ucp test failed to allocate memory for thread contexts");
status = UCS_ERR_NO_MEMORY;
goto err_free_mem;
}
worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
worker_params.thread_mode = perf->params.thread_mode;
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].tid = i;
perf->ucp.tctx[i].perf = *perf;
/* Doctor the src and dst buffers to make them thread specific */
perf->ucp.tctx[i].perf.send_buffer =
UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
perf->ucp.tctx[i].perf.recv_buffer =
UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);
status = ucp_worker_create(perf->ucp.context, &worker_params,
&perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
goto err_free_tctx_destroy_workers;
}
}
status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
}
goto err_free_tctx_destroy_workers;
}
return UCS_OK;
err_free_tctx_destroy_workers:
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
err_free_mem:
ucp_perf_test_free_mem(perf);
err_cleanup:
ucp_cleanup(perf->ucp.context);
err:
return status;
}
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
ucp_perf_test_cleanup_endpoints(perf);
ucp_perf_barrier(perf);
ucp_perf_test_free_mem(perf);
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
ucp_cleanup(perf->ucp.context);
}
static struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
[UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
uct_perf_test_dispatch, uct_perf_barrier},
[UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
ucx_perf_result_t *result)
{
ucx_perf_context_t *perf;
ucs_status_t status;
ucx_perf_global_init();
if (params->command == UCX_PERF_CMD_LAST) {
ucs_error("Test is not selected");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
ucs_error("Invalid test API parameter (should be UCT or UCP)");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
perf = malloc(sizeof(*perf));
if (perf == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
ucx_perf_test_init(perf, params);
if (perf->allocator == NULL) {
ucs_error("Unsupported memory types %s<->%s",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
status = UCS_ERR_UNSUPPORTED;
goto out_free;
}
if ((params->api == UCX_PERF_API_UCT) &&
(perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_warn("UCT tests also copy 2-byte values from %s memory to "
"%s memory, which may impact performance results",
ucs_memory_type_names[perf->allocator->mem_type],
ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
}
status = perf->allocator->init(perf);
if (status != UCS_OK) {
goto out_free;
}
status = ucx_perf_funcs[params->api].setup(perf);
if (status != UCS_OK) {
goto out_free;
}
if (params->thread_count == 1) {
if (params->api == UCX_PERF_API_UCP) {
perf->ucp.worker = perf->ucp.tctx[0].perf.ucp.worker;
perf->ucp.ep = perf->ucp.tctx[0].perf.ucp.ep;
perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
perf->ucp.rkey = perf->ucp.tctx[0].perf.ucp.rkey;
}
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
if (status != UCS_OK) {
goto out_cleanup;
}
ucx_perf_funcs[params->api].barrier(perf);
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (status == UCS_OK) {
ucx_perf_calc_result(perf, result);
rte_call(perf, report, result, perf->params.report_arg, 1, 0);
}
} else {
status = ucx_perf_thread_spawn(perf, result);
}
out_cleanup:
ucx_perf_funcs[params->api].cleanup(perf);
out_free:
free(perf);
out:
return status;
}
#if _OPENMP
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
ucx_perf_result_t* result = &tctx->result;
ucx_perf_context_t* perf = &tctx->perf;
ucx_perf_params_t* params = &perf->params;
ucs_status_t status;
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
#pragma omp barrier
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_calc_result(perf, result);
out:
return status;
}
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
unsigned i, thread_count = perf->params.thread_count;
double lat_sum_total_average = 0.0;
ucx_perf_result_t agg_result;
agg_result.iters = tctx[0].result.iters;
agg_result.bytes = tctx[0].result.bytes;
agg_result.elapsed_time = tctx[0].result.elapsed_time;
agg_result.bandwidth.total_average = 0.0;
agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */
agg_result.latency.total_average = 0.0;
agg_result.msgrate.total_average = 0.0;
agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */
/* when running with multiple threads, the moment average value is
* undefined since we don't capture the values of the last iteration */
agg_result.msgrate.moment_average = 0.0;
agg_result.bandwidth.moment_average = 0.0;
agg_result.latency.moment_average = 0.0;
agg_result.latency.typical = 0.0;
/* in case of multiple threads, we have to aggregate the results so that the
* final output of the result would show the performance numbers that were
* collected from all the threads.
* BW and message rate values will be the sum of their values from all
* the threads, while the latency value is the average latency from the
* threads. */
for (i = 0; i < thread_count; i++) {
agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average;
agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average;
lat_sum_total_average += tctx[i].result.latency.total_average;
}
agg_result.latency.total_average = lat_sum_total_average / thread_count;
rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1);
}
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
int ti, thread_count = perf->params.thread_count;
ucs_status_t* statuses;
ucs_status_t status;
omp_set_num_threads(thread_count);
statuses = calloc(thread_count, sizeof(ucs_status_t));
if (statuses == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
#pragma omp parallel private(ti)
{
ti = omp_get_thread_num();
tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
}
status = UCS_OK;
for (ti = 0; ti < thread_count; ti++) {
if (UCS_OK != tctx[ti].status) {
ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
ucs_status_string(tctx[ti].status));
status = tctx[ti].status;
}
}
ucx_perf_thread_report_aggregated_results(perf);
free(statuses);
out:
return status;
}
#else
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result) {
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
void ucx_perf_global_init()
{
static ucx_perf_allocator_t host_allocator = {
.mem_type = UCS_MEMORY_TYPE_HOST,
.init = ucs_empty_function_return_success,
.ucp_alloc = ucp_perf_test_alloc_host,
.ucp_free = ucp_perf_test_free_host,
.uct_alloc = uct_perf_test_alloc_host,
.uct_free = uct_perf_test_free_host,
.memcpy = ucx_perf_test_memcpy_host,
.memset = memset
};
UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);
ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;
/* FIXME Memtype allocator modules must be loaded to global scope, otherwise
* alloc hooks, which are using dlsym() to get pointer to original function,
* do not work. Need to use bistro for memtype hooks to fix it.
*/
UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
3d25pt_var.c
|
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
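/* The kernel inside the scop region below is kept serial; it is the input to the
 * source-to-source tiling/parallelization mentioned above. A minimal hand-written
 * OpenMP sketch, assuming only the usual dependence between consecutive time steps,
 * would parallelize the outer spatial loop of each step:
 *
 *   #pragma omp parallel for private(j, k) schedule(static)
 *   for (i = 4; i < Nz-4; i++)
 *     for (j = 4; j < Ny-4; j++)
 *       for (k = 4; k < Nx-4; k++)
 *         ... same 25-point update as below ...
 *
 * num_threads is reported for information only; the run honors OMP_NUM_THREADS
 * only if such a pragma is added.
 */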
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 24 && Multiplication: 13 (per grid point)
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_unop__identity_uint64_uint32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_uint32
// op(A') function: GB_unop_tran__identity_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint64_uint32
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hello_world_omp.c
|
#include <stdio.h>
#include <omp.h>
int main(int argc, char* argv[]) {
#pragma omp parallel
{
int id = omp_get_thread_num();
int nthrds = omp_get_num_threads();
printf("Hello from thread %d of %d\n", id, nthrds);
}
return 0;
}
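/* A typical build/run (assuming GCC with OpenMP support):
 *   gcc -fopenmp hello_world_omp.c -o hello
 *   OMP_NUM_THREADS=4 ./hello
 * The order in which the per-thread lines appear is not deterministic.
 */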
|
GB_unop__identity_fc64_bool.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_bool)
// op(A') function: GB (_unop_tran__identity_fc64_bool)
// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_bool)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
push_superphotons.c
|
/******************************************************************************
* *
* PUSH_SUPERPHOTONS.C *
* *
* INTEGRATE SUPERPHOTON GEODESICS *
* *
******************************************************************************/
#include "decs.h"
#if RADIATION
// void push(double dt, struct of_photon *ph);
void push(double X[NDIM], double Kcov[NDIM], double Kcon[NDIM], double dt);
void get_X_source(double Kcon[NDIM], double src[NDIM]);
void get_K_source(
double X[NDIM], double Kcov[NDIM], double Kcon[NDIM], double src[NDIM]);
#define MAX_SUBDIV (10)
int push_X_K(double X[NDIM], double Kcov[NDIM], double Kcon[NDIM],
grid_prim_type P, grid_prim_type Prad, double KdotKprev, int type,
double dtpush) {
int nsubdiv = 0;
double Xprev[NDIM], Kcovprev[NDIM], Kconprev[NDIM], KdotK = 0.;
int good_push;
// Store initial X and K
for (int mu = 0; mu < NDIM; mu++) {
Xprev[mu] = X[mu];
Kcovprev[mu] = Kcov[mu];
Kconprev[mu] = Kcon[mu];
}
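// Retry the push with progressively finer substeps (2^nsubdiv pieces of dtpush),
// restarting from the saved state each time, until the wavevector stays acceptably
// null (is_null) or MAX_SUBDIV subdivisions have been tried.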
do {
for (int mu = 0; mu < NDIM; mu++) {
X[mu] = Xprev[mu];
Kcov[mu] = Kcovprev[mu];
Kcon[mu] = Kconprev[mu];
}
for (int n = 0; n < pow(2, nsubdiv); n++) {
if (type == TYPE_TRACER) {
#if TRACERS
push_tracers(X, Kcov, Kcon, P, Prad, dtpush / pow(2, nsubdiv));
#else
fprintf(stderr,
"[push_X_K]: Bad photon type!\n"
"\ttype = %d\n"
"\tX = [%g %g %g %g]\n"
"\tKcov = [%g %g %g %g]\n"
"\tKcon = [%g %g %g %g]\n",
type, X[0], X[1], X[2], X[3], Kcov[0], Kcov[1], Kcov[2], Kcov[3],
Kcon[0], Kcon[1], Kcon[2], Kcon[3]);
exit(-1);
#endif
} else {
push(X, Kcov, Kcon, dtpush / pow(2, nsubdiv));
}
}
if (type == TYPE_TRACER) {
good_push = 1; // TODO: is this right?
} else {
good_push = is_null(Kcov, Kcon, Kcovprev[0], KdotKprev, &KdotK);
}
nsubdiv++;
} while (!good_push && nsubdiv < MAX_SUBDIV);
if (nsubdiv == MAX_SUBDIV) {
// fprintf(stderr, "Geodesic failure!\n");
// fprintf(stderr, "X[] = %e %e %e %e\n", X[0], X[1], X[2], X[3]);
return PUSH_FAIL;
}
return PUSH_SUCCESS;
}
int push_superphoton(struct of_photon *ph, grid_prim_type P,
grid_prim_type Prad, double dtpush) {
// Store X and K at step n
for (int n = 0; n < NSUP - 1; n++) {
for (int mu = 0; mu < NDIM; mu++) {
ph->X[n][mu] = ph->X[n + 1][mu];
ph->Kcov[n][mu] = ph->Kcov[n + 1][mu];
ph->Kcon[n][mu] = ph->Kcon[n + 1][mu];
}
}
int status = push_X_K(ph->X[2], ph->Kcov[2], ph->Kcon[2], P, Prad,
ph->KdotKprev, ph->type, dtpush);
ph->KdotKprev = dot(ph->Kcov[2], ph->Kcon[2]);
return status;
}
void push_superphotons(grid_prim_type P, grid_prim_type Prad, double dt) {
timer_start(TIMER_PUSH);
int step_lost_local = 0;
int step_tot_local = 0;
int tracer_tot_local = 0;
#pragma omp parallel reduction(+:step_lost_local) reduction(+:step_tot_local) \
reduction(+:tracer_tot_local)
{
int push_status;
struct of_photon *ph = photon_lists[omp_get_thread_num()];
struct of_photon *prev = NULL;
struct of_photon *head = ph;
// Push each photon from n to n+1
while (ph != NULL) {
if (to_be_pushed(t, dt, ph)) {
double dtpush = get_dtpush(ph, dt);
push_status = push_superphoton(ph, P, Prad, dtpush);
if (push_status == PUSH_FAIL) {
// fprintf(stderr, "Geodesic push failed!\n");
// fprintf(stderr, "X[] = %e %e %e %e\n", ph->X[2][0], ph->X[2][1],
// ph->X[2][2], ph->X[2][3]);
#if RADIATION == RADTYPE_NEUTRINOS
record_lepton_flux(ph);
#endif
list_remove(&ph, &head, &prev);
step_lost_local++;
continue;
}
}
if (ph->type == TYPE_TRACER)
tracer_tot_local++;
step_tot_local++;
prev = ph;
ph = ph->next;
} // ph != NULL
photon_lists[omp_get_thread_num()] = head;
} // omp parallel
step_lost += step_lost_local;
step_tot += step_tot_local;
tracer_tot += tracer_tot_local;
timer_stop(TIMER_PUSH);
}
#undef MAX_SUBDIV
// Second order update to X^{\mu}, K_{\mu} from t to t + dt
// void push(double dt, struct of_photon *ph)
void push(double X[NDIM], double Kcov[NDIM], double Kcon[NDIM], double dt) {
// Heun's method:
// x_n+1 = x_n + dt*(0.5*c1 + 0.5*c2)
// c1 = dxdt(t_n, x_n)
// c2 = dxdt(t_n + dt, x_n + dt*c1)
// y_n+1 = y_n + dt*(0.5*d1 + 0.5*d2)
// d1 = dydt(t_n, y_n)
// d2 = dydt(t_n + dt, y_n + dt*d1)
//
// where
// x = X^{\mu}, \mu = [1, 2, 3] (X[0] known)
// y = K_{\mu}, \mu = [1, 2] (K_0, K_3 conserved)
// dydt = -1/(2*k^{\nu})*k_b*k_c*(d g^{bc} / dx^{\mu})
// dxdt = k^{\mu} / k^0
double c1[NDIM], c2[NDIM], d1[NDIM], d2[NDIM];
double Xtmp[NDIM], Kcontmp[NDIM], Kcovtmp[NDIM];
double gcov[NDIM][NDIM], gcon[NDIM][NDIM];
memset(c1, 0, NDIM * sizeof(double));
memset(c2, 0, NDIM * sizeof(double));
memset(d1, 0, NDIM * sizeof(double));
memset(d2, 0, NDIM * sizeof(double));
// First stage
set_gcov(X, gcov);
gcon_func(gcov, gcon);
raise(Kcov, gcon, Kcontmp);
get_X_source(Kcontmp, c1);
get_K_source(X, Kcov, Kcontmp, d1);
for (int mu = 0; mu < NDIM; mu++) {
Xtmp[mu] = X[mu] + dt * c1[mu];
Kcovtmp[mu] = Kcov[mu] + dt * d1[mu];
}
// Second stage
set_gcov(Xtmp, gcov);
gcon_func(gcov, gcon);
raise(Kcovtmp, gcon, Kcontmp);
get_X_source(Kcontmp, c2);
get_K_source(Xtmp, Kcovtmp, Kcontmp, d2);
X[0] += dt;
for (int mu = 1; mu < NDIM; mu++) {
X[mu] += 0.5 * dt * (c1[mu] + c2[mu]);
Kcov[mu] += 0.5 * dt * (d1[mu] + d2[mu]);
}
// Also provide Kcon
set_gcov(X, gcov);
gcon_func(gcov, gcon);
raise(Kcov, gcon, Kcon);
}
void get_X_source(double Kcon[NDIM], double src[NDIM]) {
for (int mu = 0; mu < NDIM; mu++) {
src[mu] = Kcon[mu] / Kcon[0];
}
}
#define DELTA (1.e-6)
void get_K_source(
double X[NDIM], double Kcov[NDIM], double Kcon[NDIM], double src[NDIM]) {
// Don't do work when sources are trivial
#if METRIC == MINKOWSKI
static int killing[] = {1, 1, 1, 1};
#elif METRIC == MKS
static int killing[] = {1, 0, 0, 1};
#endif
for (int mu = 0; mu < NDIM; mu++) {
if (killing[mu] == 1) {
src[mu] = 0.;
} else {
src[mu] = 0.;
// Numerically evaluate d g^{\nu \kap} / dx^{\mu}
double Xm[NDIM], Xp[NDIM];
double gcovm[NDIM][NDIM], gcovp[NDIM][NDIM];
double gconm[NDIM][NDIM], gconp[NDIM][NDIM], dG;
for (int nu = 0; nu < NDIM; nu++) {
Xm[nu] = X[nu];
Xp[nu] = X[nu];
}
Xm[mu] -= DELTA;
Xp[mu] += DELTA;
set_gcov(Xm, gcovm);
set_gcov(Xp, gcovp);
gcon_func(gcovm, gconm);
gcon_func(gcovp, gconp);
for (int nu = 0; nu < NDIM; nu++) {
for (int kap = 0; kap < NDIM; kap++) {
dG = (gconp[nu][kap] - gconm[nu][kap]) / (Xp[mu] - Xm[mu]);
src[mu] += Kcov[nu] * Kcov[kap] * dG;
}
}
src[mu] *= -1. / (2. * Kcon[0]);
}
}
}
#undef DELTA
#endif // RADIATION
|
task-taskgroup-nested.c
|
/*
* task-taskgroup-nested.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"
int main(int argc, char *argv[]) {
int var = 0, a = 0;
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
{
#pragma omp taskgroup
{
#pragma omp task
{
#pragma omp task shared(var, a)
{
var++;
OMPT_SIGNAL(a);
}
}
// Give other thread time to steal the task and execute its child.
OMPT_WAIT(a, 1);
}
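// The taskgroup waits for the nested child task as well, so the increment
// below cannot race with the increment inside that task.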
var++;
}
fprintf(stderr, "DONE\n");
int error = (var != 2);
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
utils.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDQUANTUM_UTILS_H_
#define MINDQUANTUM_UTILS_H_
#include <stdint.h>
#ifdef USE_OPENMP
# include <omp.h>
#endif // USE_OPENMP // NOLINT
#ifdef _MSC_VER
# include <intrin.h>
#else
# include <x86intrin.h>
#endif // _MSC_VER
#include <complex>
#include <cstdlib>
#include <ctime>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "core/type.h"
namespace mindquantum {
extern const VT<CT<MT>> POLAR;
template <typename T, typename ST>
CT<T> ComplexInnerProduct(const ST *v1, const ST *v2, Index len) {
// len is (1UL << n_qubits) * 2: the state vector holds 2^n_qubits complex
// amplitudes stored as interleaved real/imaginary parts
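// Computes <v1|v2> with v1 conjugated: Re += x1*x2 + y1*y2, Im += x1*y2 - y1*x2,
// accumulated across threads via the OpenMP reduction clause.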
ST real_part = 0;
ST imag_part = 0;
auto size = len / 2;
#pragma omp parallel for reduction(+ : real_part, imag_part)
for (Index i = 0; i < size; i++) {
real_part += v1[2 * i] * v2[2 * i] + v1[2 * i + 1] * v2[2 * i + 1];
imag_part += v1[2 * i] * v2[2 * i + 1] - v1[2 * i + 1] * v2[2 * i];
}
CT<T> result = {static_cast<T>(real_part), static_cast<T>(imag_part)};
return result;
}
template <typename T, typename ST>
CT<T> ComplexInnerProductWithControl(const ST *v1, const ST *v2, Index len, Index ctrlmask) {
// len is (1UL << n_qubits) * 2, as in ComplexInnerProduct above
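// Same inner product, but only basis states whose index has every bit of
// ctrlmask set contribute to the sum.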
ST real_part = 0;
ST imag_part = 0;
auto size = len / 2;
#pragma omp parallel for reduction(+ : real_part, imag_part)
for (Index i = 0; i < size; i++) {
if ((i & ctrlmask) == ctrlmask) {
real_part += v1[2 * i] * v2[2 * i] + v1[2 * i + 1] * v2[2 * i + 1];
imag_part += v1[2 * i] * v2[2 * i + 1] - v1[2 * i + 1] * v2[2 * i];
}
}
CT<T> result = {static_cast<T>(real_part), static_cast<T>(imag_part)};
return result;
}
Index GetControlMask(const VT<Index> &ctrls);
PauliMask GetPauliMask(const VT<PauliWord> &pws);
#ifdef _MSC_VER
inline uint32_t CountOne(uint32_t n) {
return __popcnt(n);
}
inline uint64_t CountOne(uint64_t n) {
return __popcnt64(n);
}
inline uint32_t CountOne(int32_t n) {
return CountOne(uint32_t(n));
}
inline uint64_t CountOne(int64_t n) {
return CountOne(uint64_t(n));
}
#else
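// Non-MSVC path: emits the popcnt instruction directly, which assumes an x86 CPU
// with POPCNT support (__builtin_popcount/__builtin_popcountll would be the
// portable alternative).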
inline uint32_t CountOne(uint32_t n) {
int result;
asm("popcnt %1,%0" : "=r"(result) : "r"(n));
return result;
}
inline uint64_t CountOne(int64_t n) {
uint32_t *p = reinterpret_cast<uint32_t *>(&n);
return CountOne(p[0]) + CountOne(p[1]);
}
#endif // _MSC_VER
// inline int CountOne(uint64_t n) {
// uint8_t *p = reinterpret_cast<uint8_t *>(&n);
// return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] +
// POPCNTTABLE[p[3]] + POPCNTTABLE[p[4]] + POPCNTTABLE[p[5]] +
// POPCNTTABLE[p[6]] + POPCNTTABLE[p[7]];
// }
// inline int CountOne(uint32_t n) {
// uint8_t *p = reinterpret_cast<uint8_t *>(&n);
// return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] +
// POPCNTTABLE[p[3]];
// }
template <typename T>
PauliTerm<T> GenerateRandomPauliTerm(Index n_qubits) {
std::default_random_engine e(std::clock());
std::uniform_real_distribution<T> ut(-1.0, 1.0);
auto coeff = ut(e);
std::uniform_int_distribution<short> uit(0, 3);
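// p in {0,1,2} maps to Pauli 'X','Y','Z' via (p + 'X'); p == 3 means identity,
// so that qubit is skipped.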
VT<PauliWord> pws;
for (Index i = 0; i < n_qubits; i++) {
auto p = uit(e);
if (p != 3) {
pws.push_back(std::make_pair(i, (p + 'X')));
}
}
return std::make_pair(pws, coeff);
}
template <typename T>
void ShowPauliTerm(const PauliTerm<T> &pt) {
std::cout << pt.second << " [";
for (Index i = 0; i < static_cast<Index>(pt.first.size()); i++) {
auto &pw = pt.first[i];
std::cout << pw.second << pw.first;
if (i != static_cast<Index>(pt.first.size()) - 1) {
std::cout << " ";
}
}
std::cout << "]" << std::endl;
}
TimePoint NOW();
int TimeDuration(TimePoint start, TimePoint end);
template <typename T>
void PrintVec(T *vec, size_t len) {
auto cvec = reinterpret_cast<CTP<T>>(vec);
for (size_t i = 0; i < len / 2; i++) {
std::cout << cvec[i] << std::endl;
}
}
} // namespace mindquantum
#endif // MINDQUANTUM_UTILS_H_
|
omptough.c
|
#include <pthread.h>
#include <stdlib.h>
#include <malloc.h>
#include <unistd.h>
#include <stdio.h>
#include <omp.h>
#include "papi_test.h"
#define NITER (100000)
int main( int argc, char* argv[] )
{
int i;
int ret;
int nthreads;
int *evtset;
int *ctrcode;
nthreads = omp_get_max_threads();
evtset = (int*) malloc( sizeof(int)*nthreads );
ctrcode = (int*) malloc( sizeof(int)*nthreads );
tests_quiet(argc, argv); /* Set TESTS_QUIET variable */
ret=PAPI_library_init( PAPI_VER_CURRENT );
if( ret!=PAPI_VER_CURRENT && ret>0 )
{
fprintf(stderr, "PAPI library version mismatch '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
if( ret<0 )
{
fprintf(stderr, "PAPI initialization error '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
if( (ret = PAPI_thread_init((unsigned long (*)(void)) pthread_self)) != PAPI_OK )
{
fprintf(stderr, "PAPI thread initialization error '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
for( i=0; i<nthreads; i++ )
{
evtset[i]=PAPI_NULL;
if( (ret=PAPI_event_name_to_code( "PAPI_TOT_INS", &ctrcode[i]))
!=PAPI_OK )
{
fprintf(stderr, "PAPI evt-name-to-code error '%s'\n",
PAPI_strerror(ret) );
}
}
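/* Stress loop: each iteration registers every OpenMP thread with PAPI, creates
 * and destroys a per-thread event set, then unregisters the thread. Any failure
 * aborts the test via test_fail(). */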
for( i=0; i<NITER; i++ ){
#pragma omp parallel
{
int tid;
int pid;
tid = omp_get_thread_num();
pid = pthread_self();
if( (ret=PAPI_register_thread()) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error in register thread (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
evtset[tid]=PAPI_NULL;
if( (ret=PAPI_create_eventset(&(evtset[tid]))) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error creating eventset (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
if( (ret=PAPI_destroy_eventset(&(evtset[tid]))) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error destroying eventset (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
evtset[tid]=PAPI_NULL;
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
if( (ret=PAPI_unregister_thread()) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error in unregister thread (tid=%d pid=%d) ret='%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
}
}
test_pass(__FILE__, NULL, 0);
exit(1);
}
|
project.c
|
//-----------------------------------------------------------------------------
// project.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/19/14 (Build 5.1.000)
// 04/14/14 (Build 5.1.004)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 04/30/15 (Build 5.1.009)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// 05/10/18 (Build 5.1.013)
// Author: L. Rossman
//
// Project management functions.
//
// This module provides project-related services such as:
// o opening a new project and reading its input data
// o allocating and freeing memory for project objects
// o setting default values for object properties and options
// o initializing the internal state of all objects
// o managing hash tables for identifying objects by ID name
//
// Build 5.1.004:
// - Ignore RDII option added.
//
// Build 5.1.007:
// - Default monthly adjustments for climate variables included.
// - User-supplied GW flow equations initialized to NULL.
// - Storage node exfiltration object initialized to NULL.
// - Freeing of memory used for storage node exfiltration included.
//
// Build 5.1.008:
// - Constants used for dynamic wave routing moved to dynwave.c.
// - Input processing of minimum time step & number of
// parallel threads for dynamic wave routing added.
// - Default values of hyd. conductivity adjustments added.
// - Freeing of memory used for outfall pollutant load added.
//
// Build 5.1.009:
// - Fixed bug in computing total duration introduced in 5.1.008.
//
// Build 5.1.011:
// - Memory management of hydraulic event dates array added.
//
// Build 5.1.012:
// - Minimum conduit slope option initialized to 0 (none).
// - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED.
//
// Build 5.1.013:
// - omp_get_num_threads function protected against lack of compiler
// support for OpenMP.
// - Rain gage validation now performed after subcatchment validation.
// - More robust parsing of MinSurfarea option provided.
// - Support added for new RuleStep analysis option.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP) //(5.1.013)
#include <omp.h> //
#else //
int omp_get_num_threads(void) { return 1;} //
#endif //
#include "headers.h"
#include "lid.h"
#include "hash.h"
#include "mempool.h"
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names
static char MemPoolAllocated; // TRUE if memory pool allocated
//-----------------------------------------------------------------------------
// External Functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// project_open (called from swmm_open in swmm5.c)
// project_close (called from swmm_close in swmm5.c)
// project_readInput (called from swmm_open in swmm5.c)
// project_readOption (called from readOption in input.c)
// project_validate (called from swmm_open in swmm5.c)
// project_init (called from swmm_start in swmm5.c)
// project_addObject (called from addObject in input.c)
// project_createMatrix (called from openFileForInput in iface.c)
// project_freeMatrix (called from iface_closeRoutingFiles)
// project_findObject
// project_findID
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initPointers(void);
static void setDefaults(void);
static void openFiles(char *f1, char *f2, char *f3);
static void createObjects(void);
static void deleteObjects(void);
static void createHashTables(void);
static void deleteHashTables(void);
//=============================================================================
void project_open(char *f1, char *f2, char *f3)
//
// Input: f1 = pointer to name of input file
// f2 = pointer to name of report file
// f3 = pointer to name of binary output file
// Output: none
// Purpose: opens a new SWMM project.
//
{
initPointers();
setDefaults();
openFiles(f1, f2, f3);
}
//=============================================================================
void project_readInput()
//
// Input: none
// Output: none
// Purpose: retrieves project data from input file.
//
{
// --- create hash tables for fast retrieval of objects by ID names
createHashTables();
// --- count number of objects in input file and create them
input_countObjects();
createObjects();
// --- read project data from input file
input_readData();
if ( ErrorCode ) return;
// --- establish starting & ending date/time
StartDateTime = StartDate + StartTime;
EndDateTime = EndDate + EndTime;
ReportStart = ReportStartDate + ReportStartTime;
ReportStart = MAX(ReportStart, StartDateTime);
// --- check for valid starting & ending date/times
if ( EndDateTime <= StartDateTime )
{
report_writeErrorMsg(ERR_START_DATE, "");
}
else if ( EndDateTime <= ReportStart )
{
report_writeErrorMsg(ERR_REPORT_DATE, "");
}
else
{
// --- compute total duration of simulation in seconds
double durationDate = EndDate - StartDate;
double durationTime = EndTime - StartTime;
TotalDuration = floor(durationDate * SECperDAY + durationTime * SECperDAY);
// --- reporting step must be <= total duration
if ( (double)ReportStep > TotalDuration )
{
ReportStep = (int)(TotalDuration);
}
// --- reporting step can't be < routing step
if ( (double)ReportStep < RouteStep )
{
report_writeErrorMsg(ERR_REPORT_STEP, "");
}
// --- convert total duration to milliseconds
TotalDuration *= 1000.0;
}
}
//=============================================================================
void project_validate()
//
// Input: none
// Output: none
// Purpose: checks validity of project data.
//
{
int i;
int j;
int err;
// --- validate Curves and TimeSeries
for ( i=0; i<Nobjects[CURVE]; i++ )
{
err = table_validate(&Curve[i]);
if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
}
for ( i=0; i<Nobjects[TSERIES]; i++ )
{
err = table_validate(&Tseries[i]);
if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]);
}
// --- validate hydrology objects
// (NOTE: order is important !!!!)
climate_validate();
lid_validate();
if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE;
if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE;
for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i);
for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i);
for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i); //(5.1.013)
for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i);
// --- compute geometry tables for each shape curve
j = 0;
for ( i=0; i<Nobjects[CURVE]; i++ )
{
if ( Curve[i].curveType == SHAPE_CURVE )
{
Curve[i].refersTo = j;
Shape[j].curve = i;
if ( !shape_validate(&Shape[j], &Curve[i]) )
report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
j++;
}
}
// --- validate links before nodes, since the latter can
// result in adjustment of node depths
for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth;
for ( i=0; i<Nobjects[LINK]; i++) link_validate(i);
for ( i=0; i<Nobjects[NODE]; i++) node_validate(i);
// --- adjust time steps if necessary
if ( DryStep < WetStep )
{
report_writeWarningMsg(WARN06, "");
DryStep = WetStep;
}
if ( RouteStep > (double)WetStep )
{
report_writeWarningMsg(WARN07, "");
RouteStep = WetStep;
}
// --- adjust individual reporting flags to match global reporting flag
if ( RptFlags.subcatchments == ALL )
for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE;
if ( RptFlags.nodes == ALL )
for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE;
if ( RptFlags.links == ALL )
for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE;
// --- validate dynamic wave options
if ( RouteModel == DW ) dynwave_validate();
// --- adjust number of parallel threads to be used //(5.1.013)
#pragma omp parallel //(5.1.008)
{
if ( NumThreads == 0 ) NumThreads = omp_get_num_threads(); //(5.1.008)
else NumThreads = MIN(NumThreads, omp_get_num_threads()); //(5.1.008)
}
if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1; //(5.1.008)
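// (Heuristic: with fewer than 4 links per requested thread, parallel dynamic
//  wave routing is not worth the overhead, so fall back to a single thread.)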
}
//=============================================================================
void project_close()
//
// Input: none
// Output: none
// Purpose: closes a SWMM project.
//
{
deleteObjects();
deleteHashTables();
}
//=============================================================================
int project_init(void)
//
// Input: none
// Output: returns an error code
// Purpose: initializes the internal state of all objects.
//
{
int j;
climate_initState();
lid_initState();
for (j=0; j<Nobjects[TSERIES]; j++) table_tseriesInit(&Tseries[j]);
for (j=0; j<Nobjects[GAGE]; j++) gage_initState(j);
for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j);
for (j=0; j<Nobjects[NODE]; j++) node_initState(j);
for (j=0; j<Nobjects[LINK]; j++) link_initState(j);
return ErrorCode;
}
//=============================================================================
int project_addObject(int type, char *id, int n)
//
// Input: type = object type
// id = object ID string
// n = object index
// Output: returns 0 if object already added, 1 if not, -1 if hashing fails
// Purpose: adds an object ID to a hash table
//
{
int result;
int len;
char *newID;
// --- do nothing if object already placed in hash table
if ( project_findObject(type, id) >= 0 ) return 0;
// --- use memory from the hash tables' common memory pool to store
// a copy of the object's ID string
len = strlen(id) + 1;
newID = (char *) Alloc(len*sizeof(char));
strcpy(newID, id);
// --- insert object's ID into the hash table for that type of object
result = HTinsert(Htable[type], newID, n);
if ( result == 0 ) result = -1;
return result;
}
//=============================================================================
int project_findObject(int type, char *id)
//
// Input: type = object type
// id = object ID
// Output: returns index of object with given ID, or -1 if ID not found
// Purpose: uses hash table to find index of an object with a given ID.
//
{
return HTfind(Htable[type], id);
}
//=============================================================================
char *project_findID(int type, char *id)
//
// Input: type = object type
// id = ID name being sought
// Output: returns pointer to location where object's ID string is stored
// Purpose: uses hash table to find address of given string entry.
//
{
return HTfindKey(Htable[type], id);
}
//=============================================================================
double ** project_createMatrix(int nrows, int ncols)
//
// Input: nrows = number of rows (0-based)
// ncols = number of columns (0-based)
// Output: returns a pointer to a matrix
// Purpose: allocates memory for a matrix of doubles.
//
{
int i,j;
double **a;
// --- allocate pointers to rows
a = (double **) malloc(nrows * sizeof(double *));
if ( !a ) return NULL;
// --- allocate rows and set pointers to them
a[0] = (double *) malloc (nrows * ncols * sizeof(double));
if ( !a[0] ) return NULL;
for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols;
for ( i = 0; i < nrows; i++)
{
for ( j = 0; j < ncols; j++) a[i][j] = 0.0;
}
// --- return pointer to array of pointers to rows
return a;
}
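// (Usage sketch, hypothetical names: double **m = project_createMatrix(3, 4);
//  ... use m[i][j] ...; project_freeMatrix(m); the two calls must be paired.)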
//=============================================================================
void project_freeMatrix(double **a)
//
// Input: a = matrix of floats
// Output: none
// Purpose: frees memory allocated for a matrix of doubles.
//
{
if ( a != NULL )
{
if ( a[0] != NULL ) free( a[0] );
free( a );
}
}
//=============================================================================
int project_readOption(char* s1, char* s2)
//
// Input: s1 = option keyword
// s2 = string representation of option's value
// Output: returns error code
// Purpose: reads a project option from a pair of string tokens.
//
// NOTE: all project options have default values assigned in setDefaults().
//
{
int k, m, h, s;
double tStep;
char strDate[25];
DateTime aTime;
DateTime aDate;
// --- determine which option is being read
k = findmatch(s1, OptionWords);
if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1);
switch ( k )
{
// --- choice of flow units
case FLOW_UNITS:
m = findmatch(s2, FlowUnitWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
FlowUnits = m;
if ( FlowUnits <= MGD ) UnitSystem = US;
else UnitSystem = SI;
break;
// --- choice of infiltration modeling method
case INFIL_MODEL:
m = findmatch(s2, InfilModelWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
InfilModel = m;
break;
// --- choice of flow routing method
case ROUTE_MODEL:
m = findmatch(s2, RouteModelWords);
if ( m < 0 ) m = findmatch(s2, OldRouteModelWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
if ( m == NO_ROUTING ) IgnoreRouting = TRUE;
else RouteModel = m;
if ( RouteModel == EKW ) RouteModel = KW;
break;
// --- simulation start date
case START_DATE:
if ( !datetime_strToDate(s2, &StartDate) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- simulation start time of day
case START_TIME:
if ( !datetime_strToTime(s2, &StartTime) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- simulation ending date
case END_DATE:
if ( !datetime_strToDate(s2, &EndDate) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- simulation ending time of day
case END_TIME:
if ( !datetime_strToTime(s2, &EndTime) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- reporting start date
case REPORT_START_DATE:
if ( !datetime_strToDate(s2, &ReportStartDate) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- reporting start time of day
case REPORT_START_TIME:
if ( !datetime_strToTime(s2, &ReportStartTime) )
{
return error_setInpError(ERR_DATETIME, s2);
}
break;
// --- day of year when street sweeping begins or when it ends
// (year is arbitrarily set to 1947 so that the dayOfYear
// function can be applied)
case SWEEP_START:
case SWEEP_END:
strcpy(strDate, s2);
strcat(strDate, "/1947");
if ( !datetime_strToDate(strDate, &aDate) )
{
return error_setInpError(ERR_DATETIME, s2);
}
m = datetime_dayOfYear(aDate);
if ( k == SWEEP_START ) SweepStart = m;
else SweepEnd = m;
break;
// --- number of antecedent dry days
case START_DRY_DAYS:
StartDryDays = atof(s2);
if ( StartDryDays < 0.0 )
{
return error_setInpError(ERR_NUMBER, s2);
}
break;
// --- runoff or reporting time steps
// (input is in hrs:min:sec format, time step saved as seconds)
case WET_STEP:
case DRY_STEP:
case REPORT_STEP:
case RULE_STEP: //(5.1.013)
if ( !datetime_strToTime(s2, &aTime) )
{
return error_setInpError(ERR_DATETIME, s2);
}
datetime_decodeTime(aTime, &h, &m, &s);
h += 24*(int)aTime;
s = s + 60*m + 3600*h;
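// e.g. s2 = "0:15:00" decodes to h = 0, m = 15, s = 0, giving s = 900 seconds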
// --- RuleStep allowed to be 0 while other time steps must be > 0 //(5.1.013)
if (k == RULE_STEP) //
{ //
if (s < 0) return error_setInpError(ERR_NUMBER, s2); //
} //
else if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2); //
switch ( k )
{
case WET_STEP: WetStep = s; break;
case DRY_STEP: DryStep = s; break;
case REPORT_STEP: ReportStep = s; break;
case RULE_STEP: RuleStep = s; break; //(5.1.013)
}
break;
// --- type of damping applied to inertial terms of dynamic wave routing
case INERT_DAMPING:
m = findmatch(s2, InertDampingWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
else InertDamping = m;
break;
// --- Yes/No options (NO = 0, YES = 1)
case ALLOW_PONDING:
case SLOPE_WEIGHTING:
case SKIP_STEADY_STATE:
case IGNORE_RAINFALL:
case IGNORE_SNOWMELT:
case IGNORE_GWATER:
case IGNORE_ROUTING:
case IGNORE_QUALITY:
case IGNORE_RDII:
m = findmatch(s2, NoYesWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
switch ( k )
{
case ALLOW_PONDING: AllowPonding = m; break;
case SLOPE_WEIGHTING: SlopeWeighting = m; break;
case SKIP_STEADY_STATE: SkipSteadyState = m; break;
case IGNORE_RAINFALL: IgnoreRainfall = m; break;
case IGNORE_SNOWMELT: IgnoreSnowmelt = m; break;
case IGNORE_GWATER: IgnoreGwater = m; break;
case IGNORE_ROUTING: IgnoreRouting = m; break;
case IGNORE_QUALITY: IgnoreQuality = m; break;
case IGNORE_RDII: IgnoreRDII = m; break;
}
break;
case NORMAL_FLOW_LTD:
m = findmatch(s2, NormalFlowWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
NormalFlowLtd = m;
break;
case FORCE_MAIN_EQN:
m = findmatch(s2, ForceMainEqnWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
ForceMainEqn = m;
break;
case LINK_OFFSETS:
m = findmatch(s2, LinkOffsetWords);
if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
LinkOffsets = m;
break;
// --- compatibility option for selecting solution method for
// dynamic wave flow routing (NOT CURRENTLY USED)
case COMPATIBILITY:
if ( strcomp(s2, "3") ) Compatibility = SWMM3;
else if ( strcomp(s2, "4") ) Compatibility = SWMM4;
else if ( strcomp(s2, "5") ) Compatibility = SWMM5;
else return error_setInpError(ERR_KEYWORD, s2);
break;
// --- routing or lengthening time step (in decimal seconds)
// (lengthening time step is used in Courant stability formula
// to artificially lengthen conduits for dynamic wave flow routing
// (a value of 0 means that no lengthening is used))
case ROUTE_STEP:
case LENGTHENING_STEP:
if ( !getDouble(s2, &tStep) )
{
if ( !datetime_strToTime(s2, &aTime) )
{
return error_setInpError(ERR_NUMBER, s2);
}
else
{
datetime_decodeTime(aTime, &h, &m, &s);
h += 24*(int)aTime;
s = s + 60*m + 3600*h;
tStep = s;
}
}
if ( k == ROUTE_STEP )
{
if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2);
RouteStep = tStep;
}
else LengtheningStep = MAX(0.0, tStep);
break;
// --- minimum variable time step for dynamic wave routing
case MIN_ROUTE_STEP:
if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 )
return error_setInpError(ERR_NUMBER, s2);
break;
case NUM_THREADS:
m = atoi(s2);
if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
NumThreads = m;
break;
// --- safety factor applied to variable time step estimates under
// dynamic wave flow routing (value of 0 indicates that variable
// time step option not used)
case VARIABLE_STEP:
if ( !getDouble(s2, &CourantFactor) )
return error_setInpError(ERR_NUMBER, s2);
if ( CourantFactor < 0.0 || CourantFactor > 2.0 )
return error_setInpError(ERR_NUMBER, s2);
break;
// --- minimum surface area (ft2 or sq. meters) associated with nodes
// under dynamic wave flow routing
case MIN_SURFAREA:
if (!getDouble(s2, &MinSurfArea)) //(5.1.013)
return error_setInpError(ERR_NUMBER, s2); //(5.1.013)
if (MinSurfArea < 0.0) //(5.1.013)
return error_setInpError(ERR_NUMBER, s2); //(5.1.013)
break;
// --- minimum conduit slope (%)
case MIN_SLOPE:
if ( !getDouble(s2, &MinSlope) )
return error_setInpError(ERR_NUMBER, s2);
if ( MinSlope < 0.0 || MinSlope >= 100 )
return error_setInpError(ERR_NUMBER, s2);
MinSlope /= 100.0;
break;
// --- maximum trials / time step for dynamic wave routing
case MAX_TRIALS:
m = atoi(s2);
if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
MaxTrials = m;
break;
// --- head convergence tolerance for dynamic wave routing
case HEAD_TOL:
if ( !getDouble(s2, &HeadTol) )
{
return error_setInpError(ERR_NUMBER, s2);
}
break;
// --- steady state tolerance on system inflow - outflow
case SYS_FLOW_TOL:
if ( !getDouble(s2, &SysFlowTol) )
{
return error_setInpError(ERR_NUMBER, s2);
}
SysFlowTol /= 100.0;
break;
// --- steady state tolerance on nodal lateral inflow
case LAT_FLOW_TOL:
if ( !getDouble(s2, &LatFlowTol) )
{
return error_setInpError(ERR_NUMBER, s2);
}
LatFlowTol /= 100.0;
break;
// --- method used for surcharging in dynamic wave flow routing //(5.1.013)
case SURCHARGE_METHOD:
m = findmatch(s2, SurchargeWords);
if (m < 0) return error_setInpError(ERR_KEYWORD, s2);
SurchargeMethod = m;
break;
case TEMPDIR: // Temporary Directory
sstrncpy(TempDir, s2, MAXFNAME);
break;
}
return 0;
}
//=============================================================================
void initPointers()
//
// Input: none
// Output: none
// Purpose: assigns NULL to all dynamic arrays for a new project.
//
{
Gage = NULL;
Subcatch = NULL;
Node = NULL;
Outfall = NULL;
Divider = NULL;
Storage = NULL;
Link = NULL;
Conduit = NULL;
Pump = NULL;
Orifice = NULL;
Weir = NULL;
Outlet = NULL;
Pollut = NULL;
Landuse = NULL;
Pattern = NULL;
Curve = NULL;
Tseries = NULL;
Transect = NULL;
Shape = NULL;
Aquifer = NULL;
UnitHyd = NULL;
Snowmelt = NULL;
Event = NULL;
MemPoolAllocated = FALSE;
}
//=============================================================================
void setDefaults()
//
// Input: none
// Output: none
// Purpose: assigns default values to project variables.
//
{
int i, j;
// Project title & temp. file path
for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], "");
strcpy(TempDir, "");
// Interface files
Frain.mode = SCRATCH_FILE; // Use scratch rainfall file
Fclimate.mode = NO_FILE;
Frunoff.mode = NO_FILE;
Frdii.mode = NO_FILE;
Fhotstart1.mode = NO_FILE;
Fhotstart2.mode = NO_FILE;
Finflows.mode = NO_FILE;
Foutflows.mode = NO_FILE;
Frain.file = NULL;
Fclimate.file = NULL;
Frunoff.file = NULL;
Frdii.file = NULL;
Fhotstart1.file = NULL;
Fhotstart2.file = NULL;
Finflows.file = NULL;
Foutflows.file = NULL;
Fout.file = NULL;
Fout.mode = NO_FILE;
// Analysis options
UnitSystem = US; // US unit system
FlowUnits = CFS; // CFS flow units
InfilModel = HORTON; // Horton infiltration method
RouteModel = KW; // Kin. wave flow routing method
SurchargeMethod = EXTRAN; // Use EXTRAN method for surcharging //(5.1.013)
CrownCutoff = 0.96; //(5.1.013)
AllowPonding = FALSE; // No ponding at nodes
InertDamping = SOME; // Partial inertial damping
NormalFlowLtd = BOTH; // Default normal flow limitation
ForceMainEqn = H_W; // Hazen-Williams eqn. for force mains
LinkOffsets = DEPTH_OFFSET; // Use depth for link offsets
LengtheningStep = 0; // No lengthening of conduits
CourantFactor = 0.0; // No variable time step
MinSurfArea = 0.0; // Force use of default min. surface area
MinSlope = 0.0; // No user supplied minimum conduit slope
SkipSteadyState = FALSE; // Do flow routing in steady state periods
IgnoreRainfall = FALSE; // Analyze rainfall/runoff
IgnoreRDII = FALSE; // Analyze RDII
IgnoreSnowmelt = FALSE; // Analyze snowmelt
IgnoreGwater = FALSE; // Analyze groundwater
IgnoreRouting = FALSE; // Analyze flow routing
IgnoreQuality = FALSE; // Analyze water quality
WetStep = 300; // Runoff wet time step (secs)
DryStep = 3600; // Runoff dry time step (secs)
RuleStep = 0; // Rules evaluated at each routing step
RouteStep = 300.0; // Routing time step (secs)
MinRouteStep = 0.5; // Minimum variable time step (sec)
ReportStep = 900; // Reporting time step (secs)
StartDryDays = 0.0; // Antecedent dry days
MaxTrials = 0; // Force use of default max. trials
HeadTol = 0.0; // Force use of default head tolerance
SysFlowTol = 0.05; // System flow tolerance for steady state
LatFlowTol = 0.05; // Lateral flow tolerance for steady state
NumThreads = 0; // Number of parallel threads to use
NumEvents = 0; // Number of detailed routing events
// Deprecated options
SlopeWeighting = TRUE; // Use slope weighting
Compatibility = SWMM4; // Use SWMM 4 up/dn weighting method
// Starting & ending date/time
StartDate = datetime_encodeDate(2004, 1, 1);
StartTime = datetime_encodeTime(0,0,0);
StartDateTime = StartDate + StartTime;
EndDate = StartDate;
EndTime = 0.0;
ReportStartDate = NO_DATE;
ReportStartTime = NO_DATE;
SweepStart = 1;
SweepEnd = 365;
// Reporting options
RptFlags.input = FALSE;
RptFlags.continuity = TRUE;
RptFlags.flowStats = TRUE;
RptFlags.controls = FALSE;
RptFlags.subcatchments = FALSE;
RptFlags.nodes = FALSE;
RptFlags.links = FALSE;
RptFlags.nodeStats = FALSE;
RptFlags.averages = FALSE;
// Temperature data
Temp.dataSource = NO_TEMP;
Temp.tSeries = -1;
Temp.ta = 70.0;
Temp.elev = 0.0;
Temp.anglat = 40.0;
Temp.dtlong = 0.0;
Temp.tmax = MISSING;
// Wind speed data
Wind.type = MONTHLY_WIND;
for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0;
// Snowmelt parameters
Snow.snotmp = 34.0;
Snow.tipm = 0.5;
Snow.rnm = 0.6;
// Snow areal depletion curves for pervious and impervious surfaces
for ( i=0; i<2; i++ )
{
for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0;
}
// Evaporation rates
Evap.type = CONSTANT_EVAP;
for (i=0; i<12; i++)
{
Evap.monthlyEvap[i] = 0.0;
Evap.panCoeff[i] = 1.0;
}
Evap.recoveryPattern = -1;
Evap.recoveryFactor = 1.0;
Evap.tSeries = -1;
Evap.dryOnly = FALSE;
// Climate adjustments
for (i = 0; i < 12; i++)
{
Adjust.temp[i] = 0.0; // additive adjustments
Adjust.evap[i] = 0.0; // additive adjustments
Adjust.rain[i] = 1.0; // multiplicative adjustments
Adjust.hydcon[i] = 1.0; // hyd. conductivity adjustments
}
Adjust.rainFactor = 1.0;
Adjust.hydconFactor = 1.0;
}
//=============================================================================
void openFiles(char *f1, char *f2, char *f3)
//
// Input: f1 = name of input file
// f2 = name of report file
// f3 = name of binary output file
// Output: none
// Purpose: opens a project's input and report files.
//
{
// --- initialize file pointers to NULL
Finp.file = NULL;
Frpt.file = NULL;
Fout.file = NULL;
// --- save file names
sstrncpy(Finp.name, f1, MAXFNAME);
sstrncpy(Frpt.name, f2, MAXFNAME);
sstrncpy(Fout.name, f3, MAXFNAME);
// --- check that file names are not identical
if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3))
{
writecon(FMT11);
ErrorCode = ERR_FILE_NAME;
return;
}
// --- open input and report files
if ((Finp.file = fopen(f1,"rt")) == NULL)
{
writecon(FMT12);
writecon(f1);
ErrorCode = ERR_INP_FILE;
return;
}
if ((Frpt.file = fopen(f2,"wt")) == NULL)
{
writecon(FMT13);
ErrorCode = ERR_RPT_FILE;
return;
}
}
//=============================================================================
void createObjects()
//
// Input: none
// Output: none
// Purpose: allocates memory for project's objects.
//
// NOTE: number of each type of object has already been determined in
// project_readInput().
//
{
int j, k;
// --- allocate memory for each category of object
if ( ErrorCode ) return;
Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage));
Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch));
Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode));
Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall));
Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider));
Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage));
Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink));
Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit));
Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump));
Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice));
Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir));
Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet));
Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut));
Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse));
Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern));
Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable));
Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable));
Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer));
UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd));
Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt));
Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape));
// --- create array of detailed routing event periods
Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent));
Event[NumEvents].start = BIG;
Event[NumEvents].end = BIG + 1.0;
// --- create LID objects
lid_create(Nobjects[LID], Nobjects[SUBCATCH]);
// --- create control rules
ErrorCode = controls_create(Nobjects[CONTROL]);
if ( ErrorCode ) return;
// --- create cross section transects
ErrorCode = transect_create(Nobjects[TRANSECT]);
if ( ErrorCode ) return;
// --- allocate memory for infiltration data
infil_create(Nobjects[SUBCATCH], InfilModel);
// --- allocate memory for water quality state variables
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].initBuildup =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].concPonded = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].surfaceBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double));
}
for (j = 0; j < Nobjects[NODE]; j++)
{
Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Node[j].extInflow = NULL;
Node[j].dwfInflow = NULL;
Node[j].rdiiInflow = NULL;
Node[j].treatment = NULL;
}
for (j = 0; j < Nobjects[LINK]; j++)
{
Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double));
}
// --- allocate memory for land use buildup/washoff functions
for (j = 0; j < Nobjects[LANDUSE]; j++)
{
Landuse[j].buildupFunc =
(TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup));
Landuse[j].washoffFunc =
(TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff));
}
// --- allocate memory for subcatchment landuse factors
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].landFactor =
(TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor));
for (k = 0; k < Nobjects[LANDUSE]; k++)
{
Subcatch[j].landFactor[k].buildup =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
}
}
// --- initialize buildup & washoff functions
for (j = 0; j < Nobjects[LANDUSE]; j++)
{
for (k = 0; k < Nobjects[POLLUT]; k++)
{
Landuse[j].buildupFunc[k].funcType = NO_BUILDUP;
Landuse[j].buildupFunc[k].normalizer = PER_AREA;
Landuse[j].washoffFunc[k].funcType = NO_WASHOFF;
}
}
// --- initialize rain gage properties
for (j = 0; j < Nobjects[GAGE]; j++)
{
Gage[j].tSeries = -1;
strcpy(Gage[j].fname, "");
}
// --- initialize subcatchment properties
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].outSubcatch = -1;
Subcatch[j].outNode = -1;
Subcatch[j].infil = -1;
Subcatch[j].groundwater = NULL;
Subcatch[j].gwLatFlowExpr = NULL;
Subcatch[j].gwDeepFlowExpr = NULL;
Subcatch[j].snowpack = NULL;
Subcatch[j].lidArea = 0.0;
for (k = 0; k < Nobjects[POLLUT]; k++)
{
Subcatch[j].initBuildup[k] = 0.0;
}
}
// --- initialize RDII unit hydrograph properties
for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j);
// --- initialize snowmelt properties
for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j);
// --- initialize storage node exfiltration
for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL;
// --- initialize link properties
for (j = 0; j < Nobjects[LINK]; j++)
{
Link[j].xsect.type = -1;
Link[j].cLossInlet = 0.0;
Link[j].cLossOutlet = 0.0;
Link[j].cLossAvg = 0.0;
Link[j].hasFlapGate = FALSE;
}
for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1;
// --- initialize reporting flags
for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE;
for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE;
for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE;
// --- initialize curves, time series, and time patterns
for (j = 0; j < Nobjects[CURVE]; j++) table_init(&Curve[j]);
for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]);
for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j);
}
//=============================================================================
void deleteObjects()
//
// Input: none
// Output: none
// Purpose: frees memory allocated for a project's objects.
//
// NOTE: care is taken to first free objects that are properties of another
// object before the latter is freed (e.g., we must free a
// subcatchment's land use factors before freeing the subcatchment).
//
{
int j, k;
// --- free memory for landuse factors & groundwater
if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
for (k = 0; k < Nobjects[LANDUSE]; k++)
{
FREE(Subcatch[j].landFactor[k].buildup);
}
FREE(Subcatch[j].landFactor);
FREE(Subcatch[j].groundwater);
gwater_deleteFlowExpression(j);
FREE(Subcatch[j].snowpack);
}
// --- free memory for buildup/washoff functions
if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
{
FREE(Landuse[j].buildupFunc);
FREE(Landuse[j].washoffFunc);
}
// --- free memory for water quality state variables
if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
FREE(Subcatch[j].initBuildup);
FREE(Subcatch[j].oldQual);
FREE(Subcatch[j].newQual);
FREE(Subcatch[j].pondedQual);
FREE(Subcatch[j].concPonded); // allocated in createObjects()
FREE(Subcatch[j].surfaceBuildup); // allocated in createObjects()
FREE(Subcatch[j].totalLoad);
}
if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
{
FREE(Node[j].oldQual);
FREE(Node[j].newQual);
}
if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
{
FREE(Link[j].oldQual);
FREE(Link[j].newQual);
FREE(Link[j].totalLoad);
}
// --- free memory used for rainfall infiltration
infil_delete();
// --- free memory used for storage exfiltration
if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++)
{
if ( Storage[j].exfil )
{
FREE(Storage[j].exfil->btmExfil);
FREE(Storage[j].exfil->bankExfil);
FREE(Storage[j].exfil);
}
}
// --- free memory used for outfall pollutants loads
if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++)
FREE(Outfall[j].wRouted);
// --- free memory used for nodal inflows & treatment functions
if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
{
inflow_deleteExtInflows(j);
inflow_deleteDwfInflows(j);
rdii_deleteRdiiInflow(j);
treatmnt_delete(j);
}
// --- delete table entries for curves and time series
if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++)
table_deleteEntries(&Tseries[j]);
if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++)
table_deleteEntries(&Curve[j]);
// --- delete cross section transects
transect_delete();
// --- delete control rules
controls_delete();
// --- delete LIDs
lid_delete();
// --- now free each major category of object
FREE(Gage);
FREE(Subcatch);
FREE(Node);
FREE(Outfall);
FREE(Divider);
FREE(Storage);
FREE(Link);
FREE(Conduit);
FREE(Pump);
FREE(Orifice);
FREE(Weir);
FREE(Outlet);
FREE(Pollut);
FREE(Landuse);
FREE(Pattern);
FREE(Curve);
FREE(Tseries);
FREE(Aquifer);
FREE(UnitHyd);
FREE(Snowmelt);
FREE(Shape);
FREE(Event);
}
//=============================================================================
void createHashTables()
//
// Input: none
// Output: none
// Purpose: allocates memory for object ID hash tables
//
{
int j;
MemPoolAllocated = FALSE;
for (j = 0; j < MAX_OBJ_TYPES ; j++)
{
Htable[j] = HTcreate();
if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
}
// --- initialize memory pool used to store object ID's
if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
else MemPoolAllocated = TRUE;
}
//=============================================================================
void deleteHashTables()
//
// Input: none
// Output: none
// Purpose: frees memory allocated for object ID hash tables
//
{
int j;
for (j = 0; j < MAX_OBJ_TYPES; j++)
{
if ( Htable[j] != NULL ) HTfree(Htable[j]);
}
// --- free object ID memory pool
if ( MemPoolAllocated ) AllocFreePool();
}
//=============================================================================
|
GB_unop__minv_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_fp32_fp32)
// op(A') function: GB (_unop_tran__minv_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = (1.0F)/z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = (1.0F)/z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = (1.0F)/z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
serial_tree_learner.h
|
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>
#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
namespace LightGBM {
using json11::Json;
/*! \brief forward declaration */
class CostEfficientGradientBoosting;
/*!
* \brief Used for learning a tree on a single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
friend CostEfficientGradientBoosting;
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data,
bool is_constant_hessian) override {
ResetTrainingDataInner(train_data, is_constant_hessian, true);
}
void ResetIsConstantHessian(bool is_constant_hessian) override {
share_state_->is_constant_hessian = is_constant_hessian;
}
virtual void ResetTrainingDataInner(const Dataset* train_data,
bool is_constant_hessian,
bool reset_multi_val_bin);
void ResetConfig(const Config* config) override;
inline void SetForcedSplit(const Json* forced_split_json) override {
if (forced_split_json != nullptr && !forced_split_json->is_null()) {
forced_split_json_ = forced_split_json;
} else {
forced_split_json_ = nullptr;
}
}
Tree* Train(const score_t* gradients, const score_t *hessians) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
if (subset == nullptr) {
data_partition_->SetUsedDataIndices(used_indices, num_data);
share_state_->is_use_subrow = false;
} else {
ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
share_state_->is_use_subrow = true;
share_state_->is_subrow_copied = false;
share_state_->bagging_use_indices = used_indices;
share_state_->bagging_indices_cnt = num_data;
}
}
void AddPredictionToScore(const Tree* tree,
double* out_score) const override {
if (tree->num_leaves() <= 1) {
return;
}
CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
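// Each leaf's output value is added to the score of every data row that the
// data partition currently assigns to that leaf; rows belong to exactly one
// leaf, so the loop below can safely be parallelized over leaves.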
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
protected:
void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_,
int feature_index, int real_fidx,
bool is_feature_used, int num_data,
const LeafSplits* leaf_splits,
SplitInfo* best_split);
void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);
void RecomputeBestSplitForLeaf(int leaf, SplitInfo* split);
/*!
* \brief Some initial work before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial work before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits();
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according to the best split.
* \param tree Current tree, which will be split by this function.
* \param best_leaf The index of the leaf that will be split.
* \param left_leaf The index of the left leaf after the split.
* \param right_leaf The index of the right leaf after the split.
*/
inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf,
int* right_leaf) {
SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
}
void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
bool update_cnt);
/* Force splits using the forced_split_json dict and return the number of splits forced. */
int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf,
int* cur_depth);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores minimum and maximum constraints for each leaf */
std::unique_ptr<LeafConstraintsBase> constraints_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimization */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimization */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
/*! \brief used to cache historical histograms to speed up training */
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
ColSampler col_sampler_;
const Json* forced_split_json_;
std::unique_ptr<TrainingShareStates> share_state_;
std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
if (leaf_idx >= 0) {
return data_partition_->leaf_count(leaf_idx);
} else {
return 0;
}
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
openmp-ex32.c
|
/* Even named critical regions can't prevent deadlocks */
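/* Deadlock mechanism: one thread can hold critical(A) while waiting to enter
 * critical(B) at the same time as another thread holds critical(B) while
 * waiting for critical(A); neither can ever proceed. A common remedy is to
 * acquire nested named critical sections in one globally consistent order. */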
#include <stdio.h>
#include <omp.h>
int main(void)
{
#pragma omp parallel
{
int id = omp_get_thread_num();
#pragma omp critical(A)
{
printf("I am thread %d and I am in A, waiting for B...",id);
fflush(stdout);
#pragma omp critical(B)
{
printf("got it!\n");
fflush(stdout);
}
}
#pragma omp critical(B)
{
printf("I am thread %d and I am in B, waiting for A...\n",id);
fflush(stdout);
#pragma omp critical(A)
{
printf("got it!\n");
fflush(stdout);
}
}
}
return 0;
}
|
common_main.h
|
#ifndef COMMON_MAIN_H
#define COMMON_MAIN_H
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <chrono>
#if defined(__GNUC__) || defined(__SUNPRO_CC)
#include <pthread.h>
#endif
#include "gm.h"
class main_t
{
protected:
gm_graph G;
int num_threads;
bool is_all_thread_mode() {return num_threads == -1;}
public:
main_t() {time_to_exclude = 0; num_threads = 0;}
void pin_CPU()
{
#if (defined(__GNUC__) || defined(__SUNPRO_CC)) && !defined(_WIN32)
#pragma omp parallel
{
pthread_t thread;
thread = pthread_self();
cpu_set_t CPU;
CPU_ZERO(&CPU);
CPU_SET(omp_get_thread_num(), &CPU);
pthread_setaffinity_np(thread, sizeof(CPU), &CPU);
}
#endif
}
virtual void main(int argc, char** argv)
{
bool b;
if (argc < 3) {
printf("%s <graph_name> <num_threads> ", argv[0]);
print_arg_info();
printf("\n");
exit(EXIT_FAILURE);
}
int new_argc = argc - 3;
char** new_argv = &(argv[3]);
b = check_args(new_argc, new_argv);
if (!b) {
printf("error procesing argument\n");
printf("%s <graph_name> <num_threads> ", argv[0]);
print_arg_info();
printf("\n");
exit(EXIT_FAILURE);
}
int num = atoi(argv[2]);
num_threads = num;
if (num == -1)
{
printf("exploration mode\n");
}
else
{
printf("running with %d threads\n", num);
}
//--------------------------------------------
// Load graph and creating reverse edges
//--------------------------------------------
char *fname = argv[1];
auto T1 = std::chrono::high_resolution_clock::now();
b = G.load_binary(fname);
if (!b) {
printf("error reading graph\n");
exit(EXIT_FAILURE);
}
auto T2 = std::chrono::high_resolution_clock::now();
printf("graph loading time(ms)=%lf\n",
std::chrono::duration_cast<std::chrono::nanoseconds>(T2 - T1).count() / 1000000.0
);
T1 = std::chrono::high_resolution_clock::now();
G.make_reverse_edges();
T2 = std::chrono::high_resolution_clock::now();
printf("reverse edge creation time(ms)=%lf\n",
std::chrono::duration_cast<std::chrono::nanoseconds>(T2 - T1).count() / 1000000.0
);
//------------------------------------------------
// Any extra preparation step (provided by the user)
//------------------------------------------------
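// In exploration mode (num_threads == -1) the algorithm is run repeatedly,
// doubling the thread count from 1 up to 32 to sweep scalability.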
if (num == -1)
{
int max=32;
for(int i =1; i <=max; i=i*2)
{
gm_rt_set_num_threads(i); // gm_runtime.h
do_main_steps();
}
}
else {
gm_rt_set_num_threads(num); // gm_runtime.h
do_main_steps();
}
}
void do_main_steps()
{
printf("\n");
pin_CPU();
bool b = prepare();
if (!b) {
printf("Error prepare data\n");
exit(EXIT_FAILURE);
}
auto T1 = std::chrono::high_resolution_clock::now();
b = run();
auto T2 = std::chrono::high_resolution_clock::now();
printf("[%d]running_time(ms)=%lf\n",
gm_rt_get_num_threads(),
std::chrono::duration_cast<std::chrono::nanoseconds>(T2 - T1).count() / 1000000.0
- time_to_exclude
);
fflush(stdout);
if (!b) {
printf("Error runing algortihm\n");
exit(EXIT_FAILURE);
}
b = post_process();
if (!b) {
printf("Error post processing\n");
exit(EXIT_FAILURE);
}
//----------------------------------------------
// Clean up routine
//----------------------------------------------
b = cleanup();
if (!b) exit(EXIT_FAILURE);
}
virtual bool check_answer() { return true; }
virtual bool run() = 0;
virtual bool prepare() { return true;}
virtual bool post_process() { return true;}
virtual bool cleanup() {return true;}
// check remaining arguments
virtual bool check_args(int argc, char** argv) {return true;}
virtual void print_arg_info() {}
protected:
gm_graph& get_graph() {return G;}
void add_time_to_exlude(double ms) {time_to_exclude += ms;}
double time_to_exclude;
};
#endif
|
ptsrc.c
|
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#include <float.h>
#include "fastsphere.h"
#include "fsht.h"
#include "translator.h"
#include "util.h"
/* Compute directivity envelope dot(s, ax) * exp(alpha * (1 - dot(s, ax)**2)),
* where s and ax are both unit vectors. If ax is NULL or has zero magnitude,
* 1.0 is always returned to indicate an omnidirectional directivity envelope. */
double directivity (double *ax, double *s, double alpha) {
double ca;
/* No directivity axis means the pattern is omnidirectional. */
if (!ax || DVDOT(ax, ax) < DBL_EPSILON) return 1.0;
ca = DVDOT(ax, s);
return ca * exp(alpha * (1. - ca * ca));
}
/* Compute the incoming far-field expansion of a point source embedded in a
* material with background wave number bgk and located at coordinates loc
* relative to the center of the insonified sphere described by the shdata
* structure sphere. The expansion is added to the representations in spec. If
* ax is not null and does not have zero magnitude, it represents the
* directivity axis of the source that has a beam width parameter alpha. The
* axis must be normalized by the caller. If ax is NULL or has zero magnitude,
* an undirected point source is used regardless of the value of alpha. */
int ptsrcexp (complex double *spec, complex double bgk, shdata *sphere,
complex double mag, double *loc, double *ax, double alpha) {
trdesc trans;
int nterm, i, j;
double s[3], dist, phi, dphi, st;
complex double *tptr, *sptr, scale = mag * bgk / (4 * M_PI);
/* Allocate space to hold the far-field expansion of a point source. */
nterm = sphere->ntheta * sphere->nphi;
/* The translation direction is the negative of the source direction. */
trans.sdir[0] = -loc[0];
trans.sdir[1] = -loc[1];
trans.sdir[2] = -loc[2];
/* Translation distance. */
dist = sqrt(DVDOT(trans.sdir, trans.sdir));
trans.kr = bgk * dist;
/* Normalize translation direction. */
trans.sdir[0] /= dist; trans.sdir[1] /= dist; trans.sdir[2] /= dist;
trans.trunc = sphere->deg;
trans.type = TRPLANE;
/* Build the translator representing an undirected point-source. */
trans.trdata = malloc (nterm * sizeof(complex double));
translator (&trans, sphere->ntheta, sphere->nphi, sphere->theta);
/* There is no directivity axis if it has zero magnitude. */
if (ax && DVDOT(ax, ax) < DBL_EPSILON) ax = NULL;
dphi = 2 * M_PI / MAX(sphere->nphi, 1);
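/* The named critical section below serializes accumulation into the shared
 * spectrum spec when this routine is invoked from inside a parallel region;
 * each update of *sptr is a read-modify-write that would otherwise race. */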
#pragma omp critical(incaug)
for (i = 0, tptr = trans.trdata, sptr = spec; i < sphere->ntheta; ++i) {
s[2] = sphere->theta[i];
st = sin(acos(s[2]));
for (j = 0; j < sphere->nphi; ++j, ++tptr, ++sptr) {
phi = j * dphi;
s[0] = st * cos(phi);
s[1] = st * sin(phi);
*sptr += (*tptr) * scale * directivity (ax, s, alpha);
}
}
free (trans.trdata);
return nterm;
}
|
rawSHA1_fmt_plug.c
|
/*
* This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Optimised set_key() and reduced binary size by magnum, 2012
*
* OMP added May 2013, JimF
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA1);
#else
#include <string.h>
#include "arch.h"
#include "sha.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifdef MMX_COEF
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "sse-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA1"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#ifdef MMX_COEF
# define NBKEYS (MMX_COEF * SHA1_SSE_PARA)
# define DO_MMX_SHA1(in,out,n) SSESHA1body(in, out, NULL, n)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define FORMAT_TAG "$dynamic_26$"
#define TAG_LENGTH 12
#define HASH_LENGTH 40
#define CIPHERTEXT_LENGTH (HASH_LENGTH + TAG_LENGTH)
#define DIGEST_SIZE 20
#define BINARY_SIZE 20 // source()
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
static struct fmt_tests tests[] = {
{"c3e337f070b64a50e9d31ac3f9eda35120e29d6c", "digipalmw221u"},
{"2fbf0eba37de1d1d633bc1ed943b907f9b360d4c", "azertyuiop1"},
{"A9993E364706816ABA3E25717850C26C9CD0D89D", "abc"},
{FORMAT_TAG "A9993E364706816ABA3E25717850C26C9CD0D89D", "abc"},
{"f879f8090e92232ed07092ebed6dc6170457a21d", "azertyuiop2"},
{"1813c12f25e64931f3833b26e999e26e81f9ad24", "azertyuiop3"},
{"095bec1163897ac86e393fa16d6ae2c2fce21602", "7850"},
{"dd3fbb0ba9e133c4fd84ed31ac2e5bc597d61774", "7858"},
{NULL}
};
#ifdef MMX_COEF
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + (3-((i)&3)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*4*MMX_COEF ) //for endianity conversion
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef MMX_COEF
static ARCH_WORD_32 (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE / 4];
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
#ifndef MMX_COEF
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#else
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
#endif
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
if (strlen(ciphertext) != HASH_LENGTH)
return 0;
for (i = 0; i < HASH_LENGTH; i++){
if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
|| (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
strncpy(out, FORMAT_TAG, sizeof(out));
memcpy(&out[TAG_LENGTH], ciphertext, HASH_LENGTH);
out[CIPHERTEXT_LENGTH] = 0;
strlwr(&out[TAG_LENGTH]);
return out;
}
#ifdef MMX_COEF
#define HASH_OFFSET (index&(MMX_COEF-1))+((index%NBKEYS)/MMX_COEF)*MMX_COEF*5
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }
#endif
#ifdef MMX_COEF
static void set_key(char *key, int index)
{
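/* Keys are laid out interleaved across the MMX_COEF SIMD lanes in big-endian
 * order (via JOHNSWAP). The mandatory SHA-1 0x80 padding byte is appended
 * right after the key, stale words left over from a previous, longer key are
 * zeroed, and the key length in bits is stored in buffer word 15, so
 * crypt_all() can run the SSE SHA-1 body directly on this buffer. */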
const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80 << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += MMX_COEF;
}
*keybuf_word = 0x80000000;
key_cleaning:
keybuf_word += MMX_COEF;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF;
}
keybuffer[15*MMX_COEF] = len << 3;
}
#else
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}
#endif
#ifdef MMX_COEF
static char *get_key(int index)
{
static char out[PLAINTEXT_LENGTH + 1];
unsigned int i;
ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[15*MMX_COEF + (index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF] >> 3;
for(i=0;i<len;i++)
out[i] = ((char*)saved_key)[GETPOS(i, index)];
out[i] = 0;
return (char*)out;
}
#else
static char *get_key(int index) {
return saved_key[index];
}
#endif
static void *binary(char *ciphertext)
{
static unsigned char *realcipher;
int i;
if (!realcipher)
realcipher = mem_alloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);
ciphertext += TAG_LENGTH;
for(i=0;i<DIGEST_SIZE;i++)
{
realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
}
#ifdef MMX_COEF
alter_endianity(realcipher, DIGEST_SIZE);
#endif
return (void*)realcipher;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
for (index = 0; index < loops; ++index)
#endif
{
#if MMX_COEF
DO_MMX_SHA1(saved_key[index], crypt_key[index], SSEi_MIXED_IN);
#else
SHA_CTX ctx;
SHA1_Init( &ctx );
SHA1_Update( &ctx, (unsigned char*) saved_key[index], strlen( saved_key[index] ) );
SHA1_Final( (unsigned char*) crypt_key[index], &ctx);
#endif
}
return count;
}
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
#ifdef MMX_COEF
if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
#endif
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF+i*MMX_COEF])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static char *source(char *source, void *binary)
{
static char Buf[CIPHERTEXT_LENGTH + 1];
unsigned char realcipher[BINARY_SIZE];
unsigned char *cpi;
char *cpo;
int i;
memcpy(realcipher, binary, BINARY_SIZE);
#ifdef MMX_COEF
alter_endianity(realcipher, BINARY_SIZE);
#endif
strcpy(Buf, FORMAT_TAG);
cpo = &Buf[TAG_LENGTH];
cpi = realcipher;
for (i = 0; i < BINARY_SIZE; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
struct fmt_main fmt_rawSHA1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
relu6_ref.c
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: [email protected]
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
int ref_relu6_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
int w = input_tensor->dims[3];
int h = output_tensor->dims[2];
int channels = input_tensor->dims[1];
int size = h * w;
int c_step = h * w;
float* input_data = input_tensor->data;
float* out_data = output_tensor->data;
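/* ReLU6 clamps each element to the range [0, 6]: dst[i] = min(max(src[i], 0), 6).
 * Channels are independent, so the outer loop is parallelized across them. */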
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input_data + c_step * q;
float* dst = out_data + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = src[i];
if (dst[i] > 6)
dst[i] = 6;
if (dst[i] < 0)
dst[i] = 0;
}
}
return 0;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
struct ir_tensor* output_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
ref_relu6_fp32(input_tensor, output_tensor, exec_graph->num_thread);
return 0;
}
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* node = exec_node->ir_node;
struct ir_graph* ir_graph = node->graph;
struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
static int reg_relu6_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}
static int unreg_relu6_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_relu6_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu6_hcl_ops);
|
convolution_quantize_arm.h
|
// SenseNets is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv_quantize_neon(const Mat &bottom_blob, Mat &bottom_blob_s8, const float dataScale)
{
float ufDataFactor = dataScale;
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int size = w * h;
int FPSCR_value = 0;
#if NCNN_INT8_INFO
fprintf(stderr, "scale %f\n", dataScale);
#endif
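// Save the current FPSCR and clear its rounding-mode bits (RMode, bits [23:22])
// so the VCVTR float-to-int conversions below round to nearest; the saved value
// is restored at the end of this function.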
asm volatile(
"VMRS r10, FPSCR \n"
"MOV %0, r10 \n"
"BIC r10, r10,#0x00c00000 \n"
"VMSR FPSCR,r10 \n"
: "=r"(FPSCR_value)
: "0"(FPSCR_value)
: "cc", "r10"
);
size = w*h;
#pragma omp parallel for
for (int qidx=0; qidx<inch; qidx++)
{
const float* img0 = bottom_blob.channel(qidx);
signed char* img0_s8 = bottom_blob_s8.channel(qidx);
int nn = size >> 3;
int remain = size & 7;
if(nn > 0)
{
asm volatile(
"PLD [%1, #256] \n"
"VLD1.F32 {D0-D3}, [%1]! \n"
"VDUP.32 Q10, %3 \n"
"0: \n"
"VMUL.F32 Q0,Q0,Q10 \n"
"VMUL.F32 Q1,Q1,Q10 \n"
"VCVTR.S32.F32 S0,S0 \n"
"VCVTR.S32.F32 S1,S1 \n"
"VCVTR.S32.F32 S2,S2 \n"
"VCVTR.S32.F32 S3,S3 \n"
"VCVTR.S32.F32 S4,S4 \n"
"VCVTR.S32.F32 S5,S5 \n"
"VCVTR.S32.F32 S6,S6 \n"
"VCVTR.S32.F32 S7,S7 \n"
"VQMOVN.S32 D4,Q0 \n"
"VQMOVN.S32 D5,Q1 \n"
"PLD [%1, #256] \n"
"VLD1.F32 {D0-D3}, [%1]! \n"
"VQMOVN.S16 D4,Q2 \n"
"VST1.8 {D4}, [%2]! \n"
"SUBS %0, #1 \n"
"BNE 0b \n"
"SUB %1, #32 \n"
: "=r"(nn), // %0
"=r"(img0), // %1
"=r"(img0_s8), // %2
"=r"(ufDataFactor) // %3
: "0"(nn),
"1"(img0),
"2"(img0_s8),
"3"(ufDataFactor)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11"
);
}
if(remain > 0)
{
asm volatile(
"VLD1.F32 {D0[0]}, [%1]! \n"
"VDUP.32 Q10, %3 \n"
"0: \n"
"VMUL.F32 Q0,Q0,Q10 \n"
"VCVTR.S32.F32 S0,S0 \n"
"VQMOVN.S32 D4,Q0 \n"
"VLD1.F32 {D0[0]}, [%1]! \n"
"VQMOVN.S16 D4,Q2 \n"
"VST1.8 {D4[0]}, [%2]! \n"
"SUBS %0, #1 \n"
"BNE 0b \n"
: "=r"(remain), // %0
"=r"(img0), // %1
"=r"(img0_s8), // %2
"=r"(ufDataFactor) // %3
: "0"(remain),
"1"(img0),
"2"(img0_s8),
"3"(ufDataFactor)
: "cc", "memory", "q0", "q1", "q2", "q10"
);
}
}
//ncnn_comm_print_blob(bottom_blob, PRINT_BLOB_TYPE_S16);
asm volatile(
"MOV r10, %0 \n"
"VMSR FPSCR, r10 \n"
: "=r"(FPSCR_value)
: "0"(FPSCR_value)
: "cc", "r10"
);
}
static void conv_dequantize_neon(Mat &top_blob, const Mat &_bias, const float dataScale, const float weightScale)
{
float ufReverseFactor = 0.f;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
int size = outh * outw;
const float *bias = _bias;
if (0 != dataScale * weightScale)
{
ufReverseFactor = 1 / (dataScale * weightScale);
}
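// Dequantization: each int32 accumulator is converted back to float as
// value / (dataScale * weightScale), the per-channel bias is added, and the
// result is written in place over the int32 data.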
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
const float* img0 = top_blob.channel(p);
int* img0_s32 = (int*)img0;
float* img0_f32 = (float*)img0;
float bias0 = bias ? bias[p] : 0.f;
int nn = size >> 3;
int remain = size & 7;
if(nn > 0)
{
asm volatile(
"PLD [%1, #256] \n"
"VLD1.S32 {D0-D3}, [%1]! \n" //Q0-Q1 data
"VDUP.F32 Q10, %3 \n" //Q10 scale
"VDUP.F32 Q12, %4 \n" //Q12 bias
"0: \n"
"VCVTR.F32.S32 Q0,Q0 \n"
"VCVTR.F32.S32 Q1,Q1 \n"
"VMUL.F32 Q0,Q0,Q10 \n"
"VMUL.F32 Q1,Q1,Q10 \n"
"VADD.F32 Q2,Q0,Q12 \n"
"VADD.F32 Q3,Q1,Q12 \n"
"PLD [%1, #256] \n"
"VLD1.S32 {D0-D3}, [%1]! \n"
"VST1.F32 {D4-D7}, [%2]! \n"
"SUBS %0, #1 \n"
"BNE 0b \n"
"SUB %1, #32 \n"
: "=r"(nn), // %0
"=r"(img0_s32), // %1
"=r"(img0_f32), // %2
"=r"(ufReverseFactor), // %3
"=r"(bias0) // %4
: "0"(nn),
"1"(img0_s32),
"2"(img0_f32),
"3"(ufReverseFactor),
"4"(bias0)
: "cc", "memory", "q0", "q1", "q2", "q4", "q10", "q12"
);
}
if(remain > 0)
{
asm volatile(
"VLD1.F32 {D0[0]}, [%1]! \n" //D0 data
"VDUP.32 Q10, %3 \n" //Q10 scale
"VDUP.32 Q12, %4 \n" //Q12 bias
"0: \n"
"VCVTR.F32.S32 S0,S0 \n"
"VMUL.F32 Q0,Q0,Q10 \n"
"VADD.F32 Q2,Q0,Q12 \n"
//store
"VLD1.F32 {D0[0]}, [%1]! \n"
"VST1.F32 {D4[0]}, [%2]! \n"
"SUBS %0, #1 \n"
"BNE 0b \n"
: "=r"(remain), // %0
"=r"(img0_s32), // %1
"=r"(img0_f32), // %2
"=r"(ufReverseFactor), // %3
"=r"(bias0) // %4
: "0"(remain),
"1"(img0_s32),
"2"(img0_f32),
"3"(ufReverseFactor),
"4"(bias0)
: "cc", "memory", "q0", "q1", "q2", "q4", "q10", "q12"
);
}
}
}
|