opencl_agilekeychain_fmt_plug.c
/* 1Password Agile Keychain cracker patch for JtR. Hacked together during * July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012 Lukas Odzioba <[email protected]> and * Copyright (c) 2012 Dhiru Kholia <dhiru.kholia at gmail.com>, and it is * hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "agilekeychain" project but no actual code is * borrowed from it. * * "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_agilekeychain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_agilekeychain); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "aes.h" #include "common-opencl.h" #include "options.h" #include "jumbo.h" #define FORMAT_LABEL "agilekeychain-opencl" #define FORMAT_NAME "1Password Agile Keychain" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 #define SALTLEN 8 #define CTLEN 1040 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } keychain_password; typedef struct { uint32_t v[16/4]; } keychain_hash; typedef struct { int iterations; int outlen; uint8_t length; uint8_t salt[SALTLEN]; } keychain_salt; static int *cracked; static int any_cracked; static struct fmt_tests keychain_tests[] = { 
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1cc9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81
e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab240ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"}, {"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e666a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"}, 
{"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d69513431333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""}, 
{"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e7638305267537752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"}, {NULL} }; static struct custom_salt { unsigned int nkeys; unsigned int iterations[2]; unsigned int saltlen[2]; unsigned char salt[2][SALTLEN]; unsigned int ctlen[2]; unsigned char ct[2][CTLEN]; } *cur_salt; static cl_int cl_error; static keychain_password *inbuffer; static keychain_hash *outbuffer; static keychain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; static struct fmt_main *self; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(keychain_password) * gws; outsize = sizeof(keychain_hash) * gws; settingsize = sizeof(keychain_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(1, cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (cracked) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(keychain_password), 0, db); // Auto tune execution from shared/included code. 
autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; int ctlen; int saltlen; char *p; if (strncmp(ciphertext, "$agilekeychain$", 15) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 15; if ((p = strtokm(ctcopy, "*")) == NULL) /* nkeys */ goto err; if (!isdec(p)) goto err; if (atoi(p) > 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; if (!isdec(p)) goto err; saltlen = atoi(p); if(saltlen > SALTLEN) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if(hexlenl(p) != saltlen * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* ct length */ goto err; if (!isdec(p)) goto err; ctlen = atoi(p); if (ctlen > CTLEN) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */ goto err; if(hexlenl(p) != ctlen * 2) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 15; /* skip over "$agilekeychain$" */ p = strtokm(ctcopy, "*"); cs.nkeys = atoi(p); p = strtokm(NULL, "*"); cs.iterations[0] = atoi(p); p = strtokm(NULL, "*"); cs.saltlen[0] = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.saltlen[0]; i++) cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.ctlen[0] = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.ctlen[0]; i++) cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltlen[0]); currentsalt.length = cur_salt->saltlen[0]; currentsalt.iterations = cur_salt->iterations[0]; currentsalt.outlen = 16; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int akcdecrypt(unsigned char *derived_key, unsigned char *data) { unsigned char out[CTLEN]; int n, key_size; AES_KEY akey; unsigned char iv[16]; memcpy(iv, data + CTLEN - 32, 16); if (AES_set_decrypt_key(derived_key, 128, &akey) < 0) fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n"); AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT); n = check_pkcs_pad(out, CTLEN, 16); if (n < 0) return -1; key_size = n / 8; if (key_size != 128 && key_size != 192 && key_size != 256) // "invalid key size" return -1; return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (!akcdecrypt((unsigned char*)outbuffer[index].v, cur_salt->ct[0])) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations[0]; } struct fmt_main fmt_opencl_agilekeychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, { "iteration count", }, keychain_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
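The candidate test in akcdecrypt() above leans on JtR's shared check_pkcs_pad() helper, defined elsewhere in the tree. As a rough stand-alone illustration of that step only (an assumption about the idea, not JtR's helper or its exact return convention), a PKCS#7 well-formedness check over the final decrypted block looks like this:

#include <stddef.h>

/* Sketch of a PKCS#7 padding check: the last byte gives the pad length,
 * and every pad byte must carry that same value. */
static int pkcs7_pad_ok(const unsigned char *buf, size_t len, size_t blocksize)
{
    size_t i, pad;

    if (len < blocksize)                 /* need at least one full block */
        return 0;
    pad = buf[len - 1];
    if (pad == 0 || pad > blocksize)     /* pad length must be 1..blocksize */
        return 0;
    for (i = len - pad; i < len; i++)    /* all pad bytes carry the pad value */
        if (buf[i] != pad)
            return 0;
    return 1;
}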
GB_unop__identity_fp32_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp32_uint64) // op(A') function: GB (_unop_tran__identity_fp32_uint64) // C type: float // A type: uint64_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp32_uint64) ( float *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp32_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
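For readers tracing the macro layers, the dense (Ab == NULL) branch above boils down to a statically scheduled OpenMP cast loop. A stand-alone sketch with illustrative names (not part of GraphBLAS):

#include <stdint.h>

void cast_u64_to_f32(float *Cx, const uint64_t *Ax, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
        Cx[p] = (float) Ax[p];   /* the cast the generated GB_CAST macro performs */
}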
8310.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <[email protected]>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"

/* Array initialization. */
static void init_array (int nx, int ny,
                        DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                        DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int i, j;

  for (i = 0; i < ny; i++)
    x[i] = i * M_PI;
  for (i = 0; i < nx; i++)
    for (j = 0; j < ny; j++)
      A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int i;

  for (i = 0; i < nx; i++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
    if (i % 20 == 0) fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_atax(int nx, int ny,
                        DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                        DATA_TYPE POLYBENCH_1D(x,NY,ny),
                        DATA_TYPE POLYBENCH_1D(y,NY,ny),
                        DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
  /* Each loop is its own parallel region; wrapping both in an enclosing
     "#pragma omp parallel" block would make every outer thread execute all
     iterations, duplicating and racing on the y[j] updates. */
  #pragma omp parallel for schedule(dynamic, 1) num_threads(1)
  for (i = 0; i < _PB_NY; i++)
    y[i] = 0;
  #pragma omp parallel for private (j) schedule(dynamic, 1) num_threads(1)
  for (i = 0; i < _PB_NX; i++) {
    tmp[i] = 0;
    for (j = 0; j < _PB_NY; j++)
      tmp[i] = tmp[i] + A[i][j] * x[j];
    for (j = 0; j < _PB_NY; j++)
      y[j] = y[j] + A[i][j] * tmp[i];
  }
#pragma endscop
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_atax (nx, ny,
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(x),
               POLYBENCH_ARRAY(y),
               POLYBENCH_ARRAY(tmp));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);

  return 0;
}
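For reference, a race-free multi-threaded phrasing of the same y = A^T (A x) computation, sketched here as an illustration rather than as part of the PolyBench suite, parallelizes each matrix-vector product over its independent output dimension:

void atax_parallel(int nx, int ny, double A[nx][ny],
                   const double x[ny], double y[ny], double tmp[nx])
{
    int i, j;

    #pragma omp parallel for private(j)
    for (i = 0; i < nx; i++) {           /* tmp = A * x: rows are independent */
        tmp[i] = 0.0;
        for (j = 0; j < ny; j++)
            tmp[i] += A[i][j] * x[j];
    }
    #pragma omp parallel for private(i)
    for (j = 0; j < ny; j++) {           /* y = A^T * tmp: columns are independent */
        y[j] = 0.0;
        for (i = 0; i < nx; i++)
            y[j] += A[i][j] * tmp[i];
    }
}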
rotlet_rsrc.c
#include "mex.h" #include "math.h" #define X prhs[0] // Source locations #define F prhs[1] // Source strengths #define IDX prhs[2] // Source indeces #define DIS prhs[3] // Distances #define XI prhs[4] // Ewald Param #define U plhs[0] // Output #ifndef VERBOSE #define VERBOSE 0 #endif #define PI 3.141592653589793 inline void cross(double * a,double * b, double *c) { c[0] = a[1]*b[2] - a[2]*b[1]; c[1] = a[2]*b[0] - a[0]*b[2]; c[2] = a[0]*b[1] - a[1]*b[0]; } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { // input target const int M = mxGetM(X); const double xi = (double) mxGetScalar(XI); const double* restrict x = mxGetPr(X); const double* restrict f = mxGetPr(F); // output U = mxCreateDoubleMatrix(M, 3, mxREAL); double* restrict u = mxGetPr(U); if(VERBOSE) mexPrintf("[FS Rotlet Real space ] MEX N=%d ",M); // Loop through the cell #ifdef _OPENMP #pragma omp parallel for #endif for (int m=0; m<M; m++) { const mxArray * _IDX = mxGetCell(IDX, m); const mxArray * _DIS = mxGetCell(DIS, m); const int N= mxGetN(_IDX); // number of the source points in nblist double* idx= (double*) mxGetPr(_IDX);// index pointer of the source points in nblist double* dis= mxGetPr(_DIS); // distance pointer of the source points in nblist double p[3], fxr[3]; p[0] = 0; p[1] = 0; p[2] = 0; // First element is the target itself; see MATLAB doc for rangesearch. for(int n = 1; n<N; n++){ int idxn = (int) idx[n]-1; double fn[] = {f[idxn], f[idxn+M], f[idxn+2*M]}; double rvec[] = {x[m]-x[idxn], x[m+M]-x[idxn+M],x[m+2*M]-x[idxn+2*M]}; double r = dis[n]; double r2= r*r; cross(fn,rvec,fxr); double A = ( erfc(xi*r)/r + 2.0*xi*exp(-xi*xi*r2)/sqrt(PI) )/r2; p[0] += A*fxr[0]; p[1] += A*fxr[1]; p[2] += A*fxr[2]; } u[m ] = p[0]; u[m+ M] = p[1]; u[m+2*M] = p[2]; } }
test-mempool.c
/* xZTL: Zone Translation Layer User-space Library * * Copyright 2019 Samsung Electronics * * Written by Ivan L. Picoli <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <omp.h> #include <pthread.h> #include <xztl-mempool.h> #include <xztl.h> #include <ztl-media.h> #include "CUnit/Basic.h" static const char **devname; static void cunit_mempool_assert_ptr(char *fn, void *ptr) { CU_ASSERT((uint64_t)ptr != 0); if (!ptr) printf("\n %s: ptr %p\n", fn, ptr); } static void cunit_mempool_assert_int(char *fn, int status) { CU_ASSERT(status == 0); if (status) printf(" %s: %x\n", fn, status); } static int cunit_mempool_init(void) { return 0; } static int cunit_mempool_exit(void) { return 0; } static void test_mempool_init(void) { int ret; ret = znd_media_register(*devname); cunit_mempool_assert_int("znd_media_register", ret); if (ret) return; ret = xztl_media_init(); cunit_mempool_assert_int("xztl_media_init", ret); if (ret) return; cunit_mempool_assert_int("xztl_mempool_init", xztl_mempool_init()); } static void test_mempool_create(void) { uint16_t type, tid, ents; uint32_t ent_sz; type = XZTL_MEMPOOL_MCMD; tid = 0; ents = 32; ent_sz = 1024; cunit_mempool_assert_int( "xztl_mempool_create", xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL)); } static void test_mempool_destroy(void) { uint16_t type, tid; type = XZTL_MEMPOOL_MCMD; tid = 0; cunit_mempool_assert_int("xztl_mempool_destroy", xztl_mempool_destroy(type, tid)); } static void test_mempool_create_mult(void) { uint16_t type, tid, ents; uint32_t ent_sz; type = XZTL_MEMPOOL_MCMD; ents = 32; ent_sz = 128; #pragma omp parallel for for (tid = 0; tid < 8; tid++) { cunit_mempool_assert_int( "xztl_mempool_create", xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL)); } } static void test_mempool_get_put(void) { uint16_t ent_i, ents = 30; uint32_t ent_sz = 128; struct xztl_mp_entry *ent[ents]; /* Get entries */ for (ent_i = 0; ent_i < ents; ent_i++) { ent[ent_i] = xztl_mempool_get(XZTL_MEMPOOL_MCMD, 0); cunit_mempool_assert_ptr("xztl_mempool_get", ent[ent_i]); } /* Modify entry bytes */ for (ent_i = 0; ent_i < ents; ent_i++) memset(ent[ent_i]->opaque, 0x0, ent_sz); /* Put entries */ for (ent_i = 0; ent_i < ents; ent_i++) { xztl_mempool_put(ent[ent_i], XZTL_MEMPOOL_MCMD, 0); CU_PASS("xztl_mempool_put"); } /* Repeat the process */ for (ent_i = 0; ent_i < ents; ent_i++) { ent[ent_i] = xztl_mempool_get(XZTL_MEMPOOL_MCMD, 0); cunit_mempool_assert_ptr("xztl_mempool_get", ent[ent_i]); } for (ent_i = 0; ent_i < ents; ent_i++) memset(ent[ent_i]->opaque, 0x0, ent_sz); for (ent_i = 0; ent_i < ents; ent_i++) { xztl_mempool_put(ent[ent_i], XZTL_MEMPOOL_MCMD, 0); CU_PASS("xztl_mempool_put"); } } static void test_mempool_exit(void) { cunit_mempool_assert_int("xztl_mempool_exit", xztl_mempool_exit()); cunit_mempool_assert_int("xztl_media_exit", xztl_media_exit()); } int main(int argc, const char **argv) { int failed; if (argc < 2) { printf("Please provide the device path. e.g. 
liou:/dev/nvme0n2\n"); return -1; } devname = &argv[1]; printf("Device: %s\n", *devname); CU_pSuite pSuite = NULL; if (CUE_SUCCESS != CU_initialize_registry()) return CU_get_error(); pSuite = CU_add_suite("Suite_mempool", cunit_mempool_init, cunit_mempool_exit); if (pSuite == NULL) { CU_cleanup_registry(); return CU_get_error(); } if ((CU_add_test(pSuite, "Initialize", test_mempool_init) == NULL) || (CU_add_test(pSuite, "Create a mempool", test_mempool_create) == NULL) || (CU_add_test(pSuite, "Destroy a mempool", test_mempool_destroy) == NULL) || (CU_add_test(pSuite, "Create parallel mempools", test_mempool_create_mult) == NULL) || (CU_add_test(pSuite, "Get and put entries", test_mempool_get_put) == NULL) || (CU_add_test(pSuite, "Closes the module", test_mempool_exit) == NULL)) { CU_cleanup_registry(); return CU_get_error(); } CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); return failed; }
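The main() above follows the usual CUnit Basic-interface life cycle: initialize the registry, add a suite, register tests, run, then report the failure count. A minimal skeleton of that registration pattern, with illustrative suite and test names:

#include "CUnit/Basic.h"

static void test_example(void) { CU_ASSERT(1 + 1 == 2); }

int run_example_suite(void)
{
    CU_pSuite s;

    if (CU_initialize_registry() != CUE_SUCCESS)
        return CU_get_error();
    s = CU_add_suite("Suite_example", NULL, NULL);  /* no per-suite setup/teardown */
    if (s == NULL || CU_add_test(s, "example", test_example) == NULL) {
        CU_cleanup_registry();
        return CU_get_error();
    }
    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
    {
        int failed = (int) CU_get_number_of_tests_failed();
        CU_cleanup_registry();
        return failed;
    }
}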
make_general_basis.h
#ifndef _MAKE_GENERAL_BASIS_H #define _MAKE_GENERAL_BASIS_H #include <iostream> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "openmp.h" #include "misc.h" #include <cmath> #include <cfloat> #include <vector> #include <utility> #include <algorithm> #include <functional> #if defined(_WIN64) #elif defined(_WIN32) #else #include <boost/sort/sort.hpp> #endif namespace basis_general { template<class I,class P> int general_make_basis_blocks(general_basis_core<I,P> *B,const int N_p,const npy_intp Ns,const I basis[],npy_intp basis_begin[],npy_intp basis_end[]){ if(N_p==0){ basis_begin[0] = 0; basis_end[0] = Ns; return 0; } npy_intp begin = 0; npy_intp end = 0; npy_intp s_p = B->get_prefix(basis[0],N_p); npy_intp s_p_next = 0; if(s_p < 0){ return -1; } for(npy_intp i=0;i<Ns;i++){ s_p_next = B->get_prefix(basis[i],N_p); if(s_p_next < 0){ return -1; } else if(s_p_next == s_p){ end++; } else{ basis_begin[s_p] = begin; basis_end[s_p] = end; begin = end++; s_p = s_p_next; } } basis_begin[s_p_next] = begin; basis_end[s_p_next] = end; return 0; } template<class I,class J,class P=signed char> npy_intp make_basis_sequential(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I basis[],J n[]){ npy_intp Ns = 0; I s = 0; bool insuff_mem = false; while(MAX != 0){ if(Ns>=mem_MAX){ insuff_mem = true; break; } double norm = B->check_state(s); npy_intp int_norm = norm; if(!check_nan(norm) && int_norm>0 ){ basis[Ns] = s; n[Ns] = norm; Ns++; } s++; MAX--; } if(insuff_mem){ return -1; } else{ std::reverse(basis,basis+Ns); std::reverse(n,n+Ns); return Ns; } } template<class I,class J,class P=signed char> npy_intp make_basis_pcon_sequential(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I s,I basis[],J n[]){ npy_intp Ns = 0; I nns = 0; // number of next_state calls bool insuff_mem = false; while(MAX!=0){ if(Ns>=mem_MAX){ insuff_mem = true; break; } double norm = B->check_state(s); npy_intp int_norm = norm; if(!check_nan(norm) && int_norm>0 ){ basis[Ns] = s; n[Ns] = norm; Ns++; } s = B->next_state_pcon(s,nns++); MAX--; } if(insuff_mem){ return -1; } else{ std::reverse(basis,basis+Ns); std::reverse(n,n+Ns); return Ns; } } template<class I, class J> struct compare_pair : std::binary_function<std::pair<I,J>,std::pair<I,J>,bool> { bool operator()(const std::pair<I,J> &a, const std::pair<I,J> &b) const {return a.first > b.first;} // bool operator()(const std::pair<I,J> &a, const std::pair<I,J> &b) const {return a.first < b.first;} }; template<class I,class J,class P=signed char> npy_intp make_basis_parallel(general_basis_core<I,P> *B,const npy_intp MAX,const npy_intp mem_MAX,I basis[],J n[]){ npy_intp Ns = 0; bool insuff_mem = false; std::vector<std::pair<I,J> > master_block(mem_MAX); std::vector<npy_intp> master_pos(omp_get_max_threads()+1); std::pair<I,J> * master_block_data = &master_block[0]; npy_intp * master_pos_data = &master_pos[0]; #pragma omp parallel firstprivate(MAX,mem_MAX) shared(master_block_data,master_pos_data,Ns,insuff_mem) { const int nthread = omp_get_num_threads(); const int threadn = omp_get_thread_num(); std::vector<std::pair<I,J> > thread_block(0); const npy_intp block_size = 1.1*mem_MAX/nthread; thread_block.reserve(block_size); npy_intp chunk = MAX - threadn; I s = threadn; while(chunk>0){ double norm = B->check_state(s); npy_intp int_norm = norm; if(!check_nan(norm) && int_norm>0 ){ thread_block.push_back(std::make_pair(s,int_norm)); } s += nthread; chunk-=nthread; } master_pos_data[threadn+1] = thread_block.size(); // get sizes for each thread block into 
shared memory #pragma omp barrier #pragma omp single // calculate the cumulative sum to get data paritions of master_block { for(int i=0;i<nthread;i++){ master_pos_data[i+1] += master_pos_data[i]; } Ns = master_pos_data[nthread]; insuff_mem = Ns > mem_MAX; } if(!insuff_mem){ // load data into master block in parallel const npy_intp start = master_pos_data[threadn]; const npy_intp end = master_pos_data[threadn+1]; npy_intp i = 0; for(npy_intp j=start;j<end;j++){ master_block_data[j] = thread_block[i++]; } #pragma omp barrier #pragma omp master { #if defined(_WIN64) // x64 version std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>()); #elif defined(_WIN32) std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>()); #else boost::sort::block_indirect_sort(master_block_data, master_block_data + Ns, compare_pair<I,J>(),nthread); #endif } #pragma omp barrier #pragma omp for schedule(static) for(npy_intp i=0;i<Ns;i++){ basis[i] = master_block_data[i].first; n[i] = master_block_data[i].second; } } } if(insuff_mem){ return -1; } else{ return Ns; } } template<class I,class J,class P=signed char> npy_intp make_basis_pcon_parallel(general_basis_core<I,P> *B,const npy_intp MAX,const npy_intp mem_MAX,I s,I basis[],J n[]){ npy_intp Ns = 0; bool insuff_mem = false; std::vector<std::pair<I,J> > master_block(mem_MAX); std::vector<npy_intp> master_pos(omp_get_max_threads()+1); std::pair<I,J> * master_block_data = &master_block[0]; npy_intp * master_pos_data = &master_pos[0]; #pragma omp parallel firstprivate(MAX,mem_MAX,s) shared(master_block_data,master_pos_data,Ns,insuff_mem) { const int nthread = omp_get_num_threads(); const int threadn = omp_get_thread_num(); std::vector<std::pair<I,J> > thread_block(0); // local array to store values found by each thread. this reduces the number of critical sections. const npy_intp block_size = 1.1*mem_MAX/nthread; thread_block.reserve(block_size); // preallocate memory for each block so that it does not have to expand during search. 
npy_intp chunk = MAX - threadn; I nns = 0;// number of next_state calls for(int i=0;i<threadn;i++){s=B->next_state_pcon(s,nns++);} while(chunk>0){ double norm = B->check_state(s); npy_intp int_norm = norm; if(!check_nan(norm) && int_norm>0 ){ thread_block.push_back(std::make_pair(s,int_norm)); } for(int i=0;i<nthread;i++){s=B->next_state_pcon(s,nns++);} chunk-=nthread; } master_pos_data[threadn+1] = thread_block.size(); // get sizes for each thread block into shared memory #pragma omp barrier #pragma omp single // calculate the cumulative sum to get data paritions of master_block { for(int i=0;i<nthread;i++){ master_pos_data[i+1] += master_pos_data[i]; } Ns = master_pos_data[nthread]; insuff_mem = Ns > mem_MAX; } if(!insuff_mem){ const npy_intp start = master_pos_data[threadn]; const npy_intp end = master_pos_data[threadn+1]; npy_intp i = 0; for(npy_intp j=start;j<end;j++){ master_block_data[j] = thread_block[i++]; } #pragma omp barrier #pragma omp master { #if defined(_WIN64) // x64 version std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>()); #elif defined(_WIN32) std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>()); #else boost::sort::block_indirect_sort(master_block_data, master_block_data + Ns, compare_pair<I,J>(),nthread); #endif } #pragma omp barrier #pragma omp for schedule(static) for(npy_intp i=0;i<Ns;i++){ basis[i] = master_block_data[i].first; n[i] = master_block_data[i].second; } } } if(insuff_mem){ return -1; } else{ // sort list based on basis and then fill ndarray values with the sorted list. // master_block.resize(Ns); // std::sort(master_block.begin(),master_block.end(), compare_pair<I,J>()); // for(npy_intp i=0;i<Ns;i++){ // basis[i] = master_block[i].first; // n[i] = master_block[i].second; // } return Ns; } } template<class I,class J,class P=signed char> npy_intp make_basis(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I basis[],J n[]){ const int nt = B->get_nt(); const int nthreads = omp_get_max_threads(); if(nthreads>1 && MAX > nthreads && (nt>0 || B->pre_check)){ return make_basis_parallel(B,MAX,mem_MAX,basis,n); } else{ // If there are no symmetries it does not make sense to use parallel version. // This is because it requires extra memory as well as extra time to sort // the basis states that are produced by the parallel code. return make_basis_sequential(B,MAX,mem_MAX,basis,n); } } template<class I,class J,class P=signed char> npy_intp make_basis_pcon(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I s,I basis[],J n[]){ const int nt = B->get_nt(); const int nthreads = omp_get_max_threads(); if(nthreads>1 && MAX > nthreads && (nt>0 || B->pre_check)){ return make_basis_pcon_parallel(B,MAX,mem_MAX,s,basis,n); } else{ // If there are no symmetries it does not make sense to use parallel version. // This is because it requires extra memory as well as extra time to sort // the basis states that are produced by the parallel code. return make_basis_pcon_sequential(B,MAX,mem_MAX,s,basis,n); } } // template<class I,class J> // npy_intp inline make_basis_wrapper(void *B,npy_intp MAX,npy_intp mem_MAX,void * basis,J n[]){ // return make_basis(reinterpret_cast<general_basis_core<I> *>(B),MAX,mem_MAX,(I*)basis,n); // } // template<class I,class J> // npy_intp inline make_basis_pcon_wrapper(void *B,npy_intp MAX,npy_intp mem_MAX,npy_uint64 s,void * basis,J n[]){ // return make_basis_pcon(reinterpret_cast<general_basis_core<I> *>(B),MAX,mem_MAX,(I)s,(I*)basis,n); // } } #endif
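make_basis_parallel() and make_basis_pcon_parallel() above share one idea: each thread collects its hits into a private vector, publishes the vector's size, a single thread prefix-sums the sizes into disjoint offsets, and then every thread copies its block into its own slice of the shared array before a final sort. A stand-alone sketch of that gather pattern (illustrative names, not part of this header):

#include <omp.h>
#include <algorithm>
#include <vector>

std::vector<long> gather_filtered(long max, bool (*keep)(long))
{
    const int nth = omp_get_max_threads();
    std::vector<long> pos(nth + 1, 0);
    std::vector<long> out;
    std::vector<std::vector<long> > blocks(nth);

    #pragma omp parallel
    {
        const int t = omp_get_thread_num();
        const int team = omp_get_num_threads();
        for (long s = t; s < max; s += team)        // strided search, private block
            if (keep(s)) blocks[t].push_back(s);
        pos[t + 1] = (long) blocks[t].size();       // publish block size
        #pragma omp barrier
        #pragma omp single
        {
            for (int i = 0; i < nth; i++)           // prefix sum -> disjoint offsets
                pos[i + 1] += pos[i];
            out.resize(pos[nth]);
        }                                           // implicit barrier after single
        std::copy(blocks[t].begin(), blocks[t].end(), out.begin() + pos[t]);
    }
    return out;
}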
shallow_water_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // #ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED #define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED // System includes // External includes // Project includes #include "includes/node.h" namespace Kratos { ///@addtogroup ShallowWaterApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ class ModelPart; // forward declaration /** * @ingroup ShallowWaterApplication * @class ShallowWaterUtilities * @brief This class is a wrapper of useful utilities for shallow water computations */ class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities { public: ///@name Type Definitions ///@{ /// Pointer definition of ShallowWaterUtilities KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities); typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. /// Destructor. ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void ComputeFreeSurfaceElevation(ModelPart& rModelPart); void ComputeHeightFromFreeSurface(ModelPart& rModelPart); void ComputeVelocity(ModelPart& rModelPart, bool PerformProjection = false); void ComputeSmoothVelocity(ModelPart& rModelPart); void ComputeMomentum(ModelPart& rModelPart); void ComputeEnergy(ModelPart& rModelPart); void ComputeAccelerations(ModelPart& rModelPart); void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart); void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag); void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0); void ResetDryDomain(ModelPart& rModelPart, double Thickness = 0.0); template<class TContainerType> void CopyFlag(Flags OriginFlag, Flags DestinationFlag, TContainerType& rContainer) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) { auto it = rContainer.begin() + i; it->Set(DestinationFlag, it->Is(OriginFlag)); } } void NormalizeVector(ModelPart& rModelPart, Variable<array_1d<double,3>>& rVariable); template<class TVarType> void CopyVariableToPreviousTimeStep(ModelPart& rModelPart, const TVarType& rVariable) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i) { auto const it_node = rModelPart.NodesBegin() + i; it_node->FastGetSolutionStepValue(rVariable,1) = it_node->FastGetSolutionStepValue(rVariable); } } void SetMinimumValue(ModelPart& rModelPart, const Variable<double>& rVariable, double MinValue); /* * @brief This method sets the z-coordinate of the mesh to zero */ void SetMeshZCoordinateToZero(ModelPart& rModelPart); /* * @brief This method moves the z-coordinate of the mesh according to a variable */ void SetMeshZCoordinate(ModelPart& rModelPart, const Variable<double>& rVariable); ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ ///@} private: ///@name Operations ///@{ double InverseHeight(const double Height, const double Epsilon); void CalculateMassMatrix(Matrix& rMassMatrix, const GeometryType& rGeometry); ///@} }; // Class ShallowWaterUtilities ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output 
///@{ ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED defined
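CopyFlag() and CopyVariableToPreviousTimeStep() above use the same index-based idiom: a canonical integer loop drives "#pragma omp parallel for", and the random-access "begin() + i" recovers the iterator for each entry. A generic sketch of the idiom (illustrative, not a Kratos utility):

template <class TContainerType, class TFunctionType>
void ParallelApply(TContainerType& rContainer, TFunctionType Apply)
{
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) {
        auto it = rContainer.begin() + i;  // random-access iterator offset
        Apply(*it);
    }
}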
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(8*t2-Nz-508,512)),ceild(4*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t3+Nx,512),floord(Nt+Nx-4,512)),floord(4*t1+Nx+5,512)),floord(8*t2+Nx+4,512)),floord(8*t1-8*t2+Nz+Nx+3,512));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),512*t4+510),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
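The CLooG-generated nest above is a time-tiled version of a plain order-1 7-point Jacobi update over two time-alternating buffers. The untiled reference form it corresponds to, sketched here for orientation (not part of the benchmark file):

void stencil_3d7pt_ref(int Nt, int Nz, int Ny, int Nx,
                       double ****A, double alpha, double beta)
{
    int t, i, j, k;

    for (t = 0; t < Nt - 1; t++) {
        #pragma omp parallel for private(j, k)
        for (i = 1; i < Nz - 1; i++)
            for (j = 1; j < Ny - 1; j++)
                for (k = 1; k < Nx - 1; k++)
                    A[(t + 1) % 2][i][j][k] =
                        alpha *  A[t % 2][i][j][k]
                      + beta  * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                               + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                               + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
    }
}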
GB_unaryop__abs_int16_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_bool // op(A') function: GB_tran__abs_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_bool ( int16_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
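Expanding the macro chain (GB_GETA, GB_CASTING, GB_OP) by hand, the apply kernel above reduces to the loop below; a stand-alone sketch with illustrative names (not the generated GraphBLAS function):

#include <stdbool.h>
#include <stdint.h>

void unop_abs_i16_from_bool(int16_t *Cx, const bool *Ax,
                            int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        int16_t x = (int16_t) Ax[p];          /* GB_CASTING: bool -> int16_t */
        Cx[p] = (x < 0) ? (int16_t) -x : x;   /* GB_OP via GB_IABS; never negative here */
    }
}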
GB_binop__isge_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint32) // A*D function (colscale): GB (_AxD__isge_uint32) // D*A function (rowscale): GB (_DxB__isge_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint32) // C=scalar+B GB (_bind1st__isge_uint32) // C=scalar+B' GB (_bind1st_tran__isge_uint32) // C=A+scalar GB (_bind2nd__isge_uint32) // C=A'+scalar GB (_bind2nd_tran__isge_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT32 || GxB_NO_ISGE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
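For reference, a scalar sketch of what GB (_bind2nd__isge_uint32) computes in the dense, non-iso case (editor's addition; the GBB/GBX macros in the real kernel additionally handle bitmap presence and iso-valued inputs). Note that ISGE returns 0 or 1 but keeps the operand type uint32_t, unlike the boolean GE operator, which produces a bool C.

// Editor's illustration of the bind2nd ISGE kernel; full (non-bitmap) B assumed.
#include <cstdint>

static void bind2nd_isge_uint32_dense(uint32_t *Cx, const uint32_t *Ax,
                                      uint32_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;   // GB_BINOP (z, x, y, i, j) : z = (x >= y)
    }
}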
feature_histogram.h
#ifndef LIGHTGBM_FEATURE_HISTOGRAM_H_ #define LIGHTGBM_FEATURE_HISTOGRAM_H_ #include <LightGBM/split_info.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/dataset.h> #include <cstring> #include <cmath> #include <vector> namespace LightGBM { class FeatureMetainfo { public: int num_bin; MissingType missing_type; int8_t bias = 0; uint32_t default_bin; int8_t monotone_type; double penalty; /*! \brief pointer to the tree config */ const Config* config; BinType bin_type; }; /*! * \brief FeatureHistogram is used to construct and store a histogram for a feature. */ class FeatureHistogram { public: FeatureHistogram() { data_ = nullptr; } ~FeatureHistogram() { } /*! \brief Disable copy */ FeatureHistogram& operator=(const FeatureHistogram&) = delete; /*! \brief Disable copy */ FeatureHistogram(const FeatureHistogram&) = delete; /*! * \brief Init the feature histogram * \param data histogram entry storage for this feature * \param meta shared meta information of this feature */ void Init(HistogramBinEntry* data, const FeatureMetainfo* meta); HistogramBinEntry* RawData() { return data_; } /*! * \brief Subtract another histogram from this one * \param other The histogram to subtract */ void Subtract(const FeatureHistogram& other) { for (int i = 0; i < meta_->num_bin - meta_->bias; ++i) { data_[i].cnt -= other.data_[i].cnt; data_[i].sum_gradients -= other.data_[i].sum_gradients; data_[i].sum_hessians -= other.data_[i].sum_hessians; } } void FindBestThreshold(double sum_gradient, double sum_hessian, data_size_t num_data, double min_constraint, double max_constraint, SplitInfo* output) { output->default_left = true; output->gain = kMinScore; find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data, min_constraint, max_constraint, output); output->gain *= meta_->penalty; } void FindBestThresholdNumerical(double sum_gradient, double sum_hessian, data_size_t num_data, double min_constraint, double max_constraint, SplitInfo* output); void FindBestThresholdCategorical(double sum_gradient, double sum_hessian, data_size_t num_data, double min_constraint, double max_constraint, SplitInfo* output); void GatherInfoForThreshold(double sum_gradient, double sum_hessian, uint32_t threshold, data_size_t num_data, SplitInfo *output) { if (meta_->bin_type == BinType::NumericalBin) { GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold, num_data, false, output); } else { std::vector<uint32_t> thresholds(1, threshold); GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, thresholds, num_data, output); } } void GatherInfoForThresholdNumerical(double sum_gradient, double sum_hessian, uint32_t threshold, data_size_t num_data, bool default_left, SplitInfo *output); void GatherInfoForThresholdCategorical(double sum_gradient, double sum_hessian, std::vector<uint32_t> threshold, data_size_t num_data, SplitInfo *output); /*! * \brief Binary size of this histogram, in bytes */ int SizeOfHistgram() const { return (meta_->num_bin - meta_->bias) * sizeof(HistogramBinEntry); } /*! * \brief Restore histogram from memory */ void FromMemory(char* memory_data) { std::memcpy(data_, memory_data, (meta_->num_bin - meta_->bias) * sizeof(HistogramBinEntry)); } /*! * \brief True if this histogram can be split */ bool is_splittable() { return is_splittable_; } /*!
* \brief Set the splittable flag for this histogram */ void set_is_splittable(bool val) { is_splittable_ = val; } static double ThresholdL1(double s, double l1) { const double reg_s = std::max(0.0, std::fabs(s) - l1); return Common::Sign(s) * reg_s; } static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) { double ret = -ThresholdL1(sum_gradients, l1) / (sum_hessians + l2); if (max_delta_step <= 0.0f || std::fabs(ret) <= max_delta_step) { return ret; } else { return Common::Sign(ret) * max_delta_step; } } BinType bin_type() { return meta_->bin_type; } int num_bins() { return meta_->num_bin; } int bias() { return meta_->bias; } data_size_t count(int bin) { return data_[bin].cnt; } double sum_gradients(int bin) { return data_[bin].sum_gradients; } double sum_hessians(int bin) { return data_[bin].sum_hessians; } int8_t monotone_constraint() { return meta_->monotone_type; } static double GetSplitGains(double sum_left_gradients, double sum_left_hessians, double sum_right_gradients, double sum_right_hessians, double l1, double l2, double max_delta_step, double min_constraint, double max_constraint, int8_t monotone_constraint) { double left_output = CalculateSplittedLeafOutput(sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step, min_constraint, max_constraint); double right_output = CalculateSplittedLeafOutput(sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step, min_constraint, max_constraint); if (((monotone_constraint > 0) && (left_output > right_output)) || ((monotone_constraint < 0) && (left_output < right_output))) { return 0; } return GetLeafSplitGainGivenOutput(sum_left_gradients, sum_left_hessians, l1, l2, left_output) + GetLeafSplitGainGivenOutput(sum_right_gradients, sum_right_hessians, l1, l2, right_output); } private: /*! * \brief Calculate the output of a leaf based on regularized sum_gradients and sum_hessians * \param sum_gradients * \param sum_hessians * \return leaf output */ static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step, double min_constraint, double max_constraint) { double ret = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step); if (ret < min_constraint) { ret = min_constraint; } else if (ret > max_constraint) { ret = max_constraint; } return ret; } /*! * \brief Calculate the split gain based on regularized sum_gradients and sum_hessians * \param sum_gradients * \param sum_hessians * \return split gain */ static double GetLeafSplitGain(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) { double output = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step); return GetLeafSplitGainGivenOutput(sum_gradients, sum_hessians, l1, l2, output); } static double GetLeafSplitGainGivenOutput(double sum_gradients, double sum_hessians, double l1, double l2, double output) { const double sg_l1 = ThresholdL1(sum_gradients, l1); return -(2.0 * sg_l1 * output + (sum_hessians + l2) * output * output); } void FindBestThresholdSequence(double sum_gradient, double sum_hessian, data_size_t num_data, double min_constraint, double max_constraint, double min_gain_shift, SplitInfo* output, int dir, bool skip_default_bin, bool use_na_as_missing); const FeatureMetainfo* meta_; /*!
\brief histogram data, one HistogramBinEntry (count, sum of gradients, sum of hessians) per bin */ HistogramBinEntry* data_; // std::vector<HistogramBinEntry> data_; bool is_splittable_ = true; std::function<void(double, double, data_size_t, double, double, SplitInfo*)> find_best_threshold_fun_; }; class HistogramPool { public: /*! * \brief Constructor */ HistogramPool() { cache_size_ = 0; total_size_ = 0; } /*! * \brief Destructor */ ~HistogramPool() { } /*! * \brief Reset pool size * \param cache_size Max cache size * \param total_size Total size that will be used */ void Reset(int cache_size, int total_size) { cache_size_ = cache_size; // at least 2 buckets are needed, to store the smaller leaf and the larger leaf CHECK(cache_size_ >= 2); total_size_ = total_size; if (cache_size_ > total_size_) { cache_size_ = total_size_; } is_enough_ = (cache_size_ == total_size_); if (!is_enough_) { mapper_.resize(total_size_); inverse_mapper_.resize(cache_size_); last_used_time_.resize(cache_size_); ResetMap(); } } /*! * \brief Reset mapper */ void ResetMap() { if (!is_enough_) { cur_time_ = 0; std::fill(mapper_.begin(), mapper_.end(), -1); std::fill(inverse_mapper_.begin(), inverse_mapper_.end(), -1); std::fill(last_used_time_.begin(), last_used_time_.end(), 0); } } void DynamicChangeSize(const Dataset* train_data, const Config* config, int cache_size, int total_size) { if (feature_metas_.empty()) { int num_feature = train_data->num_features(); feature_metas_.resize(num_feature); #pragma omp parallel for schedule(static, 512) if (num_feature >= 1024) for (int i = 0; i < num_feature; ++i) { feature_metas_[i].num_bin = train_data->FeatureNumBin(i); feature_metas_[i].default_bin = train_data->FeatureBinMapper(i)->GetDefaultBin(); feature_metas_[i].missing_type = train_data->FeatureBinMapper(i)->missing_type(); feature_metas_[i].monotone_type = train_data->FeatureMonotone(i); feature_metas_[i].penalty = train_data->FeaturePenalte(i); if (train_data->FeatureBinMapper(i)->GetDefaultBin() == 0) { feature_metas_[i].bias = 1; } else { feature_metas_[i].bias = 0; } feature_metas_[i].config = config; feature_metas_[i].bin_type = train_data->FeatureBinMapper(i)->bin_type(); } } uint64_t num_total_bin = train_data->NumTotalBin(); Log::Info("Total Bins %d", static_cast<int>(num_total_bin)); int old_cache_size = static_cast<int>(pool_.size()); Reset(cache_size, total_size); if (cache_size > old_cache_size) { pool_.resize(cache_size); data_.resize(cache_size); } OMP_INIT_EX(); #pragma omp parallel for schedule(static) for (int i = old_cache_size; i < cache_size; ++i) { OMP_LOOP_EX_BEGIN(); pool_[i].reset(new FeatureHistogram[train_data->num_features()]); data_[i].resize(num_total_bin); uint64_t offset = 0; for (int j = 0; j < train_data->num_features(); ++j) { offset += static_cast<uint64_t>(train_data->SubFeatureBinOffset(j)); pool_[i][j].Init(data_[i].data() + offset, &feature_metas_[j]); auto num_bin = train_data->FeatureNumBin(j); if (train_data->FeatureBinMapper(j)->GetDefaultBin() == 0) { num_bin -= 1; } offset += static_cast<uint64_t>(num_bin); } CHECK(offset == num_total_bin); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } void ResetConfig(const Config* config) { int size = static_cast<int>(feature_metas_.size()); #pragma omp parallel for schedule(static, 512) if (size >= 1024) for (int i = 0; i < size; ++i) { feature_metas_[i].config = config; } } /*!
* \brief Get data for a specific index * \param idx the index to get * \param out the output data will be stored here * \return True if the index was already in the pool, false if it had to be (re)assigned a cache slot */ bool Get(int idx, FeatureHistogram** out) { if (is_enough_) { *out = pool_[idx].get(); return true; } else if (mapper_[idx] >= 0) { int slot = mapper_[idx]; *out = pool_[slot].get(); last_used_time_[slot] = ++cur_time_; return true; } else { // choose the least recently used slot int slot = static_cast<int>(ArrayArgs<int>::ArgMin(last_used_time_)); *out = pool_[slot].get(); last_used_time_[slot] = ++cur_time_; // reset previous mapper if (inverse_mapper_[slot] >= 0) mapper_[inverse_mapper_[slot]] = -1; // update current mapper mapper_[idx] = slot; inverse_mapper_[slot] = idx; return false; } } /*! * \brief Move data from one index to another index * \param src_idx source index * \param dst_idx destination index */ void Move(int src_idx, int dst_idx) { if (is_enough_) { std::swap(pool_[src_idx], pool_[dst_idx]); return; } if (mapper_[src_idx] < 0) { return; } // get slot of src idx int slot = mapper_[src_idx]; // reset src_idx mapper_[src_idx] = -1; // move to dst idx mapper_[dst_idx] = slot; last_used_time_[slot] = ++cur_time_; inverse_mapper_[slot] = dst_idx; } private: std::vector<std::unique_ptr<FeatureHistogram[]>> pool_; std::vector<std::vector<HistogramBinEntry>> data_; std::vector<FeatureMetainfo> feature_metas_; int cache_size_; int total_size_; bool is_enough_ = false; std::vector<int> mapper_; std::vector<int> inverse_mapper_; std::vector<int> last_used_time_; int cur_time_ = 0; }; } // namespace LightGBM #endif // LIGHTGBM_FEATURE_HISTOGRAM_H_
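A standalone numeric check of the split arithmetic above (editor's sketch; G, H, l1, l2 are made-up values). It reproduces ThresholdL1, the unconstrained leaf output w = -ThresholdL1(G, l1) / (H + l2) from CalculateSplittedLeafOutput, and the leaf gain from GetLeafSplitGainGivenOutput.

// Editor's illustration of the FeatureHistogram gain formulas.
#include <algorithm>
#include <cmath>
#include <cstdio>

static double Sign(double x) { return (x > 0.0) - (x < 0.0); }

static double ThresholdL1(double s, double l1) {
  const double reg_s = std::max(0.0, std::fabs(s) - l1);
  return Sign(s) * reg_s;
}

int main() {
  const double G = -12.0, H = 5.0, l1 = 1.0, l2 = 0.5;  // hypothetical leaf sums
  const double w = -ThresholdL1(G, l1) / (H + l2);       // leaf output: 2.0
  const double gain = -(2.0 * ThresholdL1(G, l1) * w + (H + l2) * w * w);  // 22.0
  std::printf("leaf output = %g, leaf gain = %g\n", w, gain);
  return 0;
}

GetSplitGains is exactly this leaf gain evaluated on the left and right partitions and summed, with monotone-constraint violations mapped to zero gain.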
csrmv_merge.h
#ifndef __CSRMV_MERGE_H__ #define __CSRMV_MERGE_H__ #include <algorithm> #include <omp.h> // See the work by Merrill et al. (http://ieeexplore.ieee.org/abstract/document/7877136/) for the original algorithm and implementation. // This code contains modified versions of algorithms 2 and 3. template<class I> class CountingInputIterator{ const I init; public: CountingInputIterator(I _init) : init(_init) {} I operator[](I i){return init+i;} }; template<class I> struct CoordinateT{ I x,y; CoordinateT(I _x,I _y) : x(_x), y(_y) {} }; template<class I,class AIteratorT,class BIteratorT> CoordinateT<I> MergePathSearch(I diagonal, I a_len, I b_len, AIteratorT a, BIteratorT b) { // Diagonal search range (in x coordinate space) I zero = 0; I x_min = std::max(diagonal - b_len, zero); I x_max = std::min(diagonal, a_len); // 2D binary-search along the diagonal search range while (x_min < x_max) { I pivot = (x_min + x_max) >> 1; if (a[pivot] <= b[diagonal - pivot - 1]) { // Keep top-right half of diagonal range x_min = pivot + 1; } else { // Keep bottom-left half of diagonal range x_max = pivot; } } return CoordinateT<I>( std::min(x_min, a_len), // x coordinate in A diagonal - x_min); // y coordinate in B } // NOTE: csrmv_merge uses "#pragma omp for", so it must be called from inside an already-open omp parallel region; row_carry_out and value_carry_out must each provide one slot per thread. template<class I,class T1,class T2,class T3> void csrmv_merge(const bool overwrite_y, const I num_rows, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const T3 x[], I row_carry_out[], T3 value_carry_out[], T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: natural numbers (NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread if(overwrite_y){ #pragma omp for schedule(static) for(I i=0;i<num_rows;i++){ y[i] = 0; } } // Distribute one merge-path segment to each of the existing threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros, row_end_offsets, nz_indices); // Consume merge items, whole rows first T3 running_total = 0.0; for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { for (; thread_coord.y < row_end_offsets[thread_coord.x]; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; y[thread_coord.x] += alpha*running_total; running_total = 0.0; } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid) if (row_carry_out[tid] < num_rows) y[row_carry_out[tid]] += alpha*value_carry_out[tid]; } #pragma omp barrier } #endif
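A hypothetical driver showing the calling convention (editor's addition; the function name spmv and the array names Ap/Aj/Ax are illustrative): because csrmv_merge uses "#pragma omp for", it must be invoked from inside an already-open parallel region, with one carry-out slot per thread.

// Editor's usage sketch for csrmv_merge: computes y = alpha * A * x for CSR A.
#include <vector>
#include <omp.h>

void spmv(int n_rows, const int* Ap, const int* Aj, const double* Ax,
          double alpha, const double* x, double* y)
{
    const int max_threads = omp_get_max_threads();
    std::vector<int> row_carry(max_threads);     // one carry-out row per thread
    std::vector<double> val_carry(max_threads);  // one carry-out value per thread
    #pragma omp parallel
    {
        // overwrite_y = true zeroes y before the accumulation
        csrmv_merge(true, n_rows, Ap, Aj, Ax, alpha, x,
                    row_carry.data(), val_carry.data(), y);
    }
}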
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 64; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 
0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[8][8][4]; __m128 _v5_25 = _mm_set1_ps(5.25f); __m128 _vm4_25 = _mm_set1_ps(-4.25f); __m128 _vm1_25 = _mm_set1_ps(-1.25f); __m128 _v0_25 = _mm_set1_ps(0.25f); __m128 _vm2_5 = _mm_set1_ps(-2.5f); __m128 _v0_5 = _mm_set1_ps(0.5f); __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _r06 = _mm_load_ps(r0 + 4 * 6); __m128 _r07 = _mm_load_ps(r0 + 4 * 7); __m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06)); __m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[7][m], _tmp7m); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05)); __m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b); __m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5))); __m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b); __m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2))); __m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b); __m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(tmp[5][m], _tmp5m); _mm_store_ps(tmp[6][m], _tmp6m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 
5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06)); __m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01)); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05)); __m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b); __m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5))); __m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b); __m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2))); __m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b); __m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); _mm_store_ps(r0_tm_6, _r0tm6); _mm_store_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); 
__m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = 
kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); output0_tm += 4 * 8; } for (; i + 3 < 
tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m128 _bias0 = bias ? 
_mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][8][4]; __m128 _v32 = _mm_set1_ps(32.f); __m128 _v16 = _mm_set1_ps(16.f); __m128 _v8 = _mm_set1_ps(8.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row<float>(i * 6) + (j * 6) * 4; // TODO msa optimize for (int m = 0; m < 8; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _out0tm6 = _mm_load_ps(output0_tm_6); __m128 _out0tm7 = _mm_load_ps(output0_tm_7); __m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6); __m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)); __m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)); __m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04); __m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06); __m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b))); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, 
_tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a))); __m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 4, _out04); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a))); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a))); __m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c))); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 3, _out03); _mm_store_ps(output0 + 4 * 5, _out05); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = pb-pa-inch/pa-36-outch/pb kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 36; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd42_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator); // const 
float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][6][4]; __m128 _vm5 = _mm_set1_ps(-5.f); __m128 _vm4 = _mm_set1_ps(-4.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _vm2 = _mm_set1_ps(-2.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04)); __m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03)); __m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); _mm_store_ps(tmp[5][m], _tmp5m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04)); __m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03)); __m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03)); __m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05)); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot 
Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = 
_mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); 
output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat 
top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[4][6][4]; __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v8 = _mm_set1_ps(8.f); // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row<float>(i * 4) + (j * 4) * 4; // TODO msa optimize for (int m = 0; m < 6; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b); __m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a); __m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b)); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a)); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a)); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, 
_tmp13b, _mm_add_ps(_tmp05, _tmp13a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 3, _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
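For reference, here is a minimal scalar sketch of the one-dimensional transforms that the commented itm/otm matrices above encode (commonly written F(4,3)); each __m128 lane of the SSE code evaluates exactly these expressions. The function names are illustrative only, not part of the library above.

#include <stdio.h>

/* 1-D input transform: the six formulas from the itm comment block. */
static void winograd43_input_1d(const float r[6], float t[6])
{
    t[0] = 4.f * r[0] - 5.f * r[2] + r[4];
    t[1] = -4.f * (r[1] + r[2]) + r[4] + r[3];
    t[2] = 4.f * (r[1] - r[2]) + r[4] - r[3];
    t[3] = -2.f * (r[1] - r[3]) + r[4] - r[2];
    t[4] = 2.f * (r[1] - r[3]) + r[4] - r[2];
    t[5] = 4.f * r[1] - 5.f * r[3] + r[5];
}

/* 1-D output transform: the four formulas from the otm comment block,
 * applied after the per-frequency multiply-accumulate ("dot") stage. */
static void winograd43_output_1d(const float r[6], float o[4])
{
    o[0] = r[0] + (r[1] + r[2]) + (r[3] + r[4]);
    o[1] = (r[1] - r[2]) + (r[3] - r[4]) * 2;
    o[2] = (r[1] + r[2]) + (r[3] + r[4]) * 4;
    o[3] = r[5] + (r[1] - r[2]) + (r[3] - r[4]) * 8;
}

int main(void)
{
    const float row[6] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f };
    float t[6], o[4];
    winograd43_input_1d(row, t);
    winograd43_output_1d(t, o);   /* demo wiring only; a real convolution
                                     multiplies by transformed kernel weights
                                     in between */
    for (int i = 0; i < 4; i++)
        printf("o[%d] = %g\n", i, o[i]);
    return 0;
}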
matrix.c
#include "matrix.h" #include "utils.h" #include <assert.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> matrix_t *matrix_create(int rows, int cols) { matrix_t *ret = NULL; double *bloco = NULL; int i = 0; ret = (matrix_t *)malloc(sizeof(matrix_t)); bloco = (double *)malloc(sizeof(double) * rows * cols); ret->data = (double **)malloc(sizeof(double *) * rows); ret->cols = cols; ret->rows = rows; for (i = 0; i < rows; i++) { ret->data[i] = &bloco[i * cols]; } return ret; } void matrix_destroy(matrix_t *m) { int i; free(m->data[0]); for (i = 0; i < m->rows; i++) { m->data[i] = NULL; } free(m->data); m->data = NULL; m->rows = 0; m->cols = 0; free(m); m = NULL; return; } void matrix_randfill(matrix_t *m) { int i, j; for (i = 0; i < m->rows; i++) { for (j = 0; j < m->cols; j++) { m->data[i][j] = random(); } } } void matrix_fill(matrix_t *m, double val) { int i, j; for (i = 0; i < m->rows; i++) { for (j = 0; j < m->cols; j++) { m->data[i][j] = val; } } } matrix_t *matrix_multiply_threaded(matrix_t *A, matrix_t *B, matrix_t *ret, int num_threads) { // Checar se a multiplicação é possível if (A->cols != B->rows) { printf("Matrizes de formato incompativel\n"); return NULL; } int i, j, k; int newRows = A->rows; int newCols = B->cols; omp_set_num_threads(num_threads); #pragma omp parallel for private(j, k) schedule(static) for (i = 0; i < newRows; i++) { for (j = 0; j < newCols; j++) { double sum = 0; for (k = 0; k < A->rows; k++) { sum += A->data[i][k] * B->data[k][j]; } ret->data[i][j] = sum; } } return ret; } matrix_t *matrix_multiply(matrix_t *A, matrix_t *B, matrix_t *ret) { // Checar se a multiplicação é possível if (A->cols != B->rows) { printf("Matrizes de formato incompativel\n"); return NULL; } int i, j, k; int newRows = A->rows; int newCols = B->cols; for (i = 0; i < newRows; i++) { for (j = 0; j < newCols; j++) { double sum = 0; for (k = 0; k < A->rows; k++) { sum += A->data[i][k] * B->data[k][j]; } ret->data[i][j] = sum; } } return ret; } matrix_t *matrix_sum_threaded(matrix_t *A, matrix_t *B, matrix_t *ret, int num_threads) { // Checar se a soma é possível if (A->rows != B->rows || A->cols != B->cols) { printf("Matrizes de formato incompativel\n"); return NULL; } int i; int newRows = A->rows; int newCols = A->cols; omp_set_num_threads(num_threads); #pragma omp parallel for for (i = 0; i < newCols * newRows; i++) { ret->data[0][i] = A->data[0][i] + B->data[0][i]; } return ret; } matrix_t *matrix_sum(matrix_t *A, matrix_t *B, matrix_t *ret) { // Checar se a soma é possível if (A->rows != B->rows || A->cols != B->cols) { printf("Matrizes de formato incompativel\n"); return NULL; } int i; int newRows = A->rows; int newCols = A->cols; for (i = 0; i < newCols * newRows; i++) { ret->data[0][i] = A->data[0][i] + B->data[0][i]; } return ret; } matrix_t *matrix_sort_threaded(matrix_t *A, matrix_t *ret, int num_threads) { merge_sort_threaded(ret->data[0], A->rows * A->cols, num_threads); return ret; } matrix_t *matrix_sort(matrix_t *A, matrix_t *ret) { merge_sort(ret->data[0], A->rows * A->cols); return ret; } void matrix_print(matrix_t *m) { int i, j; for (i = 0; i < m->rows; i++) { for (j = 0; j < m->cols; j++) { printf("%.17f ", m->data[i][j]); } printf("\n"); } fflush(stdout); }
gi_extrema_region_builder.h
/* * * Copyright (C) 2018 Attila Gyulassy <[email protected]> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef EXTREMAL_REGION_BUILDER #define EXTREMAL_REGION_BUILDER #include <set> #include <queue> #include <vector> #include <stack> #include <unordered_map> #include "gi_basic_types.h" #include "gi_vectors.h" #include "gi_labeling.h" //#include "gi_regular_grid.h" //#include "gi_regular_grid_trilinear_function.h" #include "gi_timing.h" #include "gi_union_find_labeling.h" //#include "gi_topological_regular_grid.h" #include "gi_array_index_partition.h" #include "omp.h" //#include "gi_numeric_integrator_expanding_region_stop_filtered2.h" //#define OUTPUTINTERMEDIATE namespace GInt { template<class FUNC_TYPE, class MESH_TYPE> class UFMergeGraph { protected: FUNC_TYPE* mFunc; MESH_TYPE* mMesh; struct ExtremumSet { INDEX_TYPE extremum_cell_index; INT_TYPE representative_list_id; typename FUNC_TYPE::DType extremum_fval; ExtremumSet(INDEX_TYPE gid, INT_TYPE lid, typename FUNC_TYPE::DType val) : extremum_cell_index(gid), representative_list_id(lid), extremum_fval(val) {} void printme() const { printf("extremum_cell_index=%llu, rep=%d, val=%f\n", extremum_cell_index, representative_list_id, extremum_fval); } }; struct MergeArc { INDEX_TYPE saddle_cell_index; INT_TYPE extremum1_list_id; INT_TYPE extremum2_list_id; typename FUNC_TYPE::DType merge_persistence; typename FUNC_TYPE::DType merge_saddle_value; void printme() const { printf("saddle_cell_index=%llu, e1=%d, e2=%d, p=%f, s=%f\n", saddle_cell_index, extremum1_list_id, extremum2_list_id, merge_persistence, merge_saddle_value); } bool operator()(const MergeArc& _Left, const MergeArc& _Right) { if (_Left.merge_persistence < _Right.merge_persistence) return false; if (_Left.merge_persistence > _Right.merge_persistence) return true; return _Left.saddle_cell_index > _Right.saddle_cell_index; } }; int countcancels; public: std::unordered_map<INDEX_TYPE, INT_TYPE> mCellIndexToListIdMap; std::vector<ExtremumSet> mExtrema; std::priority_queue<MergeArc, std::vector< MergeArc>, MergeArc > mMergesToCancel; void MakeSet(INDEX_TYPE gid) { //printf("%llu gid\n", gid); INT_TYPE lid = mExtrema.size(); //printf("%d lid\n", lid); mExtrema.push_back(ExtremumSet{ gid, lid, mFunc->cellValue(gid) }); //printf("extrema size %d\n", mExtrema.size()); //printf("value = %f\n", mExtrema[mExtrema.size()-1].extremum_fval); mCellIndexToListIdMap[gid] = lid; //printf("idmap size %d\n", mCellIndexToListIdMap.size()); } INT_TYPE FindRepresentative(INT_TYPE lid) { ExtremumSet& e = mExtrema[lid]; //printf("%d->%d\n", lid, e.representative_list_id); if (e.representative_list_id == lid) return lid; INT_TYPE i = FindRepresentative(e.representative_list_id); e.representative_list_id = i; return i; } bool mDoMinHierarchy; void MergeByVal(INT_TYPE lid1, INT_TYPE lid2) { ExtremumSet& e1 = mExtrema[lid1]; ExtremumSet& e2 = mExtrema[lid2]; // if reverse order is false, set the lower one to point // to the higher one - so reverse order true used for basins if (mFunc->lessThan(e1.extremum_cell_index, e2.extremum_cell_index) ^ mDoMinHierarchy) { e1.representative_list_id = e2.representative_list_id; //printf("%f -> %f\n", e1.extremum_fval, e2.extremum_fval); } else { e2.representative_list_id = e1.representative_list_id; //printf("%f -> %f\n", e2.extremum_fval, e1.extremum_fval); } } void PerformCancel(MergeArc& m) { if (m.merge_persistence > gThreshold) { #ifdef DEBUGGSERB 
printf("threshold too great %f %f\n", m.merge_persistence, gThreshold); #endif return; } //printf("e1=%d, e2=%d\n", m.extremum1_list_id, m.extremum2_list_id); INT_TYPE current_extrep1_lid = FindRepresentative(m.extremum1_list_id); INT_TYPE current_extrep2_lid = FindRepresentative(m.extremum2_list_id); //printf("found %d %d\n", current_extrep1_lid, current_extrep2_lid); if (current_extrep1_lid == current_extrep2_lid) return; // do nothing for loops ExtremumSet& current_extrep1 = mExtrema[current_extrep2_lid]; ExtremumSet& current_extrep2 = mExtrema[current_extrep1_lid]; // if this arc is no longer valid then insert the new arc // if the current_extrep2 is not the same then we need to modify if (m.extremum1_list_id != current_extrep1_lid || m.extremum2_list_id != current_extrep2_lid) { // reinsert? typename FUNC_TYPE::DType d1 = abs(m.merge_saddle_value - current_extrep1.extremum_fval); typename FUNC_TYPE::DType d2 = abs(m.merge_saddle_value - current_extrep2.extremum_fval); if (d1 < d2) { if (d1 > gThreshold) { #ifdef DEBUGGSERB printf("thresha too big %f, %f, %f\n", d1, d2, gThreshold); #endif return; } m.merge_persistence = d1; m.extremum1_list_id = current_extrep2_lid; m.extremum2_list_id = current_extrep1_lid; mMergesToCancel.push(m); //struct MergeArc { // INDEX_TYPE saddle_cell_index; // INT_TYPE extremum1_list_id; // INT_TYPE headExtId; // typename FUNC_TYPE::DType merge_persistence; // typename FUNC_TYPE::DType merge_saddle_value; } else { if (d2 > gThreshold) { #ifdef DEBUGGSERB printf("threshb too big %f, %f, %f\n", d1, d2, gThreshold); #endif return; } m.merge_persistence = d2; m.extremum1_list_id = current_extrep1_lid; m.extremum2_list_id = current_extrep2_lid; mMergesToCancel.push(m); } //printf("merged pers %f\n", m.merge_persistence); return; } //if (m.merge_saddle_value == -1) { // printf("pers = %f, sad = %f\n", m.merge_persistence, m.merge_saddle_value); // m.printme(); // current_extrep1.printme(); // current_extrep2.printme(); // printf("frommesh: dim=%d, val=%f, boundary=%d\n", mMesh->dimension(m.saddle_cell_index), mFunc->cellValue(m.saddle_cell_index), mMesh->boundaryValue(m.saddle_cell_index)); // MESH_TYPE::FacetsIterator fit(mMesh); // for (fit.begin(m.saddle_cell_index); fit.valid(); fit.advance()) { // printf(" --> %f\n", mFunc->cellValue(fit.value())); // } //} //return; // otherwise actually do the merge MergeByVal(current_extrep1_lid, current_extrep2_lid); countcancels++; } typename FUNC_TYPE::DType gThreshold; public: enum Direction { MAXIMAL, MINIMAL }; UFMergeGraph(FUNC_TYPE* func, MESH_TYPE* mesh, Direction d ) : mFunc(func), mMesh(mesh) { if (d == MINIMAL) { mDoMinHierarchy = true; } else { mDoMinHierarchy = false; } countcancels = 0; } INT_TYPE NumExtrema() const { return this->mExtrema.size(); } INT_TYPE Representative(INT_TYPE nid) const { return this->mExtrema[nid].representative_list_id; } INDEX_TYPE TopoIndex(INT_TYPE nid) const { return this->mExtrema[nid].extremum_cell_index; } void AddNode(INDEX_TYPE key) { MakeSet(key); } void AddArc(INDEX_TYPE a, INDEX_TYPE b, INDEX_TYPE s) { if (a == b) return; // ignore these // ignore connectors that span different boundary types DIM_TYPE bva = mMesh->boundaryValue(a); DIM_TYPE bvb = mMesh->boundaryValue(b); //if (bvb != bva) return; DIM_TYPE bvs = mMesh->boundaryValue(s); //if (bva != bvs) return; // so this could be a valid cancellation so add the mergearc MergeArc m; m.saddle_cell_index = s; m.extremum1_list_id = mCellIndexToListIdMap[a]; m.extremum2_list_id = mCellIndexToListIdMap[b]; 
m.merge_saddle_value = mFunc->cellValue(s); //printf("sadval = %f\n", m.merge_saddle_value); // reinsert? typename FUNC_TYPE::DType d1 = mExtrema[m.extremum1_list_id].extremum_fval - m.merge_saddle_value ; typename FUNC_TYPE::DType d2 = mExtrema[m.extremum2_list_id].extremum_fval - m.merge_saddle_value ; if (d1 < 0) d1 *= -1; if (d2 < 0) d2 *= -1; if (d1 < d2) { m.merge_persistence = d1; } else { m.merge_persistence = d2; } // printf("found %f pers d1=%f d2=%f s=%f e1=%f e2=%f\n", m.merge_persistence, d1, d2, m.merge_saddle_value, mExtrema[m.extremum1_list_id].extremum_fval, mExtrema[m.extremum2_list_id].extremum_fval); mMergesToCancel.push(m); } int SimplifyToThreshold(typename FUNC_TYPE::DType threshold) { gThreshold = threshold; ///printf("simplifying to %f\n", threshold); while (!mMergesToCancel.empty()) { MergeArc edge = mMergesToCancel.top(); mMergesToCancel.pop(); //printf("got %d left\n", mMergesToCancel.size()); PerformCancel(edge); } //printf("gothere - siplified now flattening\n"); // flatten UF so all subsequent queries are CONST for (INT_TYPE esid = 0; esid < mExtrema.size(); esid++) { FindRepresentative(esid); } //printf("done!\n"); //printf("performed %d merges\n", countcancels); return countcancels; } }; // we will use FUNC_TYPE to access index comparator template<class MESH_TYPE, class FUNC_TYPE, class GRAD_TYPE> class SimplifiedExtremumGraph { public: // global context GRAD_TYPE* mGrad; MESH_TYPE* mMesh; FUNC_TYPE* mFunc; UFMergeGraph<FUNC_TYPE, MESH_TYPE>* mMinGraph; UFMergeGraph<FUNC_TYPE, MESH_TYPE>* mMaxGraph; bool do_mins; bool do_maxs; INDEX_TYPE rec_td( INDEX_TYPE cellid) const { //printf("a %llu d=%d\n", cellid, mMesh->dimension(cellid)); INDEX_TYPE current = cellid; if (mGrad->getCritical(current)) return current; //printf("b\n"); INDEX_TYPE pair = mGrad->getPair(current); //printf("c pair = %llu, d=%d\n", pair, mMesh->dimension(pair)); if (mGrad->getCritical(pair)) return pair; // should NEVER happen //printf("c.1\n"); typename MESH_TYPE::FacetsIterator facets(mMesh); facets.begin(pair); INDEX_TYPE next = facets.value(); //printf("next = %llu\n", next); if (next == current) { facets.advance(); next = facets.value(); } //printf("next = %llu\n", next); //printf("d\n"); return rec_td(next); } void trace_down_1saddle(INDEX_TYPE start, INDEX_TYPE& min1, INDEX_TYPE& min2) const { typename MESH_TYPE::FacetsIterator facets(mMesh); facets.begin(start); min1 = rec_td(facets.value()); facets.advance(); if (!facets.valid()) { // error printf("should never get here in tarce down 1saddle\n"); min2 = min1; return; } min2 = rec_td(facets.value()); return; } INDEX_TYPE rec_tu(INDEX_TYPE cellid) const { INDEX_TYPE current = cellid; if (mGrad->getCritical(current)) return current; INDEX_TYPE pair = mGrad->getPair(current); if (mGrad->getCritical(pair)) return pair; // should NEVER happen typename MESH_TYPE::CofacetsIterator cofacets(mMesh); cofacets.begin(pair); INDEX_TYPE next = cofacets.value(); if (next == current) { cofacets.advance(); if (!cofacets.valid()) { printf("WHOATHERE\n"); } next = cofacets.value(); } return rec_tu(next); } void trace_up_2saddle(const INDEX_TYPE& start, INDEX_TYPE& max1, INDEX_TYPE& max2) const { if (mMesh->boundaryValue(start) != 0) { max1 = max2 = -1; return; } typename MESH_TYPE::CofacetsIterator cofacets(mMesh); cofacets.begin(start); max1 = rec_tu(cofacets.value()); cofacets.advance(); if (!cofacets.valid()) { // error printf("should never get here in tarce up 2saddle\n"); max2 = max1; return; } max2 = rec_tu(cofacets.value()); return; } public: 
SimplifiedExtremumGraph(MESH_TYPE* mesh, FUNC_TYPE* func, GRAD_TYPE* grad) : mGrad(grad), mMesh(mesh), mFunc(func){ mMinGraph = new UFMergeGraph < FUNC_TYPE, MESH_TYPE >(func, mesh, UFMergeGraph < FUNC_TYPE, MESH_TYPE >::MINIMAL); mMaxGraph = new UFMergeGraph < FUNC_TYPE, MESH_TYPE >(func, mesh, UFMergeGraph < FUNC_TYPE, MESH_TYPE >::MAXIMAL); } enum EXTGRAPHMODE { NONE, BOTH, MINS, MAXS }; void SetMode(EXTGRAPHMODE m) { switch (m) { case NONE: this->do_maxs = false; this->do_mins = false; return; case BOTH: this->do_maxs = true; this->do_mins = true; return; case MINS: this->do_maxs = false; this->do_mins = true; return; case MAXS: this->do_maxs = true; this->do_mins = false; return; } } void ComputeMinMapFromGradient(typename FUNC_TYPE::DType THRESHOLD) { std::vector<INDEX_TYPE> saddles1; std::vector<INDEX_TYPE> saddles2; std::vector<INDEX_TYPE> topo_index_partition; int num_threads; #pragma omp parallel { // START PARALLEL CONSTRUCTION // divide index space #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(mMesh->numCells(), num_threads, topo_index_partition); } #pragma omp barrier // SCAN ALL CELLS COLLECT MINIMA, MAXIMA, 1SADDLES, 2SADDLES int thread_num = omp_get_thread_num(); // in parallel go through and find all 2-saddles std::vector<INDEX_TYPE> lminima; std::vector<INDEX_TYPE> l1saddles; std::vector<INDEX_TYPE> lmaxima; std::vector<INDEX_TYPE> l2saddles; //#pragma omp critical // { // printf("thread %d doing index %d-%d\n", thread_num, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); // } typename MESH_TYPE::AllCellsIterator cellit(mMesh, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (cellit.begin(); cellit.valid(); cellit.advance()) { INDEX_TYPE cell_id = cellit.value(); if (mGrad->getCritical(cell_id)) { DIM_TYPE d = mMesh->dimension(cell_id); if (do_mins) { if (d == 0) { lminima.push_back(cell_id); } else if (d == 1) { l1saddles.push_back(cell_id); } } if (do_maxs) { if (d == 2) { l2saddles.push_back(cell_id); } else if (d == 3) { lmaxima.push_back(cell_id); } } } } #pragma omp barrier // COMBINE CRITICAL POINT LISTS #pragma omp critical { //printf("gothere mins %d, saddles %d\n", lminima.size(), l1saddles.size()); for (auto id : lminima) { mMinGraph->AddNode(id); } //printf("hothere\n"); saddles1.insert(saddles1.end(), l1saddles.begin(), l1saddles.end() ); //printf("asdfasdfasdfasdf\n"); } #pragma omp critical { //printf("gothere maxs\n"); for (auto id : lmaxima) { mMaxGraph->AddNode(id); } saddles2.insert(saddles2.end(), l2saddles.begin(), l2saddles.end() ); } #pragma omp barrier int num1s = saddles1.size(); #pragma omp single { //printf("gothere2 \n"); ArrayIndexPartitioner::EvenChunkSplit(num1s, num_threads, topo_index_partition); } #pragma omp barrier thread_num = omp_get_thread_num(); for (int index = topo_index_partition[thread_num]; index < topo_index_partition[thread_num + 1]; index++) { INDEX_TYPE sad1 = saddles1[index]; INDEX_TYPE min1, min2; trace_down_1saddle(sad1, min1, min2); if (min1 != min2) { #pragma omp critical { //printf("adding %llu, %llu, %llu\n", sad1, min1, min2); mMinGraph->AddArc(min1, min2, sad1); //printf("added!\n"); } } } #pragma omp barrier int num2s = saddles2.size(); #pragma omp single { //printf("gothere3 \n"); ArrayIndexPartitioner::EvenChunkSplit(num2s, num_threads, topo_index_partition); } #pragma omp barrier thread_num = omp_get_thread_num(); for (int index = topo_index_partition[thread_num]; index < topo_index_partition[thread_num + 1]; 
index++) { INDEX_TYPE sad2 = saddles2[index]; INDEX_TYPE max1, max2; trace_up_2saddle(sad2, max1, max2); if (max1 != max2) { #pragma omp critical { mMaxGraph->AddArc(max1, max2, sad2); } } } #pragma omp barrier #pragma omp single { printf("built minmaxgraph:\n mingraph = %d nodes, %d arcs\n maxgraph = %d nodes, %d arcs\n", this->mMinGraph->NumExtrema(), this->mMinGraph->mMergesToCancel.size(), this->mMaxGraph->NumExtrema(), this->mMaxGraph->mMergesToCancel.size()); } #pragma omp single { int numc = mMinGraph->SimplifyToThreshold(THRESHOLD); printf("MinGraph did %d cancellations\n", numc); } #pragma omp single { int numc = mMaxGraph->SimplifyToThreshold(THRESHOLD); printf("MaxGraph did %d cancellations\n", numc); } } // END OMP PARALLEL } // phase 0: (build nodes) // -- gather extrema // -- for each extremum, set itself as the representative_list_id // Phase 1: (build directed graph) // -- for each saddle s, take less extreme node nl, if |v(s) - v(nl)| < t // where t is max simp thresh, if s is "closer" to nl than nl's current saddle // replace nl's current saddle with s // Phase 2: (union-find representatives) //need map<INDEX_TYPE> -> int to find extrema when tracing paths //need struct arc { saddle_value; more_extreme_node_id; } }; //typedef IndexCompareLessThan Comparer; template< class Comparer> class GridExtremalRegionBuilder { protected: DenseLabeling<int>* m_dest_label; RegularGrid3D* m_grid; RegularGridTrilinearFunction* m_func; Vec3i m_xyz; Vec3b m_periodic; inline bool AComesBeforeB(INDEX_TYPE a, INDEX_TYPE b) const { return mCompare->Compare(a, b); } bool IsExtremeVertexIn6Neighborhood(INDEX_TYPE id) const { Vec3l t_neighbors[6]; Vec3l t_coords = m_grid->XYZ3d(id); int t_num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(t_coords, t_neighbors); INDEX_TYPE t_current_lowest = id; for (int i = 0; i < t_num_neighbors; i++) { INDEX_TYPE t_neighbor_vertex = m_grid->Index3d(t_neighbors[i]); if (AComesBeforeB(t_neighbor_vertex, t_current_lowest)) { return false; } } return true; } void Enqueue_Later_Neighbors(Vec3l xyz, std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > &expansion, std::set<INDEX_TYPE>&enqueued_set) { INDEX_TYPE currentVID = m_grid->Index3d(xyz); Vec3l neighbors[6]; int num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(xyz, neighbors); for (int i = 0; i < num_neighbors; i++) { INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]); if (m_dest_label->GetLabel(neighborVID) == -1 && AComesBeforeB(currentVID, neighborVID) && enqueued_set.count(neighborVID) == 0) { enqueued_set.insert(neighborVID); expansion.push(neighborVID); } } } // look at neighborhood of currentVID, for each neighbor, if it's "earlier" // check if it has been assigned- if not, then this point is unassigned // if it is assigned, check that each point has same assignment, // if there are no neighbors, return its original label int InspectPriorRegions(INDEX_TYPE currentVID) { INDEX_TYPE neighborVID; int extremal_certain = m_dest_label->GetLabel(currentVID); bool has_extremal = false; Vec3l neighbors[6]; int num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(m_grid->XYZ3d(currentVID), neighbors); for (int i = 0; i < num_neighbors; i++) { INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]); if (AComesBeforeB(neighborVID, currentVID)) { if (m_dest_label->GetLabel(neighborVID) == -1) return -1; // if a extremal one is uncertain, we are uncertain if (!has_extremal) { extremal_certain = m_dest_label->GetLabel(neighborVID); has_extremal = true; } else { if (extremal_certain 
!= m_dest_label->GetLabel(neighborVID)) return -1; } } } //if (!has_extremal) { // printf("ERROR should never get here2\n"); // Vec3l coords = m_grid->XYZ3d(currentVID); coords.PrintInt(); // return -1; //} return extremal_certain; } void Expand_Lower_Neighborhood(INDEX_TYPE startid, int start_label) { Vec3l xyz = m_grid->XYZ3d(startid); std::set<INDEX_TYPE> enqueued_set; INDEX_TYPE currentVID = startid; // the natural ordering using the < operator on pairs will give us the highest // element first, simulating region growing from high to low std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > growing_front(*mCompare); enqueued_set.insert(startid); m_dest_label->SetLabel(startid, start_label); Enqueue_Later_Neighbors(xyz, growing_front, enqueued_set); while (!growing_front.empty()) { INDEX_TYPE currid = growing_front.top(); growing_front.pop(); int cellvale = InspectPriorRegions(currid); // find extremals // cellvalue >=0 indicates that there is certainty here, so lets expand if (cellvale >= 0) { m_dest_label->SetLabel(currid, cellvale); Enqueue_Later_Neighbors(m_grid->XYZ3d(currid), growing_front, enqueued_set); } } } Comparer* mCompare; std::vector<INDEX_TYPE> mExtrema; public: GridExtremalRegionBuilder(RegularGridTrilinearFunction* func, RegularGrid3D* grid) : m_xyz(func->GetGrid()->XYZ()), m_periodic(func->GetGrid()->Periodic()), m_func(func), m_grid(grid) { mCompare = new Comparer(func); } DenseLabeling<int>* GetOutputLabels() { return m_dest_label; } RegularGrid3D* GetGrid() { return m_grid; } RegularGridTrilinearFunction* GetFunction() { return m_func; } const std::vector<INDEX_TYPE>& GetExtrema() const { return mExtrema; } void BeginIntegration(bool verbose = false) { ThreadedTimer gtimer(1); gtimer.StartGlobal(); m_dest_label = new DenseLabeling<int>(m_grid->NumElements()); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); #pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { m_dest_label->SetLabel(i, -1); if (IsExtremeVertexIn6Neighborhood(i)) { #pragma omp critical { mExtrema.push_back(i); } } } int num_extrema = mExtrema.size(); printf("Found and expanding %d regions\n", num_extrema); #pragma omp parallel { #pragma omp for schedule(dynamic) nowait for (int m = 0; m < num_extrema; m++) { INDEX_TYPE maximum = mExtrema[m]; Expand_Lower_Neighborhood(maximum, m); } } } }; //#define DEBUGGSERB //typedef IndexCompareLessThan Comparer; template< class Comparer, class GridFuncType, class TopoGridType> class GridSimplifiedExtremalRegionBuilder { protected: DenseLabeling<int>* m_dest_label; RegularGrid3D* m_grid; GridFuncType* m_func; inline bool AComesBeforeB(INDEX_TYPE a, INDEX_TYPE b) const { return mCompare->Compare(a, b); } bool IsExtremeVertexIn6Neighborhood(INDEX_TYPE id) const { Vec3l t_neighbors[6]; Vec3l t_coords = m_grid->XYZ3d(id); int t_num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(t_coords, t_neighbors); INDEX_TYPE t_current_lowest = id; for (int i = 0; i < t_num_neighbors; i++) { INDEX_TYPE t_neighbor_vertex = m_grid->Index3d(t_neighbors[i]); if (AComesBeforeB(t_neighbor_vertex, t_current_lowest)) { return false; } } return true; } void Enqueue_Later_Neighbors(Vec3l xyz, std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > &expansion, std::set<INDEX_TYPE>&enqueued_set) const { INDEX_TYPE currentVID = m_grid->Index3d(xyz); Vec3l neighbors[6]; int num_neighbors = m_grid->GatherExistingNeighborsAll6(xyz, neighbors); // we want to grow towards the middle? 
for (int i = 0; i < num_neighbors; i++) { INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]); if (m_dest_label->GetLabel(neighborVID) == -1 && AComesBeforeB(currentVID, neighborVID) && enqueued_set.count(neighborVID) == 0) { #ifdef DEBUGGSERB testout->SetLabel(neighborVID, 1); #endif enqueued_set.insert(neighborVID); expansion.push(neighborVID); } } } // look at neighborhood of currentVID, for each neighbor, if it's "earlier" // check if it has been assigned- if not, then this point is unassigned // if it is assigned, check that each point has same assignment, // if there are no neighbors, return its original label #ifdef DEBUGGSERB int InspectPriorRegions(INDEX_TYPE currentVID, std::set<INDEX_TYPE>& enqueued_set) #else int InspectPriorRegions(INDEX_TYPE currentVID) #endif { INDEX_TYPE neighborVID; int extremal_certain = m_dest_label->GetLabel(currentVID); bool has_extremal = false; Vec3l neighbors[6]; int num_neighbors = m_grid->GatherExistingNeighborsAll6(m_grid->XYZ3d(currentVID), neighbors); for (int i = 0; i < num_neighbors; i++) { INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]); if (AComesBeforeB(neighborVID, currentVID)) { if (m_dest_label->GetLabel(neighborVID) == -1) { #ifdef DEBUGGSERB //printf("%f 's before neighbor %f has -1 label : c%d, n%d\n", // m_func->SampleImage(currentVID), // m_func->SampleImage(neighborVID), // enqueued_set.count(currentVID), // enqueued_set.count(neighborVID)); //auto v1 = m_grid->XYZ3d(currentVID); //auto v2 = m_grid->XYZ3d(neighborVID); //v1.PrintInt(); //v2.PrintInt(); #endif return -1; // if a extremal one is uncertain, we are uncertain } if (!has_extremal) { extremal_certain = m_dest_label->GetLabel(neighborVID); has_extremal = true; } else { if (extremal_certain != m_dest_label->GetLabel(neighborVID)) { return -1; } } } } //if (!has_extremal) { // printf("ERROR should never get here2\n"); // Vec3l coords = m_grid->XYZ3d(currentVID); coords.PrintInt(); // return -1; //} return extremal_certain; } #ifdef DEBUGGSERB DenseLabeling<int>* testout; #endif void Expand_Lower_Neighborhood(const std::vector<INDEX_TYPE>& startids, int start_label) { std::set<INDEX_TYPE> enqueued_set; std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > growing_front(*mCompare); for (auto startid : startids) { //if (! this->IsExtremeVertexIn6Neighborhood(startid)) { // printf("NONMAX START ID\n"); //} // the natural ordering using the < operator on pairs will give us the highest // element first, simulating region growing from high to low enqueued_set.insert(startid); m_dest_label->SetLabel(startid, start_label); #ifdef DEBUGGSERB testout->SetLabel(startid, 2); #endif } #ifdef DEBUGGSERB //int numvalid = 0; //for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) { // if (this->IsExtremeVertexIn6Neighborhood(i)) { // if (m_dest_label->GetLabel(i) == -1) { // printf("UNFOUND EXTREMA!! 
%llu\n", i); // } // numvalid++;// printf("extremum validated: %llu\n", i); // } // else { // //growing_front.push(i); // //enqueued_set.insert(i); // } //} //printf("validated %d extrema\n", numvalid); //while (!growing_front.empty()) { // INDEX_TYPE currid = growing_front.top(); // growing_front.pop(); // int cellvale = InspectPriorRegions(currid, enqueued_set); // if (cellvale >= 0) { // m_dest_label->SetLabel(currid, cellvale); // testout->SetLabel(currid, 2); // } //} //return; #endif for (auto startid : startids) { #ifdef DEBUGGSERB INDEX_TYPE TESTID = InspectPriorRegions(startid, enqueued_set); if (TESTID == -1) printf("WHOATHERE %ll %llu\n", TESTID, startid); #endif Vec3l xyz = m_grid->XYZ3d(startid); Enqueue_Later_Neighbors(xyz, growing_front, enqueued_set); } #ifdef DEBUGGSERB printf("growing front size %d\n", growing_front.size()); #endif while (!growing_front.empty()) { INDEX_TYPE currid = growing_front.top(); growing_front.pop(); //printf("asdf %f\n", m_func->SampleImage(currid)); #ifdef DEBUGGSERB int cellvale = InspectPriorRegions(currid, enqueued_set); #else int cellvale = InspectPriorRegions(currid); #endif // find extremals //continue; // cellvalue >=0 indicates that there is certainty here, so lets expand if (cellvale >= 0) { m_dest_label->SetLabel(currid, cellvale); #ifdef DEBUGGSERB testout->SetLabel(currid, 2); #endif Enqueue_Later_Neighbors(m_grid->XYZ3d(currid), growing_front, enqueued_set); } } } std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> > mIdToVIndexMap; Comparer* mCompare; //std::vector<INDEX_TYPE> mExtrema; TopoGridType* mMesh; public: GridSimplifiedExtremalRegionBuilder(GridFuncType* func, RegularGrid3D* grid, TopoGridType* tgrid) : m_func(func), m_grid(grid), mMesh(tgrid) { mCompare = new Comparer(func); #ifdef DEBUGGSERB testout = new DenseLabeling<int>(grid->NumElements()); #endif } DenseLabeling<int>* GetOutputLabels() { return m_dest_label; } RegularGrid3D* GetGrid() { return m_grid; } GridFuncType* GetFunction() { return m_func; } const std::vector<INDEX_TYPE>& GetExtrema() const { return this->mExtrema; } const std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> >& GetIdMap() { return mIdToVIndexMap; } // the extremum map takes in the GID of a VERTEX (actually any cell but it gets the vertex number by just // rounding down. to get the right vertex in a cell, pass in a map which uses max labeling to replace each // cell with its highest vertex. 
void BeginIntegration(const std::unordered_map<INDEX_TYPE, INT_TYPE>& extremaGIDtoLabelMap, bool verbose = false) { #ifdef DEBUGGSERB printf("gothere1 %d\n", extremaGIDtoLabelMap.size()); mCompare->PrintRule(); testout->SetAll(0); #endif ThreadedTimer gtimer(1); gtimer.StartGlobal(); m_dest_label = new DenseLabeling<int>(m_grid->NumElements()); mIdToVIndexMap.clear(); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); m_dest_label->SetAll(-1); printf("starting expansion\n"); //#pragma omp parallel for // for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { // m_dest_label->SetLabel(i, -1); // // if (IsExtremeVertexIn6Neighborhood(i)) { //#pragma omp critical // { // mExtrema.push_back(i); // } // } // } #ifdef DEBUGGSERB printf("gothere2\n"); #endif std::vector<INT_TYPE> uniqueids; for (auto p : extremaGIDtoLabelMap) { if (mIdToVIndexMap.count(p.second) == 0) uniqueids.push_back(p.second); mIdToVIndexMap[p.second].push_back(mMesh->VertexNumberFromCellID(p.first)); } #ifdef DEBUGGSERB for (auto vv : mIdToVIndexMap) { printf("%d: %d ids\n", vv.first, vv.second.size()); } #endif //for (auto m : mExtrema) { // INDEX_TYPE tgridid = mMesh->CellIDFromVertexNumber(m); // if (extremaGIDtoLabelMap.count(tgridid) == 0) { // printf("WHOA THERE EXTMAP DOES NOT CONTAIN CP\n"); // } //} int num_unique = uniqueids.size(); printf("Found and expanding %d regions\n", num_unique); #pragma omp parallel { #pragma omp for schedule(dynamic) nowait for (int m = 0; m < num_unique; m++) { Expand_Lower_Neighborhood(mIdToVIndexMap[uniqueids[m]], m); } } #ifdef DEBUGGSERB printf("gothere3\n"); testout->OutputToIntFile("testoutasdfasdfasdf.raw"); #endif } }; } #endif
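UFMergeGraph::FindRepresentative above is the classic union-find "find" with recursive path compression; for reference, a minimal standalone sketch of the same scheme (illustration only, not library code):

#include <vector>

// Minimal union-find with the same recursive path compression as
// UFMergeGraph::FindRepresentative.
struct UnionFind {
    std::vector<int> rep;   // rep[i] == i  <=>  i is a root
    explicit UnionFind(int n) : rep(n) {
        for (int i = 0; i < n; i++) rep[i] = i;  // MakeSet: each element is its own rep
    }
    int find(int i) {
        if (rep[i] == i) return i;
        return rep[i] = find(rep[i]);            // compress the path on the way back
    }
    void merge(int a, int b) { rep[find(a)] = find(b); }  // point one root at the other
};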
stereo_costs.h
#ifndef RECONSTRUCTION_BASE_STEREO_COSTS_ #define RECONSTRUCTION_BASE_STEREO_COSTS_ #include <iostream> #include <unordered_map> #include <opencv2/core/core.hpp> #include <Eigen/Core> #include "../../core/types.h" namespace recon { typedef Eigen::Matrix<uint32_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixCensusCosts; namespace StereoCosts { void calcPatchMeans(const cv::Mat& img, cv::Mat& means, int wsz); template<typename T> uint8_t hamming_dist(T x, T y); uint32_t get_cost_SAD(const cv::Mat& left_img, const cv::Mat& right_img, int wsz, int cx, int cy, int d); float get_cost_ZSAD(const cv::Mat& left_img, const cv::Mat& right_img, const cv::Mat& left_means, const cv::Mat& right_means, int wsz, int cx, int cy, int d); double get_cost_NCC(const core::DescriptorNCC& d1, const core::DescriptorNCC& d2); void census_transform(const cv::Mat& img, int wsz, cv::Mat& census); uint32_t census_transform_point(const core::Point& pt, const cv::Mat& img, int wsz); template<typename T> void compute_ncc_descriptor(const cv::Mat& img, const core::Point& feat, const int window_sz, const int cv_type, core::DescriptorNCC& desc); template<typename T> void compute_ncc_descriptor(const cv::Mat& img, const int cx, const int cy, const int window_sz, const int cv_type, core::DescriptorNCC& desc); void compute_image_ncc_descriptors(const cv::Mat& img, int window_sz, std::vector<core::DescriptorNCC>& desciptors); } // end namespace: StereoCosts template<typename T> inline void StereoCosts::compute_ncc_descriptor(const cv::Mat& img, const int cx, const int cy, const int window_sz, const int cv_type, core::DescriptorNCC& desc) { int desc_sz = window_sz * window_sz; int margin_sz = (window_sz - 1) / 2; //desc.vec.create(desc_sz, 1, CV_8U); //desc.vec.create(desc_sz, 1, CV_32F); desc.vec.create(desc_sz, 1, cv_type); desc.A = 0.0; desc.B = 0.0; desc.C = 0.0; assert(cx >= margin_sz && cx < (img.cols - margin_sz)); assert(cy >= margin_sz && cy < (img.rows - margin_sz)); int vpos = 0; for(int y = cy - margin_sz; y <= cy + margin_sz; y++) { for(int x = cx - margin_sz; x <= cx + margin_sz; x++) { T val = img.at<T>(y,x); desc.vec.at<T>(vpos) = val; double dval = static_cast<double>(val); desc.A += dval; desc.B += dval*dval; vpos++; } } // var - variance * N^2 double var = std::sqrt((desc_sz * desc.B) - (desc.A * desc.A)); // C = N^2 / variance if (var > 0.0) desc.C = 1.0 / var; // if variance equals 0 set negative C else desc.C = -1.0; assert(!std::isnan(desc.C)); assert(!std::isinf(desc.C)); } template<typename T> inline void StereoCosts::compute_ncc_descriptor(const cv::Mat& img, const core::Point& feat, const int window_sz, const int cv_type, core::DescriptorNCC& desc) { int cx = static_cast<int>(feat.x_); int cy = static_cast<int>(feat.y_); compute_ncc_descriptor<T>(img, cx, cy, window_sz, cv_type, desc); } inline void StereoCosts::compute_image_ncc_descriptors(const cv::Mat& img, int window_sz, std::vector<core::DescriptorNCC>& desciptors) { int margin_sz = (window_sz-1) / 2; int width = img.cols - 2*margin_sz; int height = img.rows - 2*margin_sz; int num_of_desc = width * height; desciptors.resize(num_of_desc); #pragma omp parallel for for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { core::Point pt; pt.x_ = x + margin_sz; pt.y_ = y + margin_sz; compute_ncc_descriptor<uint8_t>(img, pt, window_sz, CV_8U, desciptors[y*width + x]); } } } template<typename T> inline uint8_t StereoCosts::hamming_dist(T x, T y) { uint8_t dist = 0; T val = x ^ y; // XOR // Count the number of set bits while(val) { 
++dist; val &= val - 1; } return dist; } inline uint32_t StereoCosts::get_cost_SAD(const cv::Mat& left_img, const cv::Mat& right_img, int wsz, int cx, int cy, int d) { int ssz = (wsz-1) / 2; int SAD = 0; for(int y = (cy - ssz); y <= (cy + ssz); y++) { for(int x = (cx - ssz); x <= (cx + ssz); x++) { int idiff = static_cast<int>(left_img.at<uint8_t>(y,x) - right_img.at<uint8_t>(y,x-d)); SAD += std::abs(idiff); } } //std::cout << "SAD = " << sad << "\n"; return SAD; } inline float StereoCosts::get_cost_ZSAD(const cv::Mat& left_img, const cv::Mat& right_img, const cv::Mat& left_means, const cv::Mat& right_means, int wsz, int cx, int cy, int d) { assert(left_img.rows == (left_means.rows + (wsz-1))); int ssz = (wsz-1) / 2; float zsad = 0.0f; int cx2 = cx - ssz; int cy2 = cy - ssz; float mean_diff = left_means.at<float>(cy2,cx2) - right_means.at<float>(cy2,cx2-d); for(int y = (cy - ssz); y <= (cy + ssz); y++) { for(int x = (cx - ssz); x <= (cx + ssz); x++) { float idiff = left_img.at<uint8_t>(y,x) - right_img.at<uint8_t>(y,x-d); zsad += std::abs(idiff - mean_diff); } } //std::cout << "ZSAD = " << zsad << "\n"; return zsad; } inline double StereoCosts::get_cost_NCC(const core::DescriptorNCC& d1, const core::DescriptorNCC& d2) { assert(d1.vec.rows == d2.vec.rows); if (d1.C < 0.0 || d2.C < 0.0) return -1.0; double n = d1.vec.rows; double D = d1.vec.dot(d2.vec); double ncc = (n * D - (d1.A * d2.A)) * d1.C * d2.C; //if(std::isnan(ncc) || std::isinf(ncc)) { // printf("TRUE! NCC = %f\n", ncc); // printf("D = %f\n", D); // ncc = 0.0; // throw 1; //} return ncc; } } #endif
GB_unaryop__minv_int8_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_fp32 // op(A') function: GB_tran__minv_int8_fp32 // C type: int8_t // A type: float // cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8) // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ float #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ int8_t z ; GB_CAST_SIGNED(z,x,8) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_fp32 ( int8_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
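/* Readability aid: with the macros above substituted, each iteration of the
   Cx loop in GB_unop__minv_int8_fp32 performs, in effect:

       float aij = Ax [p] ;                     // GB_GETA
       int8_t x ; GB_CAST_SIGNED (x, aij, 8) ;  // GB_CASTING: float -> int8_t
       Cx [p] = GB_IMINV_SIGNED (x, 8) ;        // GB_OP: integer 1/x

   GB_CAST_SIGNED and GB_IMINV_SIGNED are GraphBLAS-internal macros from GB.h;
   their edge-case handling (overflow, division by zero) is not shown here. */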
section.c
// OpenMP Section Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

void functionA( void )
{
    int a = omp_get_thread_num( );
    printf( "This is function A! Executed by Core %d\n", a );
}

void functionB( void )
{
    int b = omp_get_thread_num( );
    printf( "This is function B! Executed by Core %d\n", b );
}

int main( int argc, char** argv )
{
    int i = 0;

#pragma omp parallel private( i )
    {
        i = omp_get_thread_num( );

#pragma omp sections
        {
#pragma omp section
            functionA( );

#pragma omp section
            functionB( );
        }

        printf( "All cores again. This one is %d.\n", i );
    }

    return 0;
}
// End section.c - EWG SDG
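/* Usage note: compile with OpenMP enabled, e.g. `gcc -fopenmp section.c -o
 * section`. Each `omp section` is executed exactly once, by one thread of the
 * team, so functionA and functionB may run on different cores concurrently;
 * the final printf sits outside the sections but inside the parallel region,
 * so it runs once per team member. */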
atomic_messages.c
// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized // RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized int foo() { L1: foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); goto L1; // expected-error {{use of undeclared label 'L1'}} } goto L2; // expected-error {{use of undeclared label 'L2'}} #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); L2: foo(); } return 0; } struct S { int a; }; int readint() { int a = 0, b = 0; // Test for atomic read #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected lvalue expression}} a = 0; #pragma omp atomic read a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} #pragma omp atomic read read a = b; return 0; } int readS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}} #pragma omp atomic read read allocate(a) // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int writeint() { int a = 0, b = 0; // Test for atomic write #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; 
#pragma omp atomic write a = 0; #pragma omp atomic write a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write a = b; return 0; } int writeS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int updateint() { int a = 0, b = 0; // Test for atomic update #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary operator}} a = b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = b || a; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = a && b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = (float)a + b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = 2 * b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = b + *&a; #pragma omp atomic update *&a = *&a + 2; #pragma omp atomic update a++; #pragma omp atomic ++a; #pragma omp atomic 
update a--; #pragma omp atomic --a; #pragma omp atomic update a += b; #pragma omp atomic a %= b; #pragma omp atomic update a *= b; #pragma omp atomic a -= b; #pragma omp atomic update a /= b; #pragma omp atomic a &= b; #pragma omp atomic update a ^= b; #pragma omp atomic a |= b; #pragma omp atomic update a <<= b; #pragma omp atomic a >>= b; #pragma omp atomic update a = b + a; #pragma omp atomic a = a * b; #pragma omp atomic update a = b - a; #pragma omp atomic a = a / b; #pragma omp atomic update a = b & a; #pragma omp atomic a = a ^ b; #pragma omp atomic update a = b | a; #pragma omp atomic a = a << b; #pragma omp atomic a = b >> a; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}} #pragma omp atomic update update a /= b; return 0; } int captureint() { int a = 0, b = 0, c = 0; // Test for atomic capture #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected compound statement}} ; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} foo(); #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} a = b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b || a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} b = a = a && b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = (float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= 
expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b + *&a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} { a = b; } #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} {} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b;a = b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b; a = b || a;} #pragma omp atomic capture {b = a; a = a && b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = (float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= 
expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = b + *&a; #pragma omp atomic capture c = *&a = *&a + 2; #pragma omp atomic capture c = a++; #pragma omp atomic capture c = ++a; #pragma omp atomic capture c = a--; #pragma omp atomic capture c = --a; #pragma omp atomic capture c = a += b; #pragma omp atomic capture c = a %= b; #pragma omp atomic capture c = a *= b; #pragma omp atomic capture c = a -= b; #pragma omp atomic capture c = a /= b; #pragma omp atomic capture c = a &= b; #pragma omp atomic capture c = a ^= b; #pragma omp atomic capture c = a |= b; #pragma omp atomic capture c = a <<= b; #pragma omp atomic capture c = a >>= b; #pragma omp atomic capture c = a = b + a; #pragma omp atomic capture c = a = a * b; #pragma omp atomic capture c = a = b - a; #pragma omp atomic capture c = a = a / b; #pragma omp atomic capture c = a = b & a; #pragma omp atomic capture c = a = a ^ b; #pragma omp atomic capture c = a = b | a; #pragma omp atomic capture c = a = a << b; #pragma omp atomic capture c = a = b >> a; #pragma omp atomic capture { c = *&a; *&a = *&a + 2;} #pragma omp atomic capture { *&a = *&a + 2; c = *&a;} #pragma omp atomic capture {c = a; a++;} #pragma omp atomic capture {c = a; (a)++;} #pragma omp atomic capture {++a;c = a;} #pragma omp atomic capture {c = a;a--;} #pragma omp atomic capture {--a;c = a;} #pragma omp atomic capture {c = a; a += b;} #pragma omp atomic capture {c = a; (a) += b;} #pragma omp atomic capture {a %= b; c = a;} #pragma omp atomic capture {c = a; a *= b;} #pragma omp atomic capture {a -= b;c = a;} #pragma omp atomic capture {c = a; a /= b;} #pragma omp atomic capture {a &= b; c = a;} #pragma omp atomic capture {c = a; a ^= b;} #pragma omp atomic capture {a |= b; c = a;} #pragma omp atomic capture {c = a; a <<= b;} #pragma omp atomic capture {a >>= b; c = a;} #pragma omp atomic capture {c = a; a = b + a;} #pragma omp atomic capture {a = a * b; c = a;} #pragma omp atomic capture {c = a; a = b - a;} #pragma omp atomic capture {a = a / b; c = a;} #pragma omp atomic capture {c = a; a = b & a;} #pragma omp atomic capture {a = a ^ b; c = a;} #pragma omp atomic capture {c = a; a = b | a;} #pragma omp atomic capture {a = a << b; c = a;} #pragma omp atomic capture {c = a; a = b >> a;} #pragma omp atomic capture {c = a; a = foo();} // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}} #pragma omp atomic capture capture b = a /= b; return 0; }
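// Companion sketch (not part of the clang test above): the well-formed
// counterparts of the statements the diagnostics reject. Each statement
// below matches one of the shapes named in the expected-error text, so a
// conforming OpenMP compiler accepts all of them. Illustrative only.
void ok_forms() {
  int x = 0, v = 0, b = 1;
#pragma omp atomic read
  v = x;                // v = x;
#pragma omp atomic write
  x = b + 1;            // x = expr;
#pragma omp atomic update
  x += b;               // x binop= expr;
#pragma omp atomic
  x = x * b;            // x = x binop expr;
#pragma omp atomic capture
  v = x += b;           // v = x binop= expr;
#pragma omp atomic capture
  { v = x; x = b + x; } // {v = x; x = expr binop x;}
}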
GB_unop__identity_uint8_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint8_int16) // op(A') function: GB (_unop_tran__identity_uint8_int16) // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint8_int16) ( uint8_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint8_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
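// Standalone restatement (illustrative, not part of the library) of the
// apply kernel above with the GB_* macro layer expanded by hand: cast
// int16_t to uint8_t elementwise, honoring an optional bitmap. Note the
// cast wraps modulo 256, e.g. (uint8_t) -1 == 255 and (uint8_t) 300 == 44,
// which is exactly what the identity operator does across these two types.
#include <stdint.h>
static void apply_identity_uint8_int16 (uint8_t *Cx, const int16_t *Ax,
    const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip bitmap holes
        Cx [p] = (uint8_t) Ax [p] ;             // the cast is the whole op
    }
}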
explicit.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp atomic x++; #pragma omp barrier print_current_address(); #pragma omp atomic x++; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // master thread explicit barrier // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // master thread implicit barrier at parallel end // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}} // worker thread explicit barrier // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]] // CHECK: {{^}}[[THREAD_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // worker thread implicit barrier at parallel end // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]] return 0; }
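// Illustrative sketch (an assumption, not the test suite's callback.h):
// one way a test can emit a code address adjacent to a barrier so that
// FileCheck can correlate it with the codeptr_ra reported by the OMPT
// sync-region callbacks above. Relies on GNU C's address-of-label
// extension; the function and label names are hypothetical.
#include <stdio.h>
static void barrier_with_marker(void) {
#pragma omp barrier
after_barrier:
  // &&after_barrier lies just past the barrier call site, close to the
  // return address (codeptr_ra) the runtime records for the barrier.
  printf("current_address=%p\n", (void *)&&after_barrier);
}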
force.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
*/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <math.h> #include <string.h> #include <assert.h> #include "sysstuff.h" #include "typedefs.h" #include "macros.h" #include "smalloc.h" #include "macros.h" #include "physics.h" #include "force.h" #include "nonbonded.h" #include "names.h" #include "network.h" #include "pbc.h" #include "ns.h" #include "nrnb.h" #include "bondf.h" #include "mshift.h" #include "txtdump.h" #include "coulomb.h" #include "pme.h" #include "mdrun.h" #include "domdec.h" #include "partdec.h" #include "qmmm.h" #include "mpelogging.h" #include "gmx_omp_nthreads.h" void ns(FILE *fp, t_forcerec *fr, rvec x[], matrix box, gmx_groups_t *groups, t_grpopts *opts, gmx_localtop_t *top, t_mdatoms *md, t_commrec *cr, t_nrnb *nrnb, real *lambda, real *dvdlambda, gmx_grppairener_t *grppener, gmx_bool bFillGrid, gmx_bool bDoLongRangeNS) { char *ptr; int nsearch; GMX_MPE_LOG(ev_ns_start); if (!fr->ns.nblist_initialized) { init_neighbor_list(fp, fr, md->homenr); } if (fr->bTwinRange) { fr->nlr = 0; } nsearch = search_neighbours(fp, fr, x, box, top, groups, cr, nrnb, md, lambda, dvdlambda, grppener, bFillGrid, bDoLongRangeNS, TRUE); if (debug) { fprintf(debug, "nsearch = %d\n", nsearch); } /* Check whether we have to do dynamic load balancing */ /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0)) count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr, &(top->idef),opts->ngener); */ if (fr->ns.dump_nl > 0) { dump_nblist(fp, cr, fr, fr->ns.dump_nl); } GMX_MPE_LOG(ev_ns_finish); } static void reduce_thread_forces(int n, rvec *f, tensor vir, real *Vcorr, int efpt_ind, real *dvdl, int nthreads, f_thread_t *f_t) { int t, i; /* This reduction can run over any number of threads */ #pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntBonded)) private(t) schedule(static) for (i = 0; i < n; i++) { for (t = 1; t < nthreads; t++) { rvec_inc(f[i], f_t[t].f[i]); } } for (t = 1; t < nthreads; t++) { *Vcorr += f_t[t].Vcorr; *dvdl += f_t[t].dvdl[efpt_ind]; m_add(vir, f_t[t].vir, vir); } } void do_force_lowlevel(FILE *fplog, gmx_large_int_t step, t_forcerec *fr, t_inputrec *ir, t_idef *idef, t_commrec *cr, t_nrnb *nrnb, gmx_wallcycle_t wcycle, t_mdatoms *md, t_grpopts *opts, rvec x[], history_t *hist, rvec f[], rvec f_longrange[], gmx_enerdata_t *enerd, t_fcdata *fcd, gmx_mtop_t *mtop, gmx_localtop_t *top, gmx_genborn_t *born, t_atomtypes *atype, gmx_bool bBornRadii, matrix box, t_lambda *fepvals, real *lambda, t_graph *graph, t_blocka *excl, rvec mu_tot[], int flags, float *cycles_pme) { int i, j, status; int donb_flags; gmx_bool bDoEpot, bSepDVDL, bSB; int pme_flags; matrix boxs; rvec box_size; real Vsr, Vlr, Vcorr = 0; t_pbc pbc; real dvdgb; char buf[22]; double clam_i, vlam_i; real dvdl_dum[efptNR], dvdl, dvdl_nb[efptNR], lam_i[efptNR]; real dvdlsum; #ifdef GMX_MPI double t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */ #endif #define PRINT_SEPDVDL(s, v, dvdlambda) if (bSepDVDL) {fprintf(fplog, sepdvdlformat, s, v, dvdlambda); } GMX_MPE_LOG(ev_force_start); set_pbc(&pbc, fr->ePBC, box); /* reset free energy components */ for (i = 0; i < efptNR; i++) { dvdl_nb[i] = 0; dvdl_dum[i] = 0; } /* Reset box */ for (i = 0; (i < DIM); i++) { box_size[i] = box[i][i]; } bSepDVDL = (fr->bSepDVDL && do_per_step(step, ir->nstlog)); debug_gmx(); /* do QMMM first if requested */ if (fr->bQMMM) { enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr, md); } if (bSepDVDL) { fprintf(fplog, "Step %s: non-bonded V and dVdl for node %d:\n", gmx_step_str(step, buf), cr->nodeid); } /* Call the 
short range functions all in one go. */ GMX_MPE_LOG(ev_do_fnbf_start); #ifdef GMX_MPI /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/ #define TAKETIME FALSE if (TAKETIME) { MPI_Barrier(cr->mpi_comm_mygroup); t0 = MPI_Wtime(); } #endif if (ir->nwall) { /* foreign lambda component for walls */ dvdl = do_walls(ir, fr, box, md, x, f, lambda[efptVDW], enerd->grpp.ener[egLJSR], nrnb); PRINT_SEPDVDL("Walls", 0.0, dvdl); enerd->dvdl_lin[efptVDW] += dvdl; } /* If doing GB, reset dvda and calculate the Born radii */ if (ir->implicit_solvent) { wallcycle_sub_start(wcycle, ewcsNONBONDED); for (i = 0; i < born->nr; i++) { fr->dvda[i] = 0; } if (bBornRadii) { calc_gb_rad(cr, fr, ir, top, atype, x, &(fr->gblist), born, md, nrnb); } wallcycle_sub_stop(wcycle, ewcsNONBONDED); } where(); /* We only do non-bonded calculation with group scheme here, the verlet * calls are done from do_force_cutsVERLET(). */ if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED)) { donb_flags = 0; /* Add short-range interactions */ donb_flags |= GMX_NONBONDED_DO_SR; if (flags & GMX_FORCE_FORCES) { donb_flags |= GMX_NONBONDED_DO_FORCE; } if (flags & GMX_FORCE_ENERGY) { donb_flags |= GMX_NONBONDED_DO_POTENTIAL; } if (flags & GMX_FORCE_DO_LR) { donb_flags |= GMX_NONBONDED_DO_LR; } wallcycle_sub_start(wcycle, ewcsNONBONDED); do_nonbonded(cr, fr, x, f, f_longrange, md, excl, &enerd->grpp, box_size, nrnb, lambda, dvdl_nb, -1, -1, donb_flags); /* If we do foreign lambda and we have soft-core interactions * we have to recalculate the (non-linear) energy contributions. */ if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0) { for (i = 0; i < enerd->n_lambda; i++) { for (j = 0; j < efptNR; j++) { lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]); } reset_foreign_enerdata(enerd); do_nonbonded(cr, fr, x, f, f_longrange, md, excl, &(enerd->foreign_grpp), box_size, nrnb, lam_i, dvdl_dum, -1, -1, (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA); sum_epot(&ir->opts, &(enerd->foreign_grpp), enerd->foreign_term); enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT]; } } wallcycle_sub_stop(wcycle, ewcsNONBONDED); where(); } /* If we are doing GB, calculate bonded forces and apply corrections * to the solvation forces */ /* MRS: Eventually, may need to include free energy contribution here! */ if (ir->implicit_solvent) { wallcycle_sub_start(wcycle, ewcsBONDED); calc_gb_forces(cr, md, born, top, atype, x, f, fr, idef, ir->gb_algorithm, ir->sa_algorithm, nrnb, bBornRadii, &pbc, graph, enerd); wallcycle_sub_stop(wcycle, ewcsBONDED); } #ifdef GMX_MPI if (TAKETIME) { t1 = MPI_Wtime(); fr->t_fnbf += t1-t0; } #endif if (fepvals->sc_alpha != 0) { enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW]; } else { enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW]; } if (fepvals->sc_alpha != 0) /* even though coulomb part is linear, we already added it, because we need to go through the vdw calculation anyway */ { enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL]; } else { enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL]; } Vsr = 0; if (bSepDVDL) { for (i = 0; i < enerd->grpp.nener; i++) { Vsr += (fr->bBHAM ?
enerd->grpp.ener[egBHAMSR][i] : enerd->grpp.ener[egLJSR][i]) + enerd->grpp.ener[egCOULSR][i] + enerd->grpp.ener[egGB][i]; } dvdlsum = dvdl_nb[efptVDW] + dvdl_nb[efptCOUL]; PRINT_SEPDVDL("VdW and Coulomb SR particle-p.", Vsr, dvdlsum); } debug_gmx(); GMX_MPE_LOG(ev_do_fnbf_finish); if (debug) { pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS); } /* Shift the coordinates. Must be done before bonded forces and PPPM, * but is also necessary for SHAKE and update, therefore it can NOT * go when no bonded forces have to be evaluated. */ /* Here sometimes we would not need to shift with NBFonly, * but we do so anyhow for consistency of the returned coordinates. */ if (graph) { shift_self(graph, box, x); if (TRICLINIC(box)) { inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes); } else { inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes); } } /* Check whether we need to do bondeds or correct for exclusions */ if (fr->bMolPBC && ((flags & GMX_FORCE_BONDED) || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype))) { /* Since all atoms are in the rectangular or triclinic unit-cell, * only single box vector shifts (2 in x) are required. */ set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box); } debug_gmx(); if (flags & GMX_FORCE_BONDED) { GMX_MPE_LOG(ev_calc_bonds_start); wallcycle_sub_start(wcycle, ewcsBONDED); calc_bonds(fplog, cr->ms, idef, x, hist, f, fr, &pbc, graph, enerd, nrnb, lambda, md, fcd, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL, atype, born, flags, fr->bSepDVDL && do_per_step(step, ir->nstlog), step); /* Check if we have to determine energy differences * at foreign lambda's. */ if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && idef->ilsort != ilsortNO_FE) { if (idef->ilsort != ilsortFE_SORTED) { gmx_incons("The bonded interactions are not sorted for free energy"); } for (i = 0; i < enerd->n_lambda; i++) { reset_foreign_enerdata(enerd); for (j = 0; j < efptNR; j++) { lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]); } calc_bonds_lambda(fplog, idef, x, fr, &pbc, graph, &(enerd->foreign_grpp), enerd->foreign_term, nrnb, lam_i, md, fcd, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL); sum_epot(&ir->opts, &(enerd->foreign_grpp), enerd->foreign_term); enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT]; } } debug_gmx(); GMX_MPE_LOG(ev_calc_bonds_finish); wallcycle_sub_stop(wcycle, ewcsBONDED); } where(); *cycles_pme = 0; if (EEL_FULL(fr->eeltype)) { bSB = (ir->nwall == 2); if (bSB) { copy_mat(box, boxs); svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]); box_size[ZZ] *= ir->wall_ewald_zfac; } clear_mat(fr->vir_el_recip); if (fr->bEwald) { Vcorr = 0; dvdl = 0; /* With the Verlet scheme exclusion forces are calculated * in the non-bonded kernel. */ /* The TPI molecule does not have exclusions with the rest * of the system and no intra-molecular PME grid contributions * will be calculated in gmx_pme_calc_energy. 
*/ if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) || ir->ewald_geometry != eewg3D || ir->epsilon_surface != 0) { int nthreads, t; wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION); if (fr->n_tpi > 0) { gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions"); } nthreads = gmx_omp_nthreads_get(emntBonded); #pragma omp parallel for num_threads(nthreads) schedule(static) for (t = 0; t < nthreads; t++) { int s, e, i; rvec *fnv; tensor *vir; real *Vcorrt, *dvdlt; if (t == 0) { fnv = fr->f_novirsum; vir = &fr->vir_el_recip; Vcorrt = &Vcorr; dvdlt = &dvdl; } else { fnv = fr->f_t[t].f; vir = &fr->f_t[t].vir; Vcorrt = &fr->f_t[t].Vcorr; dvdlt = &fr->f_t[t].dvdl[efptCOUL]; for (i = 0; i < fr->natoms_force; i++) { clear_rvec(fnv[i]); } clear_mat(*vir); } *dvdlt = 0; *Vcorrt = ewald_LRcorrection(fplog, fr->excl_load[t], fr->excl_load[t+1], cr, t, fr, md->chargeA, md->nChargePerturbed ? md->chargeB : NULL, ir->cutoff_scheme != ecutsVERLET, excl, x, bSB ? boxs : box, mu_tot, ir->ewald_geometry, ir->epsilon_surface, fnv, *vir, lambda[efptCOUL], dvdlt); } if (nthreads > 1) { reduce_thread_forces(fr->natoms_force, fr->f_novirsum, fr->vir_el_recip, &Vcorr, efptCOUL, &dvdl, nthreads, fr->f_t); } wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION); } if (fr->n_tpi == 0) { Vcorr += ewald_charge_correction(cr, fr, lambda[efptCOUL], box, &dvdl, fr->vir_el_recip); } PRINT_SEPDVDL("Ewald excl./charge/dip. corr.", Vcorr, dvdl); enerd->dvdl_lin[efptCOUL] += dvdl; } status = 0; Vlr = 0; dvdl = 0; switch (fr->eeltype) { case eelPME: case eelPMESWITCH: case eelPMEUSER: case eelPMEUSERSWITCH: case eelP3M_AD: if (cr->duty & DUTY_PME) { assert(fr->n_tpi >= 0); if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED)) { pme_flags = GMX_PME_SPREAD_Q | GMX_PME_SOLVE; if (flags & GMX_FORCE_FORCES) { pme_flags |= GMX_PME_CALC_F; } if (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) { pme_flags |= GMX_PME_CALC_ENER_VIR; } if (fr->n_tpi > 0) { /* We don't calculate f, but we do want the potential */ pme_flags |= GMX_PME_CALC_POT; } wallcycle_start(wcycle, ewcPMEMESH); status = gmx_pme_do(fr->pmedata, md->start, md->homenr - fr->n_tpi, x, fr->f_novirsum, md->chargeA, md->chargeB, bSB ? boxs : box, cr, DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0, DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0, nrnb, wcycle, fr->vir_el_recip, fr->ewaldcoeff, &Vlr, lambda[efptCOUL], &dvdl, pme_flags); *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH); /* We should try to do as little computation after * this as possible, because parallel PME synchronizes * the nodes, so we want all load imbalance of the rest * of the force calculation to be before the PME call. * DD load balancing is done on the whole time of * the force call (without PME). */ } if (fr->n_tpi > 0) { /* Determine the PME grid energy of the test molecule * with the PME grid potential of the other charges. 
*/ gmx_pme_calc_energy(fr->pmedata, fr->n_tpi, x + md->homenr - fr->n_tpi, md->chargeA + md->homenr - fr->n_tpi, &Vlr); } PRINT_SEPDVDL("PME mesh", Vlr, dvdl); } break; case eelEWALD: Vlr = do_ewald(fplog, FALSE, ir, x, fr->f_novirsum, md->chargeA, md->chargeB, box_size, cr, md->homenr, fr->vir_el_recip, fr->ewaldcoeff, lambda[efptCOUL], &dvdl, fr->ewald_table); PRINT_SEPDVDL("Ewald long-range", Vlr, dvdl); break; default: gmx_fatal(FARGS, "No such electrostatics method implemented %s", eel_names[fr->eeltype]); } if (status != 0) { gmx_fatal(FARGS, "Error %d in long range electrostatics routine %s", status, EELTYPE(fr->eeltype)); } /* Note that with separate PME nodes we get the real energies later */ enerd->dvdl_lin[efptCOUL] += dvdl; enerd->term[F_COUL_RECIP] = Vlr + Vcorr; if (debug) { fprintf(debug, "Vlr = %g, Vcorr = %g, Vlr_corr = %g\n", Vlr, Vcorr, enerd->term[F_COUL_RECIP]); pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM); pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS); } } else { if (EEL_RF(fr->eeltype)) { /* With the Verlet scheme exclusion forces are calculated * in the non-bonded kernel. */ if (ir->cutoff_scheme != ecutsVERLET && fr->eeltype != eelRF_NEC) { dvdl = 0; enerd->term[F_RF_EXCL] = RF_excl_correction(fplog, fr, graph, md, excl, x, f, fr->fshift, &pbc, lambda[efptCOUL], &dvdl); } enerd->dvdl_lin[efptCOUL] += dvdl; PRINT_SEPDVDL("RF exclusion correction", enerd->term[F_RF_EXCL], dvdl); } } where(); debug_gmx(); if (debug) { print_nrnb(debug, nrnb); } debug_gmx(); #ifdef GMX_MPI if (TAKETIME) { t2 = MPI_Wtime(); MPI_Barrier(cr->mpi_comm_mygroup); t3 = MPI_Wtime(); fr->t_wait += t3-t2; if (fr->timesteps == 11) { fprintf(stderr, "* PP load balancing info: node %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n", cr->nodeid, gmx_step_str(fr->timesteps, buf), 100*fr->t_wait/(fr->t_wait+fr->t_fnbf), (fr->t_fnbf+fr->t_wait)/fr->t_fnbf); } fr->timesteps++; } #endif if (debug) { pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS); } GMX_MPE_LOG(ev_force_finish); } void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd) { int i, n2; for (i = 0; i < F_NRE; i++) { enerd->term[i] = 0; enerd->foreign_term[i] = 0; } for (i = 0; i < efptNR; i++) { enerd->dvdl_lin[i] = 0; enerd->dvdl_nonlin[i] = 0; } n2 = ngener*ngener; if (debug) { fprintf(debug, "Creating %d sized group matrix for energies\n", n2); } enerd->grpp.nener = n2; enerd->foreign_grpp.nener = n2; for (i = 0; (i < egNR); i++) { snew(enerd->grpp.ener[i], n2); snew(enerd->foreign_grpp.ener[i], n2); } if (n_lambda) { enerd->n_lambda = 1 + n_lambda; snew(enerd->enerpart_lambda, enerd->n_lambda); } else { enerd->n_lambda = 0; } } void destroy_enerdata(gmx_enerdata_t *enerd) { int i; for (i = 0; (i < egNR); i++) { sfree(enerd->grpp.ener[i]); } for (i = 0; (i < egNR); i++) { sfree(enerd->foreign_grpp.ener[i]); } if (enerd->n_lambda) { sfree(enerd->enerpart_lambda); } } static real sum_v(int n, real v[]) { real t; int i; t = 0.0; for (i = 0; (i < n); i++) { t = t + v[i]; } return t; } void sum_epot(t_grpopts *opts, gmx_grppairener_t *grpp, real *epot) { int i; /* Accumulate energies */ epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]); epot[F_LJ] = sum_v(grpp->nener, grpp->ener[egLJSR]); epot[F_LJ14] = sum_v(grpp->nener, grpp->ener[egLJ14]); epot[F_COUL14] = sum_v(grpp->nener, grpp->ener[egCOUL14]); epot[F_COUL_LR] = sum_v(grpp->nener, grpp->ener[egCOULLR]); epot[F_LJ_LR] = sum_v(grpp->nener, grpp->ener[egLJLR]); /* We have already added 1-2,1-3, 
and 1-4 terms to F_GBPOL */ epot[F_GBPOL] += sum_v(grpp->nener, grpp->ener[egGB]); /* lattice part of LR doesn't belong to any group * and has been added earlier */ epot[F_BHAM] = sum_v(grpp->nener, grpp->ener[egBHAMSR]); epot[F_BHAM_LR] = sum_v(grpp->nener, grpp->ener[egBHAMLR]); epot[F_EPOT] = 0; for (i = 0; (i < F_EPOT); i++) { if (i != F_DISRESVIOL && i != F_ORIRESDEV) { epot[F_EPOT] += epot[i]; } } } void sum_dhdl(gmx_enerdata_t *enerd, real *lambda, t_lambda *fepvals) { int i, j, index; double dlam; enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */ enerd->term[F_DVDL] = 0.0; for (i = 0; i < efptNR; i++) { if (fepvals->separate_dvdl[i]) { /* could this be done more readably/compactly? */ switch (i) { case (efptMASS): index = F_DKDL; break; case (efptCOUL): index = F_DVDL_COUL; break; case (efptVDW): index = F_DVDL_VDW; break; case (efptBONDED): index = F_DVDL_BONDED; break; case (efptRESTRAINT): index = F_DVDL_RESTRAINT; break; default: index = F_DVDL; break; } enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i]; if (debug) { fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n", efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]); } } else { enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i]; if (debug) { fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n", efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]); } } } /* Notes on the foreign lambda free energy difference evaluation: * Adding the potential and ekin terms that depend linearly on lambda * as delta lam * dvdl to the energy differences is exact. * For the constraints this is not exact, but we have no other option * without literally changing the lengths and reevaluating the energies at each step. * (try to remedy this post 4.6 - MRS) * For the non-bonded LR term we assume that the soft-core (if present) * no longer affects the energy beyond the short-range cut-off, * which is a very good approximation (except for exotic settings). * (investigate how to overcome this post 4.6 - MRS) */ if (fepvals->separate_dvdl[efptBONDED]) { enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR]; } else { enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR]; } enerd->term[F_DVDL_CONSTR] = 0; for (i = 0; i < fepvals->n_lambda; i++) { /* note we are iterating over fepvals here! For the current lam, dlam = 0 automatically, so we don't need to add anything to the enerd->enerpart_lambda[0] */ /* we don't need to worry about dvdl_lin contributions to dE at current lambda, because the contributions to the current lambda are automatically zeroed */ for (j = 0; j < efptNR; j++) { /* Note that this loop is over all dhdl components, not just the separated ones */ dlam = (fepvals->all_lambda[j][i]-lambda[j]); enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j]; if (debug) { fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n", fepvals->all_lambda[j][i], efpt_names[j], (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]), dlam, enerd->dvdl_lin[j]); } } } } void reset_foreign_enerdata(gmx_enerdata_t *enerd) { int i, j; /* First reset all foreign energy components.
Foreign energies are always recalculated on neighbor search steps */ for (i = 0; (i < egNR); i++) { for (j = 0; (j < enerd->grpp.nener); j++) { enerd->foreign_grpp.ener[i][j] = 0.0; } } /* potential energy components */ for (i = 0; (i <= F_EPOT); i++) { enerd->foreign_term[i] = 0.0; } } void reset_enerdata(t_grpopts *opts, t_forcerec *fr, gmx_bool bNS, gmx_enerdata_t *enerd, gmx_bool bMaster) { gmx_bool bKeepLR; int i, j; /* First reset all energy components, except for the long range terms * on the master at non neighbor search steps, since the long range * terms have already been summed at the last neighbor search step. */ bKeepLR = (fr->bTwinRange && !bNS); for (i = 0; (i < egNR); i++) { if (!(bKeepLR && bMaster && (i == egCOULLR || i == egLJLR))) { for (j = 0; (j < enerd->grpp.nener); j++) { enerd->grpp.ener[i][j] = 0.0; } } } for (i = 0; i < efptNR; i++) { enerd->dvdl_lin[i] = 0.0; enerd->dvdl_nonlin[i] = 0.0; } /* Normal potential energy components */ for (i = 0; (i <= F_EPOT); i++) { enerd->term[i] = 0.0; } /* Initialize the dVdlambda (dvdl) terms with the long range contribution */ enerd->term[F_DVDL] = 0.0; enerd->term[F_DVDL_COUL] = 0.0; enerd->term[F_DVDL_VDW] = 0.0; enerd->term[F_DVDL_BONDED] = 0.0; enerd->term[F_DVDL_RESTRAINT] = 0.0; enerd->term[F_DKDL] = 0.0; if (enerd->n_lambda > 0) { for (i = 0; i < enerd->n_lambda; i++) { enerd->enerpart_lambda[i] = 0.0; } } /* reset foreign energy data - separate function since we also call it elsewhere */ reset_foreign_enerdata(enerd); }
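/* Minimal standalone sketch of the per-thread force-buffer reduction
 * pattern used by reduce_thread_forces() above, with the GROMACS types
 * replaced by plain arrays (vec3 and the function name are illustrative,
 * not GROMACS API). Parallelizing the outer loop over atoms means no two
 * threads ever write the same f[i], so no atomics are needed. */
typedef double vec3[3];
static void reduce_thread_force_buffers(int natoms, vec3 *f,
                                        vec3 **f_thread, int nthreads)
{
    int i, t, d;
#pragma omp parallel for schedule(static)
    for (i = 0; i < natoms; i++)
    {
        /* buffer 0 is the output array itself, so start at t = 1 */
        for (t = 1; t < nthreads; t++)
        {
            for (d = 0; d < 3; d++)
            {
                f[i][d] += f_thread[t][i][d];
            }
        }
    }
}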
omp4.c
// RUN: mlir-clang %s --function=* -fopenmp -S | FileCheck %s int get(int); void square(double* x, int ss) { int i=7; #pragma omp parallel for private(i) for(i=get(ss); i < 10; i+= 2) { x[i] = i; i++; x[i] = i; } } // CHECK: func @square(%arg0: memref<?xf64>, %arg1: i32) // CHECK-NEXT: %c1 = arith.constant 1 : index // CHECK-NEXT: %c11 = arith.constant 11 : index // CHECK-NEXT: %c2 = arith.constant 2 : index // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32 // CHECK-NEXT: %0 = call @get(%arg1) : (i32) -> i32 // CHECK-NEXT: %1 = arith.index_cast %0 : i32 to index // CHECK-NEXT: %2 = arith.subi %c11, %1 : index // CHECK-NEXT: %3 = arith.divui %2, %c2 : index // CHECK-NEXT: %4 = arith.muli %3, %c2 : index // CHECK-NEXT: %5 = arith.addi %1, %4 : index // CHECK-NEXT: scf.parallel (%arg2) = (%1) to (%5) step (%c2) { // CHECK-NEXT: %6 = arith.index_cast %arg2 : index to i32 // CHECK-NEXT: %7 = arith.sitofp %6 : i32 to f64 // CHECK-NEXT: memref.store %7, %arg0[%arg2] : memref<?xf64> // CHECK-NEXT: %8 = arith.addi %6, %c1_i32 : i32 // CHECK-NEXT: %9 = arith.addi %arg2, %c1 : index // CHECK-NEXT: %10 = arith.sitofp %8 : i32 to f64 // CHECK-NEXT: memref.store %10, %arg0[%9] : memref<?xf64> // CHECK-NEXT: scf.yield // CHECK-NEXT: }
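// Illustrative C restatement (not compiler output) of the bound
// normalization the CHECK lines above encode for "for (i = lb; i < 10;
// i += 2)": scf.parallel needs an exclusive upper bound reachable from
// lb in whole steps, hence the subi/divui/muli/addi chain on %c11 (the
// limit 10 plus step - 1). lb = 3 is an assumed stand-in for get(ss).
#include <stdio.h>
int main(void) {
  int lb = 3, step = 2, limit = 10;
  int span = (limit + step - 1) - lb; /* %2 = subi %c11, %1 (assumes lb <= 11, matching divui's unsigned semantics) */
  int trips = span / step;            /* %3 = divui %2, %c2 */
  int ub = lb + trips * step;         /* %4, %5 = muli, addi: exclusive bound */
  /* Visits i = 3, 5, 7, 9 -- exactly the values with i < 10 at step 2. */
  for (int i = lb; i < ub; i += step) {
    printf("i = %d\n", i);
  }
  return 0;
}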
GB_binop__ge_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_uint32 // A.*B function (eWiseMult): GB_AemultB__ge_uint32 // A*D function (colscale): GB_AxD__ge_uint32 // D*A function (rowscale): GB_DxB__ge_uint32 // C+=B function (dense accum): GB_Cdense_accumB__ge_uint32 // C+=b function (dense accum): GB_Cdense_accumb__ge_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint32 // C=scalar+B GB_bind1st__ge_uint32 // C=scalar+B' GB_bind1st_tran__ge_uint32 // C=A+scalar GB_bind2nd__ge_uint32 // C=A'+scalar GB_bind2nd_tran__ge_uint32 // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ge_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t 
*GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_uint32 ( GrB_Matrix C, const GB_void 
*x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
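// Standalone restatement (illustrative, not part of the library) of the
// GB_bind2nd__ge_uint32 kernel above with the macro layer expanded: the
// second operand y is fixed and z = (aij >= y) is applied elementwise.
// Because >= is not commutative, bind1st is the mirror image with the
// first operand x fixed, computing (x >= bij) instead.
#include <stdbool.h>
#include <stdint.h>
static void bind2nd_ge_uint32 (bool *Cx, const uint32_t *Ax,
    const int8_t *Ab, int64_t anz, uint32_t y, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip bitmap holes
        Cx [p] = (Ax [p] >= y) ;
    }
}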
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). //===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, const ASTContext&, unsigned) throw() { } void operator delete(void*, const ASTContext*, unsigned) throw() { } void operator delete(void*, size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. 
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  /// \brief Skip no-op (attributed, compound) container stmts and skip
  /// captured stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef StmtRange child_range;
  typedef ConstStmtRange const_child_range;

  child_range children();
  const_child_range children() const {
    return const_cast<Stmt*>(this)->children();
  }

  child_iterator child_begin() { return children().first; }
  child_iterator child_end() { return children().second; }

  const_child_iterator child_begin() const { return children().first; }
  const_child_iterator child_end() const { return children().second; }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
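//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): a minimal recursive
// walk over an AST subtree using the child_iterator interface declared above.
// The function name is hypothetical; note that child iteration can yield
// null pointers (e.g. a missing 'else' slot), so each child must be checked.
//
//   #include "llvm/Support/raw_ostream.h"
//   void walk(const clang::Stmt *S, unsigned Depth = 0) {
//     if (!S)
//       return;
//     llvm::outs().indent(Depth * 2) << S->getStmtClassName() << "\n";
//     for (clang::Stmt::const_child_iterator I = S->child_begin(),
//                                            E = S->child_end();
//          I != E; ++I)
//       walk(*I, Depth + 1);
//   }
//===--------------------------------------------------------------------===//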
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. 
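//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): reading the
// declarations carried by a DeclStmt.  The single-decl fast path avoids the
// iterator machinery; the function name is hypothetical.
//
//   #include "clang/AST/Decl.h"
//   unsigned countVarDecls(const clang::DeclStmt *DS) {
//     if (DS->isSingleDecl())
//       return llvm::isa<clang::VarDecl>(DS->getSingleDecl()) ? 1 : 0;
//     unsigned N = 0;
//     for (clang::DeclStmt::const_decl_iterator I = DS->decl_begin(),
//                                               E = DS->decl_end();
//          I != E; ++I)
//       if (llvm::isa<clang::VarDecl>(*I))
//         ++N;
//     return N;
//   }
//===--------------------------------------------------------------------===//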
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. /// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? 
    Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }
};

/// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc),
      ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
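//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): inspecting a case
// label.  getRHS() is non-null only for the GNU "case lo ... hi" range
// extension, and directly nested cases ("case 1: case 2: ...") can be
// flattened iteratively, mirroring CaseStmt::getLocEnd() below.  The
// function name is hypothetical.
//
//   void visitCaseLabels(const clang::CaseStmt *CS) {
//     do {
//       const clang::Expr *Lo = CS->getLHS();
//       const clang::Expr *Hi = CS->getRHS();  // null unless a GNU range
//       (void)Lo; (void)Hi;
//       CS = llvm::dyn_cast<clang::CaseStmt>(CS->getSubStmt());
//     } while (CS);
//   }
//===--------------------------------------------------------------------===//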
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // \brief Build an empty label statement. 
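//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): manually peeling
// label-like wrappers (labels, case and default labels) off a statement.
// Stmt::stripLabelLikeStatements() declared earlier does this generically;
// the explicit loop below just shows the shape.  The name is hypothetical.
//
//   const clang::Stmt *peelLabels(const clang::Stmt *S) {
//     while (true) {
//       if (const clang::LabelStmt *LS = llvm::dyn_cast<clang::LabelStmt>(S))
//         S = LS->getSubStmt();
//       else if (const clang::SwitchCase *SC =
//                    llvm::dyn_cast<clang::SwitchCase>(S))
//         S = SC->getSubStmt();
//       else
//         return S;
//     }
//   }
//===--------------------------------------------------------------------===//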
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *)); } Attr *const *getAttrArrayPtr() const { return reinterpret_cast<Attr *const *>(this + 1); } Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
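//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): reading the pieces
// of an IfStmt.  getElse() is null when no 'else' branch was written, and
// getConditionVariable() is null unless the condition declares a variable.
// The function name is hypothetical.
//
//   void inspectIf(const clang::IfStmt *If) {
//     const clang::Expr *Cond = If->getCond();
//     const clang::Stmt *Then = If->getThen();
//     const clang::Stmt *Else = If->getElse();               // may be null
//     const clang::VarDecl *CV = If->getConditionVariable(); // may be null
//     (void)Cond; (void)Then; (void)Else; (void)CV;
//   }
//===--------------------------------------------------------------------===//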
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { SourceLocation SwitchLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements and, if the // SwitchStmt is a switch on an enum value, records whether all the enum // values were covered by CaseStmts. The coverage information value is meant // to be a hint for possible clients. llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); } /// \brief Set the case list for this switch statement. 
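//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): walking the singly
// linked list of case/default labels that a SwitchStmt maintains.  Because
// addSwitchCase() prepends, the list is threaded through
// SwitchCase::getNextSwitchCase() in reverse source order.  The function
// name is hypothetical.
//
//   unsigned countCases(const clang::SwitchStmt *SS) {
//     unsigned N = 0;
//     for (const clang::SwitchCase *SC = SS->getSwitchCaseList(); SC;
//          SC = SC->getNextSwitchCase())
//       ++N;
//     return N;  // includes a DefaultStmt, if present
//   }
//===--------------------------------------------------------------------===//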
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase.getPointer()); FirstCase.setPointer(SC); } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { FirstCase.setInt(true); } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return FirstCase.getInt(); } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { SourceLocation WhileLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { SourceLocation DoLoc; enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. 
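//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): treating the loop
// statements uniformly.  While/Do always carry a condition and a body; for
// ForStmt (declared below) any of init/cond/inc may be null.  The function
// name is hypothetical.
//
//   const clang::Stmt *getLoopBody(const clang::Stmt *S) {
//     if (const clang::WhileStmt *WS = llvm::dyn_cast<clang::WhileStmt>(S))
//       return WS->getBody();
//     if (const clang::DoStmt *DS = llvm::dyn_cast<clang::DoStmt>(S))
//       return DS->getBody();
//     if (const clang::ForStmt *FS = llvm::dyn_cast<clang::ForStmt>(S))
//       return FS->getBody();
//     return nullptr;  // not a loop
//   }
//===--------------------------------------------------------------------===//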
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { SourceLocation ForLoc; enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. 
LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. 
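//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): examining a return
// statement.  getRetValue() is null for a bare "return;", and, as the class
// comment above notes, GCC extensions mean the presence of a value cannot
// be inferred from the function's return type.  Name is hypothetical.
//
//   bool returnsValue(const clang::ReturnStmt *RS) {
//     if (const clang::Expr *RV = RS->getRetValue()) {
//       (void)RV;
//       return true;
//     }
//     return false;
//   }
//===--------------------------------------------------------------------===//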
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
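//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): enumerating the
// operands of an inline-assembly statement through the common AsmStmt
// interface shared by GCCAsmStmt and MSAsmStmt.  Name is hypothetical.
//
//   void listConstraints(const clang::AsmStmt *AS) {
//     for (unsigned i = 0, e = AS->getNumOutputs(); i != e; ++i)
//       (void)AS->getOutputConstraint(i);  // always non-empty: '=' or '+'
//     for (unsigned i = 0, e = AS->getNumInputs(); i != e; ++i)
//       (void)AS->getInputConstraint(i);   // may be empty
//     for (unsigned i = 0, e = AS->getNumClobbers(); i != e; ++i)
//       (void)AS->getClobber(i);
//   }
//===--------------------------------------------------------------------===//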
typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
    CharSourceRange Range;
  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {
    }

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:

  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. 
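//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): dispatching on the
// handler of a __try statement.  Exactly one of getExceptHandler() and
// getFinallyHandler() is expected to be non-null.  Name is hypothetical.
//
//   void inspectSEHTry(const clang::SEHTryStmt *TS) {
//     if (clang::SEHExceptStmt *Except = TS->getExceptHandler()) {
//       (void)Except->getFilterExpr();
//       (void)Except->getBlock();
//     } else if (clang::SEHFinallyStmt *Fin = TS->getFinallyHandler()) {
//       (void)Fin->getBlock();
//     }
//   }
//===--------------------------------------------------------------------===//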
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range();
  }
};

/// \brief This captures a statement into a function. For example, the
/// following pragma-annotated compound statement can be represented as a
/// CapturedStmt, and this compound statement is the body of an anonymous
/// outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    ///
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr)
      : VarAndKind(Var, Kind), Loc(Loc) {
      switch (Kind) {
      case VCK_This:
        assert(!Var && "'this' capture cannot have a variable!");
        break;
      case VCK_ByRef:
        assert(Var && "capturing by reference must have a variable!");
        break;
      case VCK_VLAType:
        assert(!Var &&
               "Variable-length array type capture cannot have a variable!");
        break;
      }
    }

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); }

    /// \brief Retrieve the source location at which the variable or 'this'
    /// was first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable.
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const {
      assert(capturesVariable() &&
             "No variable available for 'this' or VAT capture");
      return VarAndKind.getPointer();
    }
    friend class ASTStmtReader;
  };

private:
  /// \brief The number of variables captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the implicitly outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD,
               RecordDecl *RD);

  /// \brief Construct an empty captured statement.
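//===--------------------------------------------------------------------===//
// Illustrative sketch (not part of the original header): iterating the
// captures of a CapturedStmt.  capture_range is an llvm::iterator_range, so
// range-based for works; only VCK_ByRef captures carry a VarDecl.  The
// function name is hypothetical.
//
//   #include "clang/AST/Decl.h"
//   void listCaptures(clang::CapturedStmt *CS) {
//     for (const clang::CapturedStmt::Capture &C : CS->captures()) {
//       if (C.capturesVariable())
//         (void)C.getCapturedVar()->getName();
//       else if (C.capturesThis()) {
//         // 'this' capture: no variable attached.
//       }
//     }
//   }
//===--------------------------------------------------------------------===//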
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
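The CapturedStmt interface above is easiest to read alongside a consumer. A minimal sketch (not part of this header; it assumes the usual Clang AST and LLVM support headers are available) that walks the captures of a CapturedStmt using only the accessors declared above:

#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
#include "llvm/Support/raw_ostream.h"

// Report what a CapturedStmt captures, using the interface declared above.
static void dumpCaptures(const clang::CapturedStmt &CS) {
  llvm::outs() << "captures: " << CS.capture_size() << "\n";
  for (const clang::CapturedStmt::Capture &Cap : CS.captures()) {
    if (Cap.capturesThis())
      llvm::outs() << "  the 'this' pointer\n";
    else if (Cap.capturesVariableArrayType())
      llvm::outs() << "  a variable-length array type\n";
    else if (Cap.capturesVariable())
      llvm::outs() << "  variable '" << Cap.getCapturedVar()->getName()
                   << "' by reference\n";
  }
}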
openmp.c
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <omp.h>

#define N 21

/* O(sqrt(n)) trial division: dispose of even n, then test odd divisors only. */
bool is_prime_optimized(unsigned long long n)
{
    unsigned long long i;
    if (n % 2 == 0)
        return n == 2;
    for (i = 3; i * i <= n; i += 2) {
        if (n % i == 0)
            return false;
    }
    return n > 1;
}

/* Naive O(n) baseline, kept for comparison; the divisor must use the same
   64-bit type as n, or the loop counter could overflow for large inputs. */
bool is_prime(unsigned long long n)
{
    unsigned long long i;
    for (i = 2; i < n; i++) {
        if (n % i == 0)
            return false;
    }
    return n > 1;
}

int main(int argc, char *argv[])
{
    unsigned long long i, j, primes = 0, number, prevNumber;
    double t1, t2;

    if (argc < 2) {
        fprintf(stderr, "usage: %s num_threads\n", argv[0]);
        return EXIT_FAILURE;
    }
    omp_set_num_threads(atoi(argv[1]));
    t1 = omp_get_wtime();
    for (i = 0; i < N; i++) {
        /* Powers of two as exact integer shifts; the original powl(2, i - 1)
           underflowed the unsigned counter at i == 0 and round-tripped
           through long double. */
        prevNumber = (i == 0) ? 0 : 1ULL << (i - 1);
        number = 1ULL << i;
        /* Trial division gets costlier as j grows, so a dynamic schedule
           balances the work better than equal static chunks. */
        #pragma omp parallel for reduction(+:primes) schedule(dynamic)
        for (j = prevNumber + 1; j <= number; j++) {
            if (is_prime_optimized(j))
                primes++;
        }
        printf("%llu: %llu\n", number, primes);
    }
    t2 = omp_get_wtime();
    printf("Done in %lf s\n", t2 - t1);
    return 0;
}
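Usage note for the program above: with GCC (or any OpenMP-capable compiler) it builds as "gcc -fopenmp -O2 openmp.c -o primes" and runs as "./primes 8", counting primes over the successive intervals (2^(i-1), 2^i]. Each printed count is cumulative, so the final line is the total number of primes up to 2^20.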
displacement_lagrangemultiplier_mixed_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierMixedContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierMixedContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; /// The epsilon tolerance definition static constexpr double Tolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierMixedContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = 
false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierMixedContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } //* Copy constructor. DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierMixedContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0; // Loop over Dofs #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_solution_norm,lm_increase_norm,disp_dof_num,lm_dof_num,dof_id,residual_dof_value,dof_value,dof_incr) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); if (mActiveDofs[dof_id]) { const auto curr_var = it_dof->GetVariable(); if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { residual_dof_value = rb[dof_id]; disp_residual_solution_norm += residual_dof_value * residual_dof_value; disp_dof_num++; } } } if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; mDispCurrentResidualNorm = disp_residual_solution_norm; const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0; const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num); TDataType residual_disp_ratio; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm < Tolerance) ? 
1.0 : disp_residual_solution_norm; residual_disp_ratio = 1.0; mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the absolute norms TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio; r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? 
true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if ( disp_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierMixedContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3)); 
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));

}  // namespace Kratos

#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
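A construction sketch for this criteria (hypothetical wiring, not part of the header: it assumes the header above is included, the usual UblasSpace typedefs from Kratos core, and Kratos::make_shared; the settings keys mirror the defaults validated by the Parameters constructor above):

#include "includes/kratos_parameters.h"
#include "spaces/ublas_space.h"

using SparseSpaceType = Kratos::UblasSpace<double, Kratos::CompressedMatrix, Kratos::Vector>;
using LocalSpaceType  = Kratos::UblasSpace<double, Kratos::Matrix, Kratos::Vector>;
using CriteriaType    = Kratos::DisplacementLagrangeMultiplierMixedContactCriteria<SparseSpaceType, LocalSpaceType>;

// Build the criteria from a Parameters block; unspecified keys keep the
// defaults assigned by ValidateAndAssignDefaults() in the constructor.
CriteriaType::Pointer MakeContactCriteria()
{
    Kratos::Parameters settings(R"({
        "ensure_contact"              : false,
        "print_convergence_criterion" : false,
        "residual_relative_tolerance" : 1.0e-4,
        "residual_absolute_tolerance" : 1.0e-9
    })");
    return Kratos::make_shared<CriteriaType>(settings);
}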
GB_unop__conj_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__conj_fc32_fc32) // op(A') function: GB (_unop_tran__conj_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = conjf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = conjf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = conjf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CONJ || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__conj_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = conjf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = conjf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__conj_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
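For context, a minimal end-to-end sketch of reaching this kernel from the user-facing API (assuming SuiteSparse:GraphBLAS with its GxB complex extensions; error checking elided). In a non-GBCOMPACT build, the apply below dispatches to GB (_unop_apply__conj_fc32_fc32) defined above:

#include "GraphBLAS.h"

int demo_conj (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GxB_FC32, 2, 2) ;
    GrB_Matrix_new (&C, GxB_FC32, 2, 2) ;
    GxB_Matrix_setElement_FC32 (A, GxB_CMPLXF (1, 2), 0, 1) ;
    // C = conj (A), applied elementwise over the pattern of A
    GrB_Matrix_apply (C, NULL, NULL, GxB_CONJ_FC32, A, NULL) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (0) ;
}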
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
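The built-in XML below (or the generated header in zero-configuration builds) provides the minimal "threshold" and "checks" maps; GetThresholdMap() consults this built-in list before any thresholds.xml found through the configure options.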
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if ((width == 0) || (height == 0)) return(threshold_image); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
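Each row keeps a running neighborhood sum per channel: the full width x height window is summed once for the first pixel, then as the window slides right the column leaving on the left edge (accumulated in channel_bias) is subtracted from channel_sum and the entering right-hand column is added, so every subsequent pixel costs O(height) rather than O(width*height).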
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; const Quantum *magick_restrict p, *magick_restrict pixels; Quantum *magick_restrict q; ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
0 : QuantumRange),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(threshold_image); } if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_image->type=image->type; threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically performs image thresholding % dependent on which method you specify. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
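Kapur's maximum-entropy criterion: the bin that maximizes the sum of the black-side and white-side entropies splits the histogram into the two most informative classes.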
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
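The 'triangle' is a line from the histogram peak (max) to the empty end of the longer tail (start or end); a*i + b*histogram[i] + c gives each bin's signed distance from that line, and the bin farthest from the line becomes the threshold.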
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse) (void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. 
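%
% For example, a usage sketch (not from this file's documentation) that
% thresholds at 50% of the quantum range:
%
%     (void) BilevelImage(image,0.5*QuantumRange,exception);
%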
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than the given value is set to the maximum, QuantumRange.
%
% This function is used to implement the "-threshold" operator for the
% command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
%     MagickBooleanType BilevelImage(Image *image,const double threshold,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o threshold: define the threshold values.
%
%   o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as this operator by using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
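%
% For example, a usage sketch (not from this file's documentation) with an
% assumed 30% per-channel threshold string:
%
%     (void) BlackThresholdImage(image,"30%",exception);
%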
%
% The format of the BlackThresholdImage method is:
%
%     MagickBooleanType BlackThresholdImage(Image *image,
%       const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o thresholds: define the threshold value.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < GetPixelInfoChannel(&threshold,channel)) q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImage method is: % % MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) { #define ClampImageTag "Clamp/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { ssize_t i; PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) ClampPixel(q->red); q->green=(double) ClampPixel(q->green); q->blue=(double) ClampPixel(q->blue); q->alpha=(double) ClampPixel(q->alpha); q++; } return(SyncImage(image,exception)); } /* Clamp image. 
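Clamping only changes pixels in HDRI builds, where Quantum is a floating-point type and values can drift outside [0, QuantumRange]; integer-Quantum builds cannot represent out-of-range values in the first place.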
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorThresholdImage() forces all pixels in the color range to white % otherwise black. % % The format of the ColorThresholdImage method is: % % MagickBooleanType ColorThresholdImage(Image *image, % const PixelInfo *start_color,const PixelInfo *stop_color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o start_color, stop_color: define the start and stop color range. Any % pixel within the range returns white otherwise black. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorThresholdImage(Image *image, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo start, stop; ssize_t y; /* Color threshold image. 
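The image is reduced to a two-entry colormap: the start/stop colors are converted into the image's own colorspace (HCL, HSB, HSL, HSV, HWB, or Lab as needed) and scaled to the quantum range, then any pixel falling inside the [start, stop] box on every updatable channel gets index 1 (white) and all others index 0 (black).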
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=AcquireImageColormap(image,2,exception); if (status == MagickFalse) return(status); start=(*start_color); stop=(*stop_color); switch (image->colorspace) { case HCLColorspace: { ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSBColorspace: { ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSLColorspace: { ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSVColorspace: { ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HWBColorspace: { ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case LabColorspace: { ConvertRGBToLab(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } default: { start.red*=QuantumScale; start.green*=QuantumScale; start.blue*=QuantumScale; stop.red*=QuantumScale; stop.green*=QuantumScale; stop.blue*=QuantumScale; break; } } start.red*=QuantumRange; start.green*=QuantumRange; start.blue*=QuantumRange; stop.red*=QuantumRange; stop.green*=QuantumRange; stop.blue*=QuantumRange; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickBooleanType foreground = MagickTrue; ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((q[i] < GetPixelInfoChannel(&start,channel)) || (q[i] > GetPixelInfoChannel(&stop,channel))) foreground=MagickFalse; } SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 
1 : 0),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->colorspace=sRGBColorspace; return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocates the given ThresholdMap. % % The format of the DestroyThresholdMap method is: % % ThresholdMap *DestroyThresholdMap(ThresholdMap *map) % % A description of each parameter follows. % % o map: Pointer to the ThresholdMap to destroy. % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() looks for a given threshold map name or alias in the % given XML file data, and returns the allocated map when found. % % The format of the GetThresholdMapFile method is: % % ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename.
% % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 
2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMapFile method is: % % MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: A pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure.
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { const char *alias, *content, *map; XMLTreeInfo *description, *threshold, *thresholds; assert(xml != (char *) NULL); assert(file != (FILE *) NULL); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); threshold=GetXMLTreeChild(thresholds,"threshold"); for ( ; threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { map=GetXMLTreeAttribute(threshold,"map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias=GetXMLTreeAttribute(threshold,"alias"); description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"",map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: A pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ?
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() performs an ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels to dither between. % % Any level number less than 2 is equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" generates a 6 level posterization of the % image with an ordered 3x3 diffused pixel dither being applied between % each level, while "checker,8,8,4" produces a 332 colormapped image % with only a single checkerboard hash pattern (50% grey) between each % color level, basically doubling the number of color levels with % a bare minimum of dithering. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { (void) GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i <
MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? 
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { ssize_t i; PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const double min_threshold,const double max_threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o min_threshold, max_threshold: Specify the low and high thresholds. % These values range from 0 to QuantumRange. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
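/*
  Usage sketch (editor's addition, not part of threshold.c): a minimal
  MagickCore client driving the thresholding entry points above.  It assumes
  input and output filenames in argv[1] and argv[2]; error handling is kept to
  the bare minimum.  The "o3x3,6" map/level string follows the syntax
  described in the OrderedDitherImage() documentation.
*/
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  if (argc != 3)
    return(1);
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* 6-level posterization with an ordered 3x3 dither between levels. */
      (void) OrderedDitherImage(image,"o3x3,6",exception);
      (void) CopyMagickString(image->filename,argv[2],MagickPathExtent);
      (void) WriteImage(image_info,image,exception);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}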
dmml.c
/*! @copyright (c) 2017 King Abdullah University of Science and * Technology (KAUST). All rights reserved. * * STARS-H is a software package, provided by King Abdullah * University of Science and Technology (KAUST) * * @file src/backends/openmp/blrm/dmml.c * @version 1.3.0 * @author Aleksandr Mikhalev * @date 2017-11-07 * */ #include "common.h" #include "starsh.h" int starsh_blrm__dmml_omp(STARSH_blrm *matrix, int nrhs, double alpha, double *A, int lda, double beta, double *B, int ldb) //! Multiply BLR matrix by dense matrix. /*! Performs `C=alpha*A*B+beta*C` with @ref STARSH_blrm `A` and dense matrices * `B` and `C`. All the integer types are int, since they are used in BLAS * calls. * * @param[in] matrix: Pointer to @ref STARSH_blrm object. * @param[in] nrhs: Number of right hand sides. * @param[in] alpha: Scalar multiplier. * @param[in] A: Dense matrix, right hand side. * @param[in] lda: Leading dimension of `A`. * @param[in] beta: Scalar multiplier. * @param[in,out] B: Resulting dense matrix, updated in place. * @param[in] ldb: Leading dimension of `B`. * @return Error code @ref STARSH_ERRNO. * @ingroup blrm * */ { STARSH_blrm *M = matrix; STARSH_blrf *F = M->format; STARSH_problem *P = F->problem; STARSH_kernel *kernel = P->kernel; STARSH_int nrows = P->shape[0]; STARSH_int ncols = P->shape[P->ndim-1]; // Shortcuts to information about clusters STARSH_cluster *R = F->row_cluster; STARSH_cluster *C = F->col_cluster; void *RD = R->data, *CD = C->data; // Number of far-field and near-field blocks STARSH_int nblocks_far = F->nblocks_far; STARSH_int nblocks_near = F->nblocks_near, bi; char symm = F->symm; int maxrank = 100; int maxnb = nrows/F->nbrows; // Setting B = beta*B if(beta == 0.) #pragma omp parallel for schedule(static) for(int i = 0; i < nrows; i++) for(int j = 0; j < nrhs; j++) B[j*ldb+i] = 0.; else #pragma omp parallel for schedule(static) for(int i = 0; i < nrows; i++) for(int j = 0; j < nrhs; j++) B[j*ldb+i] *= beta; double *temp_D, *temp_B; int num_threads; #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); if(M->onfly == 0) { STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank); } else { STARSH_MALLOC(temp_D, num_threads*maxnb*maxnb); } STARSH_MALLOC(temp_B, num_threads*nrhs*nrows); #pragma omp parallel { double *out = temp_B+omp_get_thread_num()*nrhs*nrows; for(int j = 0; j < nrhs*nrows; j++) out[j] = 0.; } int ldout = nrows; // Simple cycle over all far-field admissible blocks #pragma omp parallel for schedule(dynamic, 1) for(bi = 0; bi < nblocks_far; bi++) { // Get indexes of corresponding block row and block column STARSH_int i = F->block_far[2*bi]; STARSH_int j = F->block_far[2*bi+1]; // Get sizes and rank int nrows = R->size[i]; int ncols = C->size[j]; int rank = M->far_rank[bi]; // Get pointers to data buffers double *U = M->far_U[bi]->data, *V = M->far_V[bi]->data; int info = 0; double *D = temp_D+omp_get_thread_num()*nrhs*maxrank; double *out = temp_B+omp_get_thread_num()*nrhs*ldout; // Multiply low-rank matrix in U*V^T format by a dense matrix cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs, ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank); cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs, rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout); if(i != j && symm == 'S') { // Multiply low-rank matrix in V*U^T format by a dense matrix // U and V are simply swapped in case of symmetric block cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs, nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols, nrhs, rank, alpha, V, ncols, D, rank, 1.0, out+C->start[j], ldout); } } if(M->onfly == 1) // Simple cycle over all near-field blocks #pragma omp parallel for schedule(dynamic, 1) for(bi = 0; bi < nblocks_near; bi++) { // Get indexes and sizes of corresponding block row and column STARSH_int i = F->block_near[2*bi]; STARSH_int j = F->block_near[2*bi+1]; int nrows = R->size[i]; int ncols = C->size[j]; int info = 0; double *D = temp_D+omp_get_thread_num()*maxnb*maxnb; double *out = temp_B+omp_get_thread_num()*nrhs*ldout; // Fill temporary buffer with elements of corresponding block kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j], RD, CD, D, nrows); // Multiply 2 dense matrices cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0, out+R->start[i], ldout); if(i != j && symm == 'S') { // Repeat in case of symmetric matrix cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols, nrhs, nrows, alpha, D, nrows, A+R->start[i], lda, 1.0, out+C->start[j], ldout); } } else // Simple cycle over all near-field blocks #pragma omp parallel for schedule(dynamic, 1) for(bi = 0; bi < nblocks_near; bi++) { // Get indexes and sizes of corresponding block row and column STARSH_int i = F->block_near[2*bi]; STARSH_int j = F->block_near[2*bi+1]; int nrows = R->size[i]; int ncols = C->size[j]; // Get pointers to data buffers double *D = M->near_D[bi]->data; double *out = temp_B+omp_get_thread_num()*nrhs*ldout; // Multiply 2 dense matrices cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0, out+R->start[i], ldout); if(i != j && symm == 'S') { // Repeat in case of symmetric matrix cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols, nrhs, nrows, alpha, D, nrows, A+R->start[i], lda, 1.0, out+C->start[j], ldout); } } #pragma omp parallel for schedule(static) for(int i = 0; i < ldout; i++) for(int j = 0; j < nrhs; j++) for(int k = 0; k < num_threads; k++) B[j*ldb+i] += temp_B[(k*nrhs+j)*ldout+i]; free(temp_B); free(temp_D); return 0; }
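/* Editor's sketch (not part of STARS-H): the per-thread accumulation idiom
 * used by starsh_blrm__dmml_omp() above, distilled to a standalone example.
 * Each thread accumulates into its own private slice of `temp', and the
 * slices are summed into the shared result afterwards, so the
 * dynamically-scheduled loop needs no atomics or locks.  The update
 * `out[i] += 2*x[i]' is a stand-in for the per-block GEMM calls. */
#include <stdlib.h>
#include <omp.h>

void threaded_accumulate(int n, const double *x, double *y)
{
    int num_threads = 1;
#pragma omp parallel
#pragma omp master
    num_threads = omp_get_num_threads();
    /* One zero-initialized slice of length n per thread. */
    double *temp = (double *) calloc((size_t) num_threads*n, sizeof(*temp));
#pragma omp parallel for schedule(dynamic, 1)
    for (int i = 0; i < n; i++)
    {
        double *out = temp+(size_t) omp_get_thread_num()*n;
        out[i] += 2.0*x[i]; /* stand-in for cblas_dgemm() into `out' */
    }
    /* Sequential reduction of the per-thread slices. */
    for (int k = 0; k < num_threads; k++)
        for (int i = 0; i < n; i++)
            y[i] += temp[(size_t) k*n+i];
    free(temp);
}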
ChConstraintRigidRigid.h
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2016 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Hammad Mazhar // ============================================================================= // // Handling of rigid contact and calculation of corrections and Jacobians // // ============================================================================= #pragma once #include "chrono_parallel/ChDataManager.h" #include "chrono_parallel/math/ChParallelMath.h" namespace chrono { /// @addtogroup parallel_constraint /// @{ /// Unilateral (contact) constraints. class CH_PARALLEL_API ChConstraintRigidRigid { public: ChConstraintRigidRigid() { data_manager = 0; offset = 3; inv_h = inv_hpa = inv_hhpa = 0; } ~ChConstraintRigidRigid() {} void Setup(ChParallelDataManager* data_container_) { data_manager = data_container_; uint num_contacts = data_manager->num_rigid_contacts; inv_h = 1 / data_manager->settings.step_size; inv_hpa = 1 / (data_manager->settings.step_size + data_manager->settings.solver.alpha); inv_hhpa = inv_h * inv_hpa; if (num_contacts > 0) { contact_active_pairs.resize(int(num_contacts)); data_manager->host_data.coh_rigid_rigid.resize(num_contacts); data_manager->host_data.fric_rigid_rigid.resize(num_contacts); rotated_point_a.resize(num_contacts); rotated_point_b.resize(num_contacts); quat_a.resize(num_contacts); quat_b.resize(num_contacts); #pragma omp parallel for for (int i = 0; i < (signed)num_contacts; i++) { vec2 body = data_manager->host_data.bids_rigid_rigid[i]; uint b1 = body.x; uint b2 = body.y; contact_active_pairs[i] = bool2(data_manager->host_data.active_rigid[b1] != 0, data_manager->host_data.active_rigid[b2] != 0); real coh = data_manager->composition_strategy->CombineCohesion( data_manager->host_data.cohesion_data[b1], data_manager->host_data.cohesion_data[b2]); data_manager->host_data.coh_rigid_rigid[i] = coh; const real3& f_a = data_manager->host_data.fric_data[b1]; const real3& f_b = data_manager->host_data.fric_data[b2]; real3 mu; mu.x = data_manager->composition_strategy->CombineFriction(f_a.x, f_b.x); // sliding mu.y = data_manager->composition_strategy->CombineFriction(f_a.y, f_b.y); // rolling mu.z = data_manager->composition_strategy->CombineFriction(f_a.z, f_b.z); // spinning data_manager->host_data.fric_rigid_rigid[i] = mu; { quaternion quaternion_conjugate = ~data_manager->host_data.rot_rigid[b1]; real3 sbar = Rotate(data_manager->host_data.cpta_rigid_rigid[i] - data_manager->host_data.pos_rigid[b1], quaternion_conjugate); rotated_point_a[i] = real3_int(sbar, b1); quat_a[i] = quaternion_conjugate; } { quaternion quaternion_conjugate = ~data_manager->host_data.rot_rigid[b2]; real3 sbar = Rotate(data_manager->host_data.cptb_rigid_rigid[i] - data_manager->host_data.pos_rigid[b2], quaternion_conjugate); rotated_point_b[i] = real3_int(sbar, b2); quat_b[i] = quaternion_conjugate; } } } } void Project(real* gamma); void Project_Single(int index, real* gamma); void host_Project_single(int index, vec2* ids, real3* friction, real* cohesion, real* gamma); void func_Project_normal(int index, const vec2* ids, const real* cohesion, real* gam); void func_Project_sliding(int index, const vec2* 
ids, const real3* fric, const real* cohesion, real* gam); void func_Project_spinning(int index, const vec2* ids, const real3* fric, real* gam); void Dx(const DynamicVector<real>& x, DynamicVector<real>& output); void D_Tx(const DynamicVector<real>& x, DynamicVector<real>& output); /// Compute the vector of corrections. void Build_b(); /// Compute the diagonal compliance matrix. void Build_E(); /// Compute the Jacobian matrix; no allocation is performed here, /// GenerateSparsity should take care of that. void Build_D(); void Build_s(); /// Fill in the non-zero entries in the contact Jacobian with ones. /// This operation is sequential. void GenerateSparsity(); int offset; protected: custom_vector<bool2> contact_active_pairs; real inv_h; ///< reciprocal of time step, 1/h real inv_hpa; ///< 1 / (h+a) real inv_hhpa; ///< 1 / (h*(h+a)) custom_vector<real3_int> rotated_point_a, rotated_point_b; custom_vector<quaternion> quat_a, quat_b; ChParallelDataManager* data_manager; ///< Pointer to the system's data manager }; /// @} parallel_constraint } // end namespace chrono
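// Editor's sketch (not Chrono code): the body-frame contact-point transform
// performed in Setup() above, written out with a minimal quaternion type.
// For a unit quaternion the conjugate is the inverse, so rotating the offset
// (p - pos) by ~rot expresses a world-frame contact point in the body frame,
// mirroring what Setup() stores in rotated_point_a / rotated_point_b.
#include <array>

struct Quat {
    double w, x, y, z;
};

inline Quat Conjugate(const Quat& q) {
    return {q.w, -q.x, -q.y, -q.z};
}

// Rotate v by unit quaternion q (v' = q v q^-1), using t = 2 (u x v),
// v' = v + w t + u x t, where u = (x, y, z).
inline std::array<double, 3> RotateByQuat(const std::array<double, 3>& v,
                                          const Quat& q) {
    double tx = 2 * (q.y * v[2] - q.z * v[1]);
    double ty = 2 * (q.z * v[0] - q.x * v[2]);
    double tz = 2 * (q.x * v[1] - q.y * v[0]);
    return {v[0] + q.w * tx + q.y * tz - q.z * ty,
            v[1] + q.w * ty + q.z * tx - q.x * tz,
            v[2] + q.w * tz + q.x * ty - q.y * tx};
}

// World-frame contact point `p` on a body at position `pos` with orientation
// `rot`, expressed in that body's frame.
inline std::array<double, 3> BodyFramePoint(const std::array<double, 3>& p,
                                            const std::array<double, 3>& pos,
                                            const Quat& rot) {
    return RotateByQuat({p[0] - pos[0], p[1] - pos[1], p[2] - pos[2]},
                        Conjugate(rot));
}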
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2010, 2011, 2012 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: [email protected] (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. 
// void Write(int residual_id, // int residual_offset, // double** jacobians, // SparseMatrix* jacobian); // } // // Note: The ProgramEvaluator is not thread-safe, since internally it maintains // some per-thread scratch space. #ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_ #define CERES_INTERNAL_PROGRAM_EVALUATOR_H_ // This include must come before any #ifndef check on Ceres compile options. #include "ceres/internal/port.h" #ifdef CERES_USE_OPENMP #include <omp.h> #endif #include <map> #include <string> #include <vector> #include "ceres/execution_summary.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/residual_block.h" #include "ceres/small_blas.h" namespace ceres { namespace internal { struct NullJacobianFinalizer { void operator()(SparseMatrix* jacobian, int num_parameters) {} }; template<typename EvaluatePreparer, typename JacobianWriter, typename JacobianFinalizer = NullJacobianFinalizer> class ProgramEvaluator : public Evaluator { public: ProgramEvaluator(const Evaluator::Options &options, Program* program) : options_(options), program_(program), jacobian_writer_(options, program), evaluate_preparers_( jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) { #ifndef CERES_USE_OPENMP CHECK_EQ(1, options_.num_threads) << "OpenMP support is not compiled into this binary; " << "only options.num_threads=1 is supported."; #endif BuildResidualLayout(*program, &residual_layout_); evaluate_scratch_.reset(CreateEvaluatorScratch(*program, options.num_threads)); } // Implementation of Evaluator interface. SparseMatrix* CreateJacobian() const { return jacobian_writer_.CreateJacobian(); } bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options, const double* state, double* cost, double* residuals, double* gradient, SparseMatrix* jacobian) { ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_); ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL ? "Evaluator::Residual" : "Evaluator::Jacobian", &execution_summary_); // The parameters are stateful, so set the state before evaluating. if (!program_->StateVectorToParameterBlocks(state)) { return false; } if (residuals != NULL) { VectorRef(residuals, program_->NumResiduals()).setZero(); } if (jacobian != NULL) { jacobian->SetZero(); } // Each thread gets its own cost and evaluate scratch space. for (int i = 0; i < options_.num_threads; ++i) { evaluate_scratch_[i].cost = 0.0; if (gradient != NULL) { VectorRef(evaluate_scratch_[i].gradient.get(), program_->NumEffectiveParameters()).setZero(); } } // This bool is used to disable the loop if an error is encountered // without breaking out of it. The remaining loop iterations are still run, // but with an empty body, and so will finish quickly. bool abort = false; int num_residual_blocks = program_->NumResidualBlocks(); #pragma omp parallel for num_threads(options_.num_threads) for (int i = 0; i < num_residual_blocks; ++i) { // Disable the loop instead of breaking, as required by OpenMP. #pragma omp flush(abort) if (abort) { continue; } #ifdef CERES_USE_OPENMP int thread_id = omp_get_thread_num(); #else int thread_id = 0; #endif EvaluatePreparer* preparer = &evaluate_preparers_[thread_id]; EvaluateScratch* scratch = &evaluate_scratch_[thread_id]; // Prepare block residuals if requested.
const ResidualBlock* residual_block = program_->residual_blocks()[i]; double* block_residuals = NULL; if (residuals != NULL) { block_residuals = residuals + residual_layout_[i]; } else if (gradient != NULL) { block_residuals = scratch->residual_block_residuals.get(); } // Prepare block jacobians if requested. double** block_jacobians = NULL; if (jacobian != NULL || gradient != NULL) { preparer->Prepare(residual_block, i, jacobian, scratch->jacobian_block_ptrs.get()); block_jacobians = scratch->jacobian_block_ptrs.get(); } // Evaluate the cost, residuals, and jacobians. double block_cost; if (!residual_block->Evaluate( evaluate_options.apply_loss_function, &block_cost, block_residuals, block_jacobians, scratch->residual_block_evaluate_scratch.get())) { abort = true; // This ensures that the OpenMP threads have a consistent view of 'abort'. Do // the flush inside the failure case so that there is usually only one // synchronization point per loop iteration instead of two. #pragma omp flush(abort) continue; } scratch->cost += block_cost; // Store the jacobians, if they were requested. if (jacobian != NULL) { jacobian_writer_.Write(i, residual_layout_[i], block_jacobians, jacobian); } // Compute and store the gradient, if it was requested. if (gradient != NULL) { int num_residuals = residual_block->NumResiduals(); int num_parameter_blocks = residual_block->NumParameterBlocks(); for (int j = 0; j < num_parameter_blocks; ++j) { const ParameterBlock* parameter_block = residual_block->parameter_blocks()[j]; if (parameter_block->IsConstant()) { continue; } MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( block_jacobians[j], num_residuals, parameter_block->LocalSize(), block_residuals, scratch->gradient.get() + parameter_block->delta_offset()); } } } if (!abort) { const int num_parameters = program_->NumEffectiveParameters(); // Sum the cost and gradient (if requested) from each thread. (*cost) = 0.0; if (gradient != NULL) { VectorRef(gradient, num_parameters).setZero(); } for (int i = 0; i < options_.num_threads; ++i) { (*cost) += evaluate_scratch_[i].cost; if (gradient != NULL) { VectorRef(gradient, num_parameters) += VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters); } } // Finalize the Jacobian if it is available. // `num_parameters` is passed to the finalizer so that additional // storage can be reserved for additional diagonal elements if // necessary. if (jacobian != NULL) { JacobianFinalizer f; f(jacobian, num_parameters); } } return !abort; } bool Plus(const double* state, const double* delta, double* state_plus_delta) const { return program_->Plus(state, delta, state_plus_delta); } int NumParameters() const { return program_->NumParameters(); } int NumEffectiveParameters() const { return program_->NumEffectiveParameters(); } int NumResiduals() const { return program_->NumResiduals(); } virtual std::map<std::string, int> CallStatistics() const { return execution_summary_.calls(); } virtual std::map<std::string, double> TimeStatistics() const { return execution_summary_.times(); } private: // Per-thread scratch space needed to evaluate and store each residual block. 
struct EvaluateScratch { void Init(int max_parameters_per_residual_block, int max_scratch_doubles_needed_for_evaluate, int max_residuals_per_residual_block, int num_parameters) { residual_block_evaluate_scratch.reset( new double[max_scratch_doubles_needed_for_evaluate]); gradient.reset(new double[num_parameters]); VectorRef(gradient.get(), num_parameters).setZero(); residual_block_residuals.reset( new double[max_residuals_per_residual_block]); jacobian_block_ptrs.reset( new double*[max_parameters_per_residual_block]); } double cost; scoped_array<double> residual_block_evaluate_scratch; // The gradient in the local parameterization. scoped_array<double> gradient; // Enough space to store the residual for the largest residual block. scoped_array<double> residual_block_residuals; scoped_array<double*> jacobian_block_ptrs; }; static void BuildResidualLayout(const Program& program, std::vector<int>* residual_layout) { const std::vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); residual_layout->resize(program.NumResidualBlocks()); int residual_pos = 0; for (int i = 0; i < residual_blocks.size(); ++i) { const int num_residuals = residual_blocks[i]->NumResiduals(); (*residual_layout)[i] = residual_pos; residual_pos += num_residuals; } } // Create scratch space for each thread evaluating the program. static EvaluateScratch* CreateEvaluatorScratch(const Program& program, int num_threads) { int max_parameters_per_residual_block = program.MaxParametersPerResidualBlock(); int max_scratch_doubles_needed_for_evaluate = program.MaxScratchDoublesNeededForEvaluate(); int max_residuals_per_residual_block = program.MaxResidualsPerResidualBlock(); int num_parameters = program.NumEffectiveParameters(); EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads]; for (int i = 0; i < num_threads; i++) { evaluate_scratch[i].Init(max_parameters_per_residual_block, max_scratch_doubles_needed_for_evaluate, max_residuals_per_residual_block, num_parameters); } return evaluate_scratch; } Evaluator::Options options_; Program* program_; JacobianWriter jacobian_writer_; scoped_array<EvaluatePreparer> evaluate_preparers_; scoped_array<EvaluateScratch> evaluate_scratch_; std::vector<int> residual_layout_; ::ceres::internal::ExecutionSummary execution_summary_; }; } // namespace internal } // namespace ceres #endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
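// Editor's sketch (not Ceres code): the duck-typed EvaluatePreparer /
// JacobianWriter collaboration documented at the top of this header, reduced
// to a standalone example.  ProgramEvaluator never names a base class for its
// template parameters; any pair of types with the expected member functions
// works.  `DenseWriter` and `DensePreparer` here are hypothetical stand-ins
// that target a plain row-major array instead of a sparse matrix.
#include <iostream>
#include <vector>

struct DensePreparer {
  // Point block `i` at its destination row in the dense storage.
  double* Prepare(std::vector<double>& storage, int i, int block_size) const {
    return storage.data() + static_cast<size_t>(i) * block_size;
  }
};

struct DenseWriter {
  using Preparer = DensePreparer;
  Preparer CreatePreparer() const { return Preparer(); }
};

// The evaluator only assumes Writer::Preparer and CreatePreparer() exist,
// mirroring how ProgramEvaluator is parameterized on its writer type.
template <typename Writer>
void EvaluateBlocks(const Writer& writer, int num_blocks, int block_size,
                    std::vector<double>& storage) {
  typename Writer::Preparer preparer = writer.CreatePreparer();
  for (int i = 0; i < num_blocks; ++i) {
    double* dst = preparer.Prepare(storage, i, block_size);
    for (int j = 0; j < block_size; ++j)
      dst[j] = i + 0.1 * j;  // stand-in for a CostFunction evaluation
  }
}

int main() {
  std::vector<double> storage(3 * 2);
  EvaluateBlocks(DenseWriter(), /*num_blocks=*/3, /*block_size=*/2, storage);
  for (double v : storage)
    std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}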
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class 
CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. 
  /// Used for placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token; all functions that update or
/// consume the type take a start location of the token they are looking at as
/// a parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be
  /// reported as a side-effect (only if the completion point has been
  /// reached).
  QualType get(SourceLocation Tok) const {
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc.
  /// It is only considered if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
    if (New->isExternallyDeclarable()) {
      assert(Old->isExternallyDeclarable() &&
             "should not have found a non-externally-declarable previous decl");
      return true;
    }
    return false;
  }
  bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);

  void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
                                      QualType ResultTy,
                                      ArrayRef<QualType> Args);

public:
  /// The maximum alignment, same as in llvm::Value. We duplicate them here
  /// because that allows us not to duplicate the constants in clang code,
  /// which we must do since we can't directly use the llvm constants.
  /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
  ///
  /// This is the greatest alignment value supported by load, store, and
  /// alloca instructions, and global values.
  static const unsigned MaxAlignmentExponent = 32;
  static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;

  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions CurFPFeatures;

  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;

  /// Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// Controls member pointer representation format under the MS ABI.
  LangOptions::PragmaMSPointersToMembersKind
      MSPointerToMemberRepresentationMethod;

  /// Stack of active SEH __finally scopes. Can be empty.
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// Source location for newly created implicit MSInheritanceAttrs
  SourceLocation ImplicitMSInheritanceAttrLoc;

  /// Holds TypoExprs that are created from `createDelayedTypo`. This is used
  /// by `TransformTypos` in order to keep track of any TypoExprs that are
  /// created recursively during typo correction and wipe them away if the
  /// correction fails.
  llvm::SmallVector<TypoExpr *, 2> TypoExprs;

  /// pragma clang section kind
  enum PragmaClangSectionKind {
    PCSK_Invalid = 0,
    PCSK_BSS = 1,
    PCSK_Data = 2,
    PCSK_Rodata = 3,
    PCSK_Text = 4,
    PCSK_Relro = 5
  };

  enum PragmaClangSectionAction {
    PCSA_Set = 0,
    PCSA_Clear = 1
  };

  struct PragmaClangSection {
    std::string SectionName;
    bool Valid = false;
    SourceLocation PragmaLocation;
  };

  PragmaClangSection PragmaClangBSSSection;
  PragmaClangSection PragmaClangDataSection;
  PragmaClangSection PragmaClangRodataSection;
  PragmaClangSection PragmaClangRelroSection;
  PragmaClangSection PragmaClangTextSection;

  enum PragmaMsStackAction {
    PSK_Reset    = 0x0,                // #pragma ()
    PSK_Set      = 0x1,                // #pragma (value)
    PSK_Push     = 0x2,                // #pragma (push[, id])
    PSK_Pop      = 0x4,                // #pragma (pop[, id])
    PSK_Show     = 0x8,                // #pragma (show) -- only for "pack"!
    PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
    PSK_Pop_Set  = PSK_Pop | PSK_Set,  // #pragma (pop[, id], value)
  };

  // #pragma pack and align.
  class AlignPackInfo {
  public:
    // `Native` represents default align mode, which may vary based on the
    // platform.
    enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

    // #pragma pack info constructor
    AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
        : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
      assert(Num == PackNumber && "The pack number has been truncated.");
    }

    // #pragma align info constructor
    AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
        : PackAttr(false), AlignMode(M),
          PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

    explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

    AlignPackInfo() : AlignPackInfo(Native, false) {}

    // When an AlignPackInfo itself cannot be used, this returns a 32-bit
    // integer encoding for it. This should only be passed to
    // AlignPackInfo::getFromRawEncoding; it should not be inspected directly.
    static uint32_t getRawEncoding(const AlignPackInfo &Info) {
      std::uint32_t Encoding{};
      if (Info.IsXLStack())
        Encoding |= IsXLMask;

      Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;

      if (Info.IsPackAttr())
        Encoding |= PackAttrMask;

      Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;

      return Encoding;
    }

    static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
      bool IsXL = static_cast<bool>(Encoding & IsXLMask);
      AlignPackInfo::Mode M =
          static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
      int PackNumber = (Encoding & PackNumMask) >> 4;

      if (Encoding & PackAttrMask)
        return AlignPackInfo(M, PackNumber, IsXL);

      return AlignPackInfo(M, IsXL);
    }

    bool IsPackAttr() const { return PackAttr; }

    bool IsAlignAttr() const { return !PackAttr; }

    Mode getAlignMode() const { return AlignMode; }

    unsigned getPackNumber() const { return PackNumber; }

    bool IsPackSet() const {
      // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
      // attribute on a decl.
      return PackNumber != UninitPackVal && PackNumber != 0;
    }

    bool IsXLStack() const { return XLStack; }

    bool operator==(const AlignPackInfo &Info) const {
      return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
             std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                      Info.XLStack);
    }

    bool operator!=(const AlignPackInfo &Info) const {
      return !(*this == Info);
    }

  private:
    /// \brief True if this is a pragma pack attribute,
    /// not a pragma align attribute.
    bool PackAttr;

    /// \brief The alignment mode that is in effect.
    Mode AlignMode;

    /// \brief The pack number of the stack.
    unsigned char PackNumber;

    /// \brief True if it is an XL #pragma align/pack stack.
    bool XLStack;

    /// \brief Uninitialized pack value.
    static constexpr unsigned char UninitPackVal = -1;

    // Masks to encode and decode an AlignPackInfo.
    static constexpr uint32_t IsXLMask{0x0000'0001};
    static constexpr uint32_t AlignModeMask{0x0000'0006};
    static constexpr uint32_t PackAttrMask{0x0000'0008};
    static constexpr uint32_t PackNumMask{0x0000'01F0};
  };

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      SourceLocation PragmaPushLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation),
            PragmaPushLocation(PragmaPushLocation) {}
    };

    void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel, ValueType Value) {
      if (Action == PSK_Reset) {
        CurrentValue = DefaultValue;
        CurrentPragmaLocation = PragmaLocation;
        return;
      }
      if (Action & PSK_Push)
        Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                           PragmaLocation);
      else if (Action & PSK_Pop) {
        if (!StackSlotLabel.empty()) {
          // If we've got a label, try to find it and jump there.
          auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
            return x.StackSlotLabel == StackSlotLabel;
          });
          // If we found the label, pop from there.
          if (I != Stack.rend()) {
            CurrentValue = I->Value;
            CurrentPragmaLocation = I->PragmaLocation;
            Stack.erase(std::prev(I.base()), Stack.end());
          }
        } else if (!Stack.empty()) {
          // We do not have a label; just pop the last entry.
          CurrentValue = Stack.back().Value;
          CurrentPragmaLocation = Stack.back().PragmaLocation;
          Stack.pop_back();
        }
      }
      if (Action & PSK_Set) {
        CurrentValue = Value;
        CurrentPragmaLocation = PragmaLocation;
      }
    }

    // MSVC seems to add artificial slots to #pragma stacks on entering a C++
    // method body to restore the stacks on exit, so it works like this:
    //
    // struct S {
    //   #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
    //   void Method() {}
    //   #pragma <name>(pop, InternalPragmaSlot)
    // };
    //
    // It works even with #pragma vtordisp, although MSVC doesn't support
    //   #pragma vtordisp(push [, id], n)
    // syntax.
    //
    // Push / pop a named sentinel slot.
    void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
      assert((Action == PSK_Push || Action == PSK_Pop) &&
             "Can only push / pop #pragma stack sentinels!");
      Act(CurrentPragmaLocation, Action, Label, CurrentValue);
    }

    // Constructors.
    explicit PragmaStack(const ValueType &Default)
        : DefaultValue(Default), CurrentValue(Default) {}

    bool hasValue() const { return CurrentValue != DefaultValue; }

    SmallVector<Slot, 2> Stack;
    ValueType DefaultValue; // Value used for PSK_Reset action.
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).

  /// Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI. Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispMode> VtorDispStack;
  PragmaStack<AlignPackInfo> AlignPackStack;
  // The current #pragma align/pack values and locations at each #include.
  struct AlignPackIncludeState {
    AlignPackInfo CurrentValue;
    SourceLocation CurrentPragmaLocation;
    bool HasNonDefaultValue, ShouldWarnOnInclude;
  };
  SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;

  // Segment #pragmas.
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // This stack tracks the current state of Sema.CurFPFeatures.
  PragmaStack<FPOptionsOverride> FpPragmaStack;
  FPOptionsOverride CurFPFeatureOverrides() {
    FPOptionsOverride result;
    if (!FpPragmaStack.hasValue()) {
      result = FPOptionsOverride();
    } else {
      result = FpPragmaStack.CurrentValue;
    }
    return result;
  }

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression.
  SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a
  /// reference to a variable (constant) that may or may not be odr-used in
  /// this Expr, and we won't know until all lvalue-to-rvalue and discarded
  /// value conversions have been applied to all subexpressions of the
  /// enclosing full expression. This is cleared at the end of each full
  /// expression.
  using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
                                             llvm::SmallPtrSet<Expr *, 4>>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  /// The index of the first FunctionScope that corresponds to the current
  /// context.
  unsigned FunctionScopesStart = 0;

  ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
    return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
                              FunctionScopes.end());
  }

  /// Stack containing information needed when in C++2a an 'auto' is
  /// encountered in a function declaration parameter type specifier in order
  /// to invent a corresponding template parameter in the enclosing
  /// abbreviated function template. This information is also present in
  /// LambdaScopeInfo, stored in the FunctionScopes stack.
  SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;

  /// The index of the first InventedParameterInfo that refers to the current
  /// context.
  unsigned InventedParameterInfosStart = 0;

  ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
    return llvm::makeArrayRef(InventedParameterInfos.begin() +
                                  InventedParameterInfosStart,
                              InventedParameterInfos.end());
  }

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
      ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit.
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with the
  /// new-expression used in the initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
      TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
      UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
  llvm::DenseMap<IdentifierInfo*, AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*, 2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The namespace where coroutine components are defined. In the standard,
  /// they are defined in the std namespace; in the previous implementation,
  /// they were defined in the std::experimental namespace.
  NamespaceDecl *CoroTraitsNamespaceCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// Will hold 'respondsToSelector:'.
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// In addition to being constant evaluated, the current expression
    /// occurs in an immediate function context - either a consteval function
    /// or a consteval if statement.
    ImmediateFunctionContext,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions or unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4>
        ImmediateInvocationCandidates;

    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    // A context can be nested in both a discarded statement context and
    // an immediate function context, so they need to be tracked independently.
    bool InDiscardedStatement;
    bool InImmediateFunctionContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
          InDiscardedStatement(false), InImmediateFunctionContext(false) {}

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated ||
             Context == ExpressionEvaluationContext::ImmediateFunctionContext;
    }

    bool isImmediateFunctionContext() const {
      return Context ==
                 ExpressionEvaluationContext::ImmediateFunctionContext ||
             (Context == ExpressionEvaluationContext::DiscardedStatement &&
              InImmediateFunctionContext);
    }

    bool isDiscardedStatementContext() const {
      return Context == ExpressionEvaluationContext::DiscardedStatement ||
             (Context ==
                  ExpressionEvaluationContext::ImmediateFunctionContext &&
              InDiscardedStatement);
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl *, 2> Pair; public: SpecialMemberOverloadResult() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. 
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  class GlobalMethodPool {
  public:
    using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
    using iterator = llvm::DenseMap<Selector, Lists>::iterator;
    iterator begin() { return Methods.begin(); }
    iterator end() { return Methods.end(); }
    iterator find(Selector Sel) { return Methods.find(Sel); }
    std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
      return Methods.insert(Val);
    }
    int count(Selector Sel) const { return Methods.count(Sel); }
    bool empty() const { return Methods.empty(); }

  private:
    llvm::DenseMap<Selector, Lists> Methods;
  };

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; private: Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; bool WarnedDarwinSDKInfoMissing = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(); ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. 
  /// ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder does) and,
  /// if the diagnostic comes from inside a template instantiation, printing
  /// the template instantiation stack as well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef,
                         unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe
    // no-op in that case.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive())
        return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T,
              typename = typename std::enable_if<
                  !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };

  /// A generic diagnostic builder for errors which may or may not be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();

    bool isImmediate() const { return ImmediateDiag.hasValue(); }

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
    /// probably want to use these instead of creating a
    /// SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }

    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T,
              typename = typename std::enable_if<
                  !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }

    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag,
               const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }

    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }

    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Whether the last error-level diagnostic was immediate. This is used to
  /// determine whether the next info diagnostic should be immediate.
  bool IsLastErrorImmediate = true;

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                             bool DeferHint = false);

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                             bool DeferHint = false);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  /// Whether deferrable diagnostics should be deferred.
  bool DeferDiags = false;

  /// RAII class to control scope of DeferDiags.
  class DeferDiagsRAII {
    Sema &S;
    bool SavedDeferDiags = false;

  public:
    DeferDiagsRAII(Sema &S, bool DeferDiags)
        : S(S), SavedDeferDiags(S.DeferDiags) {
      S.DeferDiags = DeferDiags;
    }
    ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
  };

  /// Whether an uncompilable error has occurred. This includes errors that
  /// happen in deferred diagnostics.
  bool hasUncompilableErrorOccurred() const;

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

private:
  /// Function or variable declarations to be checked for whether the deferred
  /// diagnostics should be emitted.
  llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
  // Emit all deferred diagnostics.
  void emitDeferredDiags();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ?
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void setFunctionHasMustTail(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// Retrieve the current function, if any, that should be analyzed for /// potential availability violations. sema::FunctionScopeInfo *getCurFunctionAvailabilityContext(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. 
It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
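/// A minimal sketch of typical use, assuming a CallExpr *CE in scope and the
/// usual CanThrowResult enumerators (CT_Cannot, CT_Dependent, CT_Can):
///
///   if (Sema::canCalleeThrow(S, CE, CE->getCalleeDecl()) == CT_Cannot) {
///     // The call can be treated as non-throwing.
///   }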
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. 
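/// For instance, a two-parameter function might be annotated
/// __attribute__((swift_name("makeColor(red:green:)"))); the argument labels
/// have to line up with the declaration's shape (example illustrative).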
/// Raise a diagnostic if the name is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                       const ParsedAttr &AL, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up
  /// as they are parsed, meaning that a noderef pointer may not be accessed.
  /// For example, in `&*p` where `p` is a noderef pointer, we will first
  /// parse the `*p`, but need to check that `address of` is called on it.
  /// This requires keeping a container of all pending expressions and
  /// checking whether their addresses are eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               CompleteTypeKind Kind,
                               TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  /// Helper function to judge if we are in module purview.
  /// Return false if we are not in a module.
  bool isCurrentModulePurview() const {
    return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
  }

  /// Enter the scope of the global module.
  Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
  /// Leave the scope of the global module.
  void PopGlobalModuleFragment();

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(const Decl *Entity) {
    return Entity->getOwningModule();
  }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
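/// A sketch of how a caller might consume the result (the surrounding
/// variables are illustrative):
///
///   NameClassification NC = ClassifyName(S, SS, Name, NameLoc, NextToken);
///   switch (NC.getKind()) {
///   case NC_Type:    /* use NC.getType() */ break;
///   case NC_NonType: /* use NC.getNonTypeDecl() */ break;
///   default: break;
///   }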
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
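/// A representative case this is meant to catch (illustrative):
///
///   struct S {
///     int x;
///     S(int x) { x = 1; } // modifies the parameter; 'this->x' was
///                         // probably intended
///   };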
  llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);

  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                  TypedefNameDecl *D, LookupResult &Previous,
                                  bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(
      Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
      LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
      bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None);
  NamedDecl *
  ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                               MultiTemplateParamsArg TemplateParamLists);
  // Returns true if the variable declaration is a redeclaration.
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                     Expr *Init);
  void CheckCompleteVariableDeclaration(VarDecl *VD);
  void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  enum class CheckConstexprKind {
    /// Diagnose issues that are non-constant or that are extensions.
    Diagnose,
    /// Identify whether this function satisfies the formal rules for
    /// constexpr functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
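/// For example, under ARC in C a union with a __strong member is non-trivial
/// to copy and to default-initialize, so uses like the following are
/// rejected (sketch):
///
///   union U { id Obj; int I; }; // non-trivial under ARC
///   void take(union U);         // passing U by value is diagnosed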
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
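/// For example, with -Wunused-parameter enabled:
///
///   static int add(int A, int B, int Unused) { return A + B; }
///   // warning: unused parameter 'Unused'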
  void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

  /// Diagnose whether any parameter or the return value of a function or
  /// Objective-C method definition is passed by value and larger than a
  /// specified threshold.
  void
  DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                         QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path, bool IsFirstDecl);

  /// The parser has processed a global-module-fragment declaration that
  /// begins the definition of the global module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);

  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  ///        could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators
  /// correspond to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
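  /// For instance, using a declaration owned by an unimported module is
  /// diagnosed together with a note suggesting an import of that module
  /// (the exact wording depends on the diagnostic).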
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
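/// For example:
///
///   S(const S &) = default;                     // special member
///   bool operator==(const S &) const = default; // defaulted comparison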
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,
    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,
    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,
    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
    /// Merge availability attributes for an implementation of
    /// an optional protocol requirement.
    AMK_OptionalProtocolImplementation
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value. (For example, an attribute inferred
  /// from another platform while being applied through '#pragma clang
  /// attribute' would get AP_PragmaClangAttribute +
  /// AP_InferredFromOtherPlatform = 3.)
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,
    /// The availability attribute was applied using
    /// '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,
    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return true if a new attribute was added.
  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *
  mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                        IdentifierInfo *Platform, bool Implicit,
                        VersionTuple Introduced, VersionTuple Deprecated,
                        VersionTuple Obsoleted, bool IsUnavailable,
                        StringRef Message, bool IsStrict, StringRef Replacement,
                        AvailabilityMergeKind AMK, int Priority);
  TypeVisibilityAttr *
  mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                          TypeVisibilityAttr::VisibilityType Vis);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                      VisibilityAttr::VisibilityType Vis);
  UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                          StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
  MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
                                            const AttributeCommonInfo &CI,
                                            bool BestCase,
                                            MSInheritanceModel Model);
  ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
                            StringRef NewUserDiagnostic);
  FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg);
  SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                                StringRef Name);
  CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                                StringRef Name);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                          const AttributeCommonInfo &CI,
                                          const IdentifierInfo *Ident);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
  SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
                                    StringRef Name);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                          const AttributeCommonInfo &CI);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                                const InternalLinkageAttr &AL);
  WebAssemblyImportNameAttr *
  mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL);
  WebAssemblyImportModuleAttr *
  mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL);
  EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
  EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
                                              const EnforceTCBLeafAttr &AL);
  BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
  void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };
  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                             const LookupResult &OldDecls, NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                  bool ConsiderCudaAttrs = true,
                  bool ConsiderRequiresClauses = true);

  enum class AllowedExplicit {
    /// Allow no explicit functions to be used.
    None,
    /// Allow explicit conversion functions but not explicit constructors.
    Conversions,
    /// Allow both explicit conversion functions and explicit constructors.
    All
  };

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions,
                        AllowedExplicit AllowExplicit,
                        bool InOverloadResolution, bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution, QualType &ConvertedType,
                           bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType,
                               bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType &ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType,
                                  QualType ToType);

  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                              CXXCastPath &BasePath, bool IgnoreBaseAccess,
                              bool Diagnose = true);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType, bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind, CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsFunctionConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);

  bool CanPerformAggregateInitializationForOverloadResolution(
      const InitializedEntity &Entity, InitListExpr *From);

  bool IsStringInit(Expr *Init, const ArrayType *AT);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);
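  /// A minimal call-site sketch for the copy-initialization entry points
  /// above (hypothetical surrounding names; 'Param' and 'Arg' are assumed):
  /// \code
  ///   InitializedEntity Entity =
  ///       InitializedEntity::InitializeParameter(Context, Param);
  ///   ExprResult Init = PerformCopyInitialization(Entity, EqualLoc, Arg);
  ///   if (Init.isInvalid())
  ///     return true; // diagnostics were already emitted
  /// \endcode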
  /// Check that the lifetime of the initializer (and its subobjects) is
  /// sufficient for initializing the entity, and perform lifetime extension
  /// (when permitted) if not.
  void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,    ///< Expression in a case label.
    CCEK_Enumerator,   ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg,  ///< Value of a non-type template parameter.
    CCEK_ArrayBound,   ///< Array bound in array declarator or new-expression.
    CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
    CCEK_Noexcept      ///< Condition in a noexcept(bool) specifier.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value,
                                              CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE,
                                              NamedDecl *Dest = nullptr);

  /// Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

    /// Emits a diagnostic when the expression has incomplete class type.
    virtual SemaDiagnosticBuilder
    diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    /// Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder
    noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S,
                                                    SourceLocation Loc,
                                                    QualType T) = 0;

    /// Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder
    noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                        bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                          QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                                 QualType T) = 0;
  };
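  /// A minimal sketch of a diagnoser built on the hierarchy above (assumed
  /// diagnostic ID; real subclasses live in the implementation files):
  /// \code
  ///   struct SizeDiagnoser final : ICEConvertDiagnoser {
  ///     SizeDiagnoser()
  ///         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
  ///                               /*Suppress=*/false,
  ///                               /*SuppressConversion=*/false) {}
  ///     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
  ///                                          QualType T) override {
  ///       return S.Diag(Loc, diag::err_size_not_integral) << T;
  ///     }
  ///     // ...remaining diagnose*/note* overrides elided...
  ///   };
  /// \endcode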
  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None,
                            OverloadCandidateParamOrder PO = {});
  void AddFunctionCandidates(
      const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet,
      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
      bool SuppressUserConversions = false, bool PartialOverloading = false,
      bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversion = false,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodTemplateCandidate(
      FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext,
      TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType,
      Expr::Classification ObjectClassification, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet,
      bool SuppressUserConversions = false, bool PartialOverloading = false,
      OverloadCandidateParamOrder PO = {});
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet,
      bool SuppressUserConversions = false, bool PartialOverloading = false,
      bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
      OverloadCandidateParamOrder PO = {});
  bool CheckNonDependentConversions(
      FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
      ConversionSequenceList &Conversions, bool SuppressUserConversions,
      CXXRecordDecl *ActingContext = nullptr,
      QualType ObjectType = QualType(),
      Expr::Classification ObjectClassification = {},
      OverloadCandidateParamOrder PO = {});
  void AddConversionCandidate(
      CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
      OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
      bool AllowExplicit, bool AllowResultConversion = true);
  void AddTemplateConversionCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
      OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
      bool AllowExplicit, bool AllowResultConversion = true);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto, Expr *Object,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet);
  void AddNonMemberOperatorCandidates(
      const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet,
      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   OverloadCandidateParamOrder PO = {});
  void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet);
  void AddArgumentDependentLookupCandidates(
      DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args,
      TemplateArgumentListInfo *ExplicitTemplateArgs,
      OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate.
  void NoteOverloadCandidate(
      NamedDecl *Found, FunctionDecl *Fn,
      OverloadCandidateRewriteKind RewriteKind =
          OverloadCandidateRewriteKind(),
      QualType DestType = QualType(), bool TakingAddress = false);

  // Emit as a series of 'note's all templates and non-templates identified
  // by the expression E.
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
                              ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  /// Find the failed Boolean condition within a given Boolean
  /// constant expression, and describe it with a string.
  std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring
  /// any non-ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-dependent diagnose_if attributes should be checked each time a
  /// function is used as a direct callee of a function call.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                           const Expr *ThisArg,
                                           ArrayRef<const Expr *> Args,
                                           SourceLocation Loc);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring
  /// any ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-independent diagnose_if attributes should be checked on every
  /// use of a function.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                             SourceLocation Loc);
  /// Returns whether the given function's address can be taken or not,
  /// optionally emitting a diagnostic if the address can't be taken.
  ///
  /// Returns false if taking the address of the function is illegal.
  bool checkAddressOfFunctionIsAvailable(
      const FunctionDecl *Function, bool Complain = false,
      SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->  [Return]
  // NonFunctionType --> NonFunctionType
  // R (A) --> R(A)
  // R (*)(A) --> R (A)
  // R (&)(A) --> R (A)
  // R (S::*)(A) --> R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                     bool Complain, DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfSingleOverloadCandidate(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
      bool Complain = false,
      SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);
  void AddOverloadedCallCandidates(
      LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection = true,
                                     bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
                                        NestedNameSpecifierLoc NNSLoc,
                                        DeclarationNameInfo DNI,
                                        const UnresolvedSetImpl &Fns,
                                        bool PerformADL = true);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input, bool RequiresADL = true);

  void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
                             OverloadedOperatorKind Op,
                             const UnresolvedSetImpl &Fns,
                             ArrayRef<Expr *> Args, bool RequiresADL = true);
  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                   BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true,
                                   bool AllowRewrittenCandidates = true,
                                   FunctionDecl *DefaultedFn = nullptr);
  ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
                                                const UnresolvedSetImpl &Fns,
                                                Expr *LHS, Expr *RHS,
                                                FunctionDecl *DefaultedFn);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);
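  /// For reference, the kind of source pattern the address-of-overload entry
  /// points above resolve (illustrative only):
  /// \code
  ///   void f(int);
  ///   void f(double);
  ///   void (*p)(int) = &f; // the target type selects f(int) from the set
  /// \endcode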
  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc,
                                       Expr *ExecConfig = nullptr,
                                       bool IsExecConfig = false,
                                       bool AllowRecovery = false);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the
  /// location that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria is specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,

    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,

    /// Label name lookup.
    LookupLabel,

    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,

    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,

    /// Look up a name following ~ in a destructor name. This is an ordinary
    /// lookup, but prefers tags to typedefs.
    LookupDestructorName,

    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,

    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,

    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,

    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,

    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,

    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,

    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,

    /// Look up the name of an OpenMP user-defined reduction operation.
    LookupOMPReductionName,

    /// Look up the name of an OpenMP user-defined mapper.
    LookupOMPMapperName,

    /// Look up any declaration with any name.
    LookupAnyName
  };

  /// Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,

    /// The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists and is visible.
    ForVisibleRedeclaration,

    /// The lookup results will be used for redeclaration of a name
    /// with external linkage; non-visible lookup results with external
    /// linkage may also be found.
    ForExternalRedeclaration
  };

  RedeclarationKind forRedeclarationInCurContext() {
    // A declaration with an owning module for linkage can never link against
    // anything that is not visible. We don't need to check linkage here; if
    // the context has internal linkage, redeclaration lookup won't find
    // things from other TUs, and we can't safely compute linkage yet in
    // general.
    if (cast<Decl>(CurContext)
            ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
      return ForVisibleRedeclaration;
    return ForExternalRedeclaration;
  }

  /// The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// The lookup resulted in an error.
    LOLR_Error,
    /// The lookup found no match but no diagnostic was issued.
    LOLR_ErrorNoDiagnostic,
    /// The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to
    /// be passed as a non-type template argument pack.
    LOLR_Template,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplatePack,
  };
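  /// For reference, how user-defined-literal declarations map onto these
  /// results (illustrative only):
  /// \code
  ///   unsigned long long operator""_a(unsigned long long); // LOLR_Cooked
  ///   unsigned long long operator""_b(const char *);       // LOLR_Raw
  ///   template <char...>
  ///   unsigned long long operator""_c();                   // LOLR_Template
  /// \endcode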
  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC,
                              SourceLocation TypoLoc);

  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo
  /// correction should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);
  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc, LookupNameKind NameKind,
                              RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupBuiltin(LookupResult &R);
  void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *
  LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                 RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    UnresolvedSetImpl &Functions);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
                              bool IsUDSuffix);
  LiteralOperatorLookupResult
  LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                        bool AllowRaw, bool AllowTemplate,
                        bool AllowStringTemplate, bool DiagnoseMissing,
                        StringLiteral *StringLit = nullptr);
  bool isKnownName(StringRef name);

  /// Status of the function emission on the CUDA/HIP/OpenMP host/device
  /// attrs.
  enum class FunctionEmissionStatus {
    Emitted,
    CUDADiscarded,     // Discarded due to CUDA/HIP hostness
    OMPDiscarded,      // Discarded due to OpenMP hostness
    TemplateDiscarded, // Discarded due to uninstantiated templates
    Unknown,
  };
  FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
                                           bool Final = false);

  // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device
  // check.
  bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool IncludeDependentBases = false,
                          bool LoadExternal = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };
  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               CorrectionCandidateCallback &CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param RecoverUncorrectedTypos If true, when typo correction fails, it
  /// will rebuild the given Expr with all TypoExprs degraded to
  /// RecoveryExprs.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine
  /// if it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult CorrectDelayedTyposInExpr(
      Expr *E, VarDecl *InitDecl = nullptr,
      bool RecoverUncorrectedTypos = false,
      llvm::function_ref<ExprResult(Expr *)> Filter =
          [](Expr *E) -> ExprResult { return E; });

  ExprResult CorrectDelayedTyposInExpr(
      ExprResult ER, VarDecl *InitDecl = nullptr,
      bool RecoverUncorrectedTypos = false,
      llvm::function_ref<ExprResult(Expr *)> Filter =
          [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid()
               ? ER
               : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
                                           RecoverUncorrectedTypos, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

  void FindAssociatedClassesAndNamespaces(
      SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
      AssociatedNamespaceSet &AssociatedNamespaces,
      AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}
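  /// A minimal sketch of the \c Filter parameter to
  /// CorrectDelayedTyposInExpr above (illustrative; the predicate is an
  /// assumed example):
  /// \code
  ///   ExprResult Res = CorrectDelayedTyposInExpr(
  ///       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
  ///       [](Expr *Candidate) -> ExprResult {
  ///         // Reject rebuilt expressions that are not usable here; other
  ///         // correction combinations will then be tried.
  ///         return isa<CallExpr>(Candidate) ? ExprResult(Candidate)
  ///                                         : ExprError();
  ///       });
  /// \endcode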
  /// Attempts to produce a RecoveryExpr after some AST node cannot be
  /// created.
  ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                                ArrayRef<Expr *> SubExprs,
                                QualType T = QualType());

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                              SourceLocation Loc);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                                 bool ForRedeclaration, SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
      FunctionDecl *FD);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  // Helper for delayed processing of attributes.
  void ProcessDeclAttributeDelayed(Decl *D,
                                   const ParsedAttributesView &AttrList);
  void ProcessDeclAttributeList(Scope *S, Decl *D,
                                const ParsedAttributesView &AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const ParsedAttributesView &AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Handles semantic checking for features that are common to all
  /// attributes, such as checking whether a parameter was properly
  /// specified, or the correct number of arguments were passed, etc. Returns
  /// true if the attribute has been diagnosed.
  bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
  bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used
  /// by nonnull), but if the second parameter is true, then we treat a
  /// reference type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  llvm::Error isValidSectionSpecifier(StringRef Str);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
                                   const StringLiteral *Literal,
                                   bool &HasDefault, bool &HasCommas,
                                   SmallVectorImpl<StringRef> &Strings);
  bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD,
                                          SourceRange Range, bool BestCase,
                                          MSInheritanceModel SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but
  // not one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Process the attributes before creating an attributed statement. Returns
  /// the semantic attributes that have been processed.
  void ProcessStmtAttributes(Stmt *Stmt,
                             const ParsedAttributesWithRange &InAttrs,
                             SmallVectorImpl<const Attr *> &OutAttrs);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance
  /// variables listed in the implementation match those listed in the
  /// interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCContainerDecl *IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those
  /// properties which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(
      Scope *S, const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If method is a property setter/getter
  /// and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns ivar's property on success.
  ObjCIvarDecl *
  GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                 const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(
      Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
      FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc,
      Selector SetterSel, SourceLocation SetterNameLoc,
      const bool isReadWrite, unsigned &Attributes,
      const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI,
      tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *
  CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
                     SourceLocation LParenLoc, FieldDeclarator &FD,
                     Selector GetterSel, SourceLocation GetterNameLoc,
                     Selector SetterSel, SourceLocation SetterNameLoc,
                     const bool isReadWrite, const unsigned Attributes,
                     const unsigned AttributesAsWritten, QualType T,
                     TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
                     DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                       ObjCInterfaceDecl *IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy { MMS_loose, MMS_strict };

  /// MatchTwoMethodDeclarations - Checks whether two methods' types match
  /// and returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl *IMPDecl,
                                  ObjCContainerDecl *IDecl,
                                  bool &IncompleteImpl, bool ImmediateClass,
                                  bool WarnCategoryMethodImpl = false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

  /// Returns default addr space for method qualifiers.
  LangAS getDefaultCXXMethodAddrSpace() const;

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
                             bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// parameter checkTheOther is set, it then checks the other kind. If no
  /// such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(
      Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods,
      bool InstanceFirst, bool CheckTheOther,
      const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl *> &Methods);

  void DiagnoseMultipleMethodInGlobalPool(
      SmallVectorImpl<ObjCMethodDecl *> &Methods, Selector Sel, SourceRange R,
      bool receiverIdOrClass);

private:
  /// - Returns the method that best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl *> &Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo,
                                  SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently
  /// associate a selector with a method declaration for purposes of
  /// typechecking messages sent to "id" (where the class of the object is
  /// unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method,
                                     bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method,
                                    bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *
  LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                   bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/ true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *
  LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                  bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/ false);
  }

  const ObjCMethodDecl *
  SelectorsForTypoCorrection(Selector Sel, QualType ObjectType = QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void
  CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                    SmallVectorImpl<ObjCIvarDecl *> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) {}
    FullExprArg(Sema &actions) : E(nullptr) {}

    ExprResult release() { return E; }

    Expr *get() const { return E; }

    Expr *operator->() { return E; }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };
  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnAfterCompoundStatementLeadingPragmas();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc, Stmt *SubStmt,
                              Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
                                 ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
  StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
                                 Stmt *SubStmt);

  class ConditionResult;

  StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                         SourceLocation LParenLoc, Stmt *InitStmt,
                         ConditionResult Cond, SourceLocation RParenLoc,
                         Stmt *ThenVal, SourceLocation ElseLoc,
                         Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                         SourceLocation LParenLoc, Stmt *InitStmt,
                         ConditionResult Cond, SourceLocation RParenLoc,
                         Stmt *ThenVal, SourceLocation ElseLoc,
                         Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    SourceLocation LParenLoc, Stmt *InitStmt,
                                    ConditionResult Cond,
                                    SourceLocation RParenLoc);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                   Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
                            SourceLocation LParenLoc, ConditionResult Cond,
                            SourceLocation RParenLoc, Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                          Stmt *First, ConditionResult Second,
                          FullExprArg Third, SourceLocation RParenLoc,
                          Stmt *Body);

  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };

  StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc, Stmt *InitStmt,
                                  Stmt *LoopVar, SourceLocation ColonLoc,
                                  Expr *Collection, SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc, Stmt *InitStmt,
                                  SourceLocation ColonLoc, Stmt *RangeDecl,
                                  Stmt *Begin, Stmt *End, Expr *Cond,
                                  Expr *Inc, Stmt *LoopVarDecl,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

  StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc, Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params,
                                unsigned OpenMPCaptureLevel = 0);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);

  struct NamedReturnInfo {
    const VarDecl *Candidate;

    enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
    Status S;

    bool isMoveEligible() const { return S != None; }
    bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
  };
  enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
  NamedReturnInfo getNamedReturnInfo(
      Expr *&E,
      SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
  NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
  const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
                                         QualType ReturnType);

  ExprResult
  PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                  const NamedReturnInfo &NRInfo, Expr *Value,
                                  bool SuppressSimplerImplicitMoves = false);

  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             bool AllowRecovery = false);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
                                     Expr *RetValExp,
                                     NamedReturnInfo &NRInfo,
                                     bool SuppressSimplerImplicitMoves);

  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs,
                             Expr *AsmString, MultiExprArg Clobbers,
                             unsigned NumLabels, SourceLocation RParenLoc);

  void FillInlineAsmIdentifierInfo(Expr *Res,
                                   llvm::InlineAsmIdentifierInfo &Info);
  ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       UnqualifiedId &Id,
                                       bool IsUnevaluatedContext);
  bool LookupInlineAsmField(StringRef Base, StringRef Member,
                            unsigned &Offset, SourceLocation AsmLoc);
  ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                         SourceLocation AsmLoc);
  StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                            ArrayRef<Token> AsmToks, StringRef AsmString,
                            unsigned NumOutputs, unsigned NumInputs,
                            ArrayRef<StringRef> Constraints,
                            ArrayRef<StringRef> Clobbers,
                            ArrayRef<Expr *> Exprs, SourceLocation EndLoc);
  LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                   SourceLocation Location,
                                   bool AlwaysCreate);

  VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo,
                                  QualType ExceptionType,
                                  SourceLocation StartLoc,
                                  SourceLocation IdLoc, IdentifierInfo *Id,
                                  bool Invalid = false);

  Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

  StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                  SourceLocation RParen, Decl *Parm,
                                  Stmt *Body);

  StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

  StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                MultiStmtArg Catch, Stmt *Finally);

  StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
  StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                  Scope *CurScope);
  ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                            Expr *operand);
  StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                         Expr *SynchExpr, Stmt *SynchBody);

  StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

  VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                     SourceLocation StartLoc,
                                     SourceLocation IdLoc,
                                     IdentifierInfo *Id);

  Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

  StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
                                Stmt *HandlerBlock);
  StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                              ArrayRef<Stmt *> Handlers);

  StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                              SourceLocation TryLoc, Stmt *TryBlock,
                              Stmt *Handler);
  StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr,
                                 Stmt *Block);
  void ActOnStartSEHFinallyBlock();
  void ActOnAbortSEHFinallyBlock();
  StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
  StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

  void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

  bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

  /// If it's a file scoped decl that must warn if not used, keep track
  /// of it.
  void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

  /// DiagnoseUnusedExprResult - If the statement passed in is an expression
  /// whose result is unused, warn.
  void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
  void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
  void DiagnoseUnusedDecl(const NamedDecl *ND);

  /// If VD is set but not otherwise used, diagnose, for a parameter or a
  /// variable.
  void DiagnoseUnusedButSetDecl(const VarDecl *VD);

  /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
  /// statement as a \p Body, and it is located on the same line.
  ///
  /// This helps prevent bugs due to typos, such as:
  ///     if (condition);
  ///       do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                             unsigned DiagID);

  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);
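  /// The pattern DiagnoseSelfMove warns about (illustrative only):
  /// \code
  ///   v = std::move(v); // -Wself-move: value is moved to itself
  /// \endcode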
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);

bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);
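// Sketch of the intended pairing for the evaluation-context stack above (an
// assumption about usage, mirroring the push/pop declarations):
//
//   PushExpressionEvaluationContext(
//       ExpressionEvaluationContext::Unevaluated);
//   ExprResult R = /* ... analyze an unevaluated operand ... */ ExprResult();
//   PopExpressionEvaluationContext();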
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit,
  TryCapture_ExplicitByVal,
  TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType, QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
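// Illustrative capture scenarios for tryCaptureVariable (assumed, for
// exposition only): given
//
//   int x = 0;
//   auto f = [x]  { return x; };   // TryCapture_ExplicitByVal
//   auto g = [&x] { return x; };   // TryCapture_ExplicitByRef
//   auto h = [=]  { return x; };   // TryCapture_Implicit
//
// CaptureType receives the type of the closure field created for 'x', and
// DeclRefType the type a reference to 'x' has inside the lambda body.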
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false,
                                      ArrayRef<const Expr *> StopAt = None);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// Try to convert an expression \p E to type \p Ty. Returns the result of
/// the conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);

/// Conditionally issue a diagnostic based on the statements' reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                     const PartialDiagnostic &PD);

/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                         const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
                             SourceLocation TemplateKWLoc, UnqualifiedId &Id,
                             bool HasTrailingLParen, bool IsAddressOfOperand,
                             CorrectionCandidateCallback *CCC = nullptr,
                             bool IsInlineAsmIdentifier = false,
                             Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                            TemplateArgumentListInfo &Buffer,
                            DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *&TemplateArgs);

bool DiagnoseDependentMemberLookup(LookupResult &R);

bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                    CorrectionCandidateCallback &CCC,
                    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                    ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);

DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
                                  IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                              IdentifierInfo *II,
                              bool AllowBuiltinCreation = false);

ExprResult ActOnDependentIdExpression(
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const DeclarationNameInfo &NameInfo, bool isAddressOfOperand,
    const TemplateArgumentListInfo *TemplateArgs);
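// Example of a dependent id-expression of the kind ActOnDependentIdExpression
// above services (standard C++ template semantics, shown for exposition):
//
//   template <typename T> struct W : T {
//     void f() { this->g(); }  // 'g' is dependent; resolution is deferred
//   };                         // until instantiation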
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);

DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                              SourceLocation Loc,
                              const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                 const DeclarationNameInfo &NameInfo,
                 const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr,
                 SourceLocation TemplateKWLoc = SourceLocation(),
                 const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                 const DeclarationNameInfo &NameInfo,
                 NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr,
                 SourceLocation TemplateKWLoc = SourceLocation(),
                 const TemplateArgumentListInfo *TemplateArgs = nullptr);

ExprResult BuildAnonymousStructUnionMemberReference(
    const CXXScopeSpec &SS, SourceLocation nameLoc,
    IndirectFieldDecl *indirectField,
    DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
    Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation());

ExprResult BuildPossibleImplicitMemberExpr(
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    UnresolvedLookupExpr *AsULE = nullptr);
ExprResult
BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                        LookupResult &R,
                        const TemplateArgumentListInfo *TemplateArgs,
                        bool IsDefiniteInstance, const Scope *S);

bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R,
                                bool HasTrailingLParen);

ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                  const DeclarationNameInfo &NameInfo,
                                  bool IsAddressOfOperand, const Scope *S,
                                  TypeSourceInfo **RecoveryTSI = nullptr);

ExprResult BuildDependentDeclRefExpr(
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
                                    bool NeedsADL,
                                    bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
    const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
    NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr,
    bool AcceptInvalidDecl = false);

ExprResult BuildLiteralOperatorCall(
    LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args,
    SourceLocation LitEndLoc,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

ExprResult BuildPredefinedExpr(SourceLocation Loc,
                               PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                         SourceLocation LParen,
                                         SourceLocation RParen,
                                         TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                         SourceLocation LParen,
                                         SourceLocation RParen,
                                         ParsedType ParsedTy);

bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
                                  Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R,
                              MultiExprArg Val);
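// For exposition: BuildPredefinedExpr/ActOnPredefinedExpr above service the
// predefined identifiers, e.g.
//
//   void f() {
//     const char *n = __func__;      // standard C99/C++11
//     const char *m = __FUNCTION__;  // common extension
//   }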
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                              Scope *UDLScope = nullptr);

ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                     SourceLocation DefaultLoc,
                                     SourceLocation RParenLoc,
                                     Expr *ControllingExpr,
                                     ArrayRef<ParsedType> ArgTypes,
                                     ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                      SourceLocation DefaultLoc,
                                      SourceLocation RParenLoc,
                                      Expr *ControllingExpr,
                                      ArrayRef<TypeSourceInfo *> Types,
                                      ArrayRef<Expr *> Exprs);

// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc,
                        Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
                        Expr *Input);

bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                          SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind,
                                          SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                         UnaryExprOrTypeTrait ExprKind,
                                         bool IsType, void *TyOrEx,
                                         SourceRange ArgRange);

ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);

bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                      SourceRange ExprRange,
                                      UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc,
                                        IdentifierInfo &Name,
                                        SourceLocation NameLoc,
                                        SourceLocation RParenLoc);

ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                               tok::TokenKind Kind, Expr *Input);

ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                   Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                           Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
                                            Expr *ColumnIdx,
                                            SourceLocation RBLoc);

ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                    Expr *LowerBound,
                                    SourceLocation ColonLocFirst,
                                    SourceLocation ColonLocSecond,
                                    Expr *Length, Expr *Stride,
                                    SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
                                    SourceLocation RParenLoc,
                                    ArrayRef<Expr *> Dims,
                                    ArrayRef<SourceRange> Brackets);

/// Data structure for iterator expression.
struct OMPIteratorData {
  IdentifierInfo *DeclIdent = nullptr;
  SourceLocation DeclIdentLoc;
  ParsedType Type;
  OMPIteratorExpr::IteratorRange Range;
  SourceLocation AssignLoc;
  SourceLocation ColonLoc;
  SourceLocation SecColonLoc;
};

ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
                                SourceLocation LLoc, SourceLocation RLoc,
                                ArrayRef<OMPIteratorData> Data);
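// Hedged example of the OpenMP array-section form ActOnOMPArraySectionExpr
// above parses (OpenMP 5.x syntax, shown for exposition):
//
//   #pragma omp target update to(a[0:n:2])
//   //            lower-bound : length : stride, matching the LowerBound,
//   //            Length, and Stride parameters of the callback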
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
};

ExprResult BuildMemberReferenceExpr(
    Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                         bool IsArrow, const CXXScopeSpec &SS,
                         SourceLocation TemplateKWLoc,
                         NamedDecl *FirstQualifierInScope, LookupResult &R,
                         const TemplateArgumentListInfo *TemplateArgs,
                         const Scope *S, bool SuppressQualifierCheck = false,
                         ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                   SourceLocation OpLoc,
                                   const CXXScopeSpec &SS, FieldDecl *Field,
                                   DeclAccessPair FoundDecl,
                                   const DeclarationNameInfo &MemberNameInfo);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                   const CXXScopeSpec &SS,
                                   const LookupResult &R);

ExprResult ActOnDependentMemberExpr(
    Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc,
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                 tok::TokenKind OpKind, CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 UnqualifiedId &Member, Decl *ObjCImpDecl);

MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);

bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
                             const FunctionProtoType *Proto,
                             ArrayRef<Expr *> Args, SourceLocation RParenLoc,
                             bool ExecConfig = false);

void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
                              const Expr *ArgExpr);
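// The ActOnMemberAccessExtraArgs hook above enables the dot-to-arrow
// recovery described in its comment, e.g. (illustrative only; 'Handle' and
// 'bar' are hypothetical names):
//
//   struct Handle { Widget *operator->(); };
//   Handle h;
//   h.bar();   // no member 'bar' in Handle; the access is retried as
//              // h->bar() through the custom operator->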
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                           MultiExprArg CallArgs);

enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);

CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
  static BinaryOperatorKind
  ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
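// For exposition, the GNU statement expression serviced by
// ActOnStmtExpr/BuildStmtExpr above ('compute' is a hypothetical function):
//
//   int v = ({ int tmp = compute(); tmp * 2; });  // value of last statement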
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);

void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                           Expr *LHSExpr, Expr *RHSExpr,
                           SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc,
                              SourceLocation RPLoc);

// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc, SourceLocation RPLoc,
                              DeclContext *ParentContext);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,
  /// The symbol does not exist.
  IER_DoesNotExist,
  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,
  /// An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S,
                                            SourceLocation KeywordLoc,
                                            bool IsIfExists, CXXScopeSpec &SS,
                                            UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists, CXXScopeSpec &SS,
                                      UnqualifiedId &Name, Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed.  ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);
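// Illustrative block literal driven through the ActOnBlock* callbacks above
// (Apple Blocks extension):
//
//   int (^square)(int) = ^(int x) { return x * x; };
//   int nine = square(3);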
//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
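// Illustrative case for isStdInitializerList above (assumed typical client,
// shown for exposition): given
//
//   std::initializer_list<int> il = {1, 2, 3};
//
// a query on the type of 'il' succeeds and sets the Element out-parameter to
// the QualType for 'int'.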
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc,
                             const LookupResult *R = nullptr,
                             const UsingDecl *UD = nullptr);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation,
    bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                     SourceLocation UsingLoc,
                                     SourceLocation EnumLoc,
                                     SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                SourceLocation UsingLoc,
                                SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);
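// Inheriting-constructor shape serviced by CheckInheritingConstructorUsingDecl
// and findInheritingConstructor above (standard C++11 semantics):
//
//   struct Base { Base(int); };
//   struct Derived : Base {
//     using Base::Base;   // Derived(int) is implicitly synthesized on use
//   };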
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);
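  // Illustrative usage sketch (an assumption, not part of the original
  // header): a caller typically seeds an ImplicitExceptionSpecification,
  // feeds it every subobject special member it would invoke, then reads back
  // the merged specification:
  //
  //   ImplicitExceptionSpecification Spec(SemaRef);
  //   for (const CXXMethodDecl *MD : CalledSpecialMembers)  // hypothetical
  //     Spec.CalledDecl(Loc, MD);
  //   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();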
  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);

/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);

/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(
    Decl *Method, ExceptionSpecificationType EST,
    SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
    ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

class InheritedConstructorInfo;

/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                               InheritedConstructorInfo *ICI = nullptr,
                               bool Diagnose = false);

/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);

/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *
DeclareImplicitDefaultConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                      CXXConstructorDecl *Constructor);

/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
                              CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
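// Hedged examples for the checkThisInStaticMemberFunction* checks above:
// 'this' may not appear anywhere in a static member function, including its
// trailing parts, e.g.
//
//   struct S {
//     static auto f() -> decltype(this);         // error: in the type
//     static void g() noexcept(noexcept(this));  // error: in the
//   };                                           // exception specification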
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             QualType DeclInitType, MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr *> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                             SourceLocation NameLoc, Scope *S,
                             CXXScopeSpec &SS, ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

// Checks that the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as with
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);

// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way as they are when trying to initialize
// these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
                                QualType SrcTy);

/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc, Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc, Expr *E,
                             SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             TypeSourceInfo *Ty, Expr *E,
                             SourceRange AngleBrackets, SourceRange Parens);

ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                   ExprResult Operand,
                                   SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                   Expr *Operand, SourceLocation RParenLoc);

ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns 'true' if failed, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt =
                             nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange, Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
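// Sketch of how the scopes above map to source forms (standard C++ lookup
// rules, shown for exposition):
//
//   new T;    // AFS_Both: a class-scope operator new is preferred,
//             // falling back to the global one
//   ::new T;  // AFS_Global: class-scope allocation functions are bypassed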
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             AllocationFunctionScope NewScope,
                             AllocationFunctionScope DeleteScope,
                             QualType AllocType, bool IsArray,
                             bool &PassAlignment, MultiExprArg PlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete,
                             bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                     ArrayRef<QualType> Params);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name, FunctionDecl *&Operator,
                              bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            bool Overaligned,
                                            DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                    CXXRecordDecl *RD);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                          bool ArrayForm, Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                          bool IsDelete, bool CallCanBeVirtual,
                          bool WarnOnNonAbstractTypes,
                          SourceLocation DtorLoc);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               ParsedType LhsTy, Expr *DimExpr,
                               SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo, Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(
    Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind,
    const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc,
    SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind, CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec &DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                               bool BoundToLvalueReference);

ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
                             CXXScopeSpec *SS = nullptr);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc, CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

/// Keeps information about an identifier in a nested-name-spec.
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {}

  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {}
};

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  NestedNameSpecInfo &IdInfo);

bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext, CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false);

ExprResult ActOnDecltypeExpression(Expr *E);

bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc);

bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext);

/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext);

/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
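/// For illustration only (\c Ann and \c AnnRange are hypothetical names), the
/// save/restore round-trip looks like:
/// \code
///   void *Ann = SaveNestedNameSpecifierAnnotation(SS);
///   // ... the annotation token is cached and revisited later ...
///   RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, SS);
/// \endcode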
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS);

bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed as part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault);

/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause);

/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);

/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init);

/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
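/// For illustration, the init-capture below introduces the name \c n, which
/// is found via such a dummy variable during lookup inside the lambda body:
/// \code
///   auto L = [n = 42](int x) { return n + x; };
/// \endcode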
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// If it is not, a diagnostic is emitted, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false);

private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache;

public:
const NormalizedConstraint *getNormalizedAssociatedConstraints(NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result);

/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been, had a pair of the atomic constraints involved been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction'), given the template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if false is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
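/// For illustration only (hypothetical names), instantiating \c f below
/// requires checking the conjunction of its two constraint expressions:
/// \code
///   template<typename T> concept Small = sizeof(T) <= 8;
///   template<typename T> requires Small<T> && (alignof(T) <= 8)
///   void f(T);
/// \endcode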
bool CheckConstraintSatisfaction(const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);

/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction);

/// Check whether the given function decl's trailing requires clause, if any,
/// is satisfied. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation());

/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true);

/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - Builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - Builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or a C structure with the attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None);

void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record);

/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);

/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all virtual members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
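/// For illustration (MS ABI targets), a class-level attribute as in
/// \code
///   class __declspec(dllexport) Widget { public: void f(); };
/// \endcode
/// exports the class's eligible member functions.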
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams);

QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();

bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
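/// For illustration, the override below is ill-formed:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); };  // error: overrides a function marked 'final'
/// \endcode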
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
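/// For illustration only (a hypothetical snippet), given
/// \code
///   std::vectr<int> V;  // 'vectr' is undeclared
/// \endcode
/// the name 'vectr' can be typo-corrected to the type template 'std::vector'.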
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
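/// For illustration, the friend declaration below has its template parameter
/// list checked in the TPC_FriendClassTemplate context:
/// \code
///   struct X { template<typename T> friend class Y; };
/// \endcode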
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
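/// For illustration, naming \c pi<float> below forms such a reference:
/// \code
///   template<typename T> constexpr T pi = T(3.1415926535897932385L);
///   float area(float r) { return pi<float> * r * r; }
/// \endcode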
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
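/// For illustration, in the (hypothetical) snippet below, the argument for
/// \c T is checked with CTAK_Deduced, while the argument for \c N is checked
/// with CTAK_DeducedFromArrayBound:
/// \code
///   template<typename T, unsigned N> void f(T (&)[N]);
///   int A[3];
///   void g() { f(A); }  // T = int, N = 3
/// \endcode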
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,
  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,
  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg);

ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc);
ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,
  /// The base type of a class type.
  UPPC_BaseType,
  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// The type of a data member.
  UPPC_DataMemberType,
  /// The size of a bit-field.
  UPPC_BitFieldWidth,
  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// The enumerator value.
  UPPC_EnumeratorValue,
  /// A using declaration.
  UPPC_UsingDeclaration,
  /// A friend declaration.
  UPPC_FriendDeclaration,
  /// A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// An initializer.
  UPPC_Initializer,
  /// A default argument.
  UPPC_DefaultArgument,
  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// The type of an exception.
  UPPC_ExceptionType,
  /// Partial specialization.
  UPPC_PartialSpecialization,
  /// Microsoft __if_exists.
  UPPC_IfExists,
  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// Lambda expression.
  UPPC_Lambda,
  /// Block expression.
  UPPC_Block,
  /// A type constraint.
  UPPC_TypeConstraint,
  /// A requirement in a requires-expression.
  UPPC_Requirement,
  /// A requires-clause.
  UPPC_RequiresClause,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded The set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
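/// For illustration, the nested-name-specifier below names an unexpanded
/// pack and is diagnosed:
/// \code
///   template<typename... Ts> struct X {
///     void f() { Ts::g(); }  // error: 'Ts' is an unexpanded parameter pack
///   };
/// \endcode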
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
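/// For instance (illustrative only), the 'Ts...' in a function parameter
/// list forms such a pack expansion:
/// \code
/// template<typename ...Ts> void f(Ts ...args);
/// \endcode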
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
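/// For illustration (an assumed example, not from the original docs):
/// \code
/// template<typename ...Ts> struct Tup {};
/// template<typename ...Ts> void f(Tup<Ts...> *);
/// \endcode
/// With Ts bound to <int, float>, the expansion 'Ts...' produces two
/// arguments.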
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// to determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter.
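/// For example (illustrative, not from the original docs):
/// \code
/// template<typename T> void f(T, T);
/// f(1, 2.0); // T deduced as both 'int' and 'double'
/// \endcode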
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
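/// A hedged illustration of what gets recorded:
/// \code
/// template<typename T> void g(const T &);
/// g(42); // OriginalParamType 'const T &', OriginalArgType 'int', ArgIdx 0
/// \endcode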
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
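/// A minimal CTAD illustration (assumed, not part of the original docs):
/// \code
/// template<typename T> struct S { S(T); };
/// S s(42); // class template argument deduction uses an implicit guide
/// \endcode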
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
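/// For illustration (assumed):
/// \code
/// template<typename T, typename U = T *> struct S {};
/// S<int> s; // instantiates the default argument 'T *' as 'int *'
/// \endcode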
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provide the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// We are checking the constraints associated with a constrained entity or /// the constraint expression of a concept. This includes the checks that /// atomic constraints have the type 'bool' and that they can be constant /// evaluated. ConstraintsCheck, /// We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// We are normalizing a constraint expression. ConstraintNormalization, /// We are substituting into the parameter mapping of an atomic constraint /// during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity.
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema?
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() returns true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction.
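/// A hedged usage sketch of the RAII pattern shared by these constructors,
/// as used from within a Sema member function (variable names are
/// assumptions):
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true; // depth limit exceeded; already diagnosed
/// \endcode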
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
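/// For illustration (assumed): normalizing the constraint of
/// \code
/// template<typename T> concept Fits = sizeof(T) <= 4 && alignof(T) <= 4;
/// \endcode
/// decomposes the conjunction into its two atomic constraints.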
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// number of recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, points to the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5.
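/// For illustration (assumed), the operand of sizeof is such a context:
/// \code
/// int g();
/// int n = sizeof(g()); // g() is named but never evaluated
/// \endcode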
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
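/// A hedged usage sketch (EPI and NumParams are assumed surrounding state):
/// \code
/// ExtParameterInfoBuilder InfoBuilder;
/// InfoBuilder.set(1, Info); // index 0 is filled with the default value
/// EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
/// \endcode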
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
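/// A minimal illustration (assumed) of the C++20 rewrite this supports:
/// \code
/// struct X {
///   auto operator<=>(const X &) const = default; // implies operator==
/// };
/// \endcode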
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
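/// For illustration (assumed), the kind of type this builds:
/// \code
/// NSArray<NSString *> *strings; // specialized Objective-C object pointer
/// \endcode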
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
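  /// For illustration (receiver and selector names are hypothetical), the
  /// three kinds correspond to source forms such as:
  /// \code
  ///   [super init];        // ObjCSuperMessage
  ///   [myObject count];    // ObjCInstanceMessage
  ///   [NSString string];   // ObjCClassMessage
  /// \endcode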
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
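  /// As a sketch of the intent (variable names are hypothetical): once a
  /// pragma such as the following is active, reassociation is permitted and
  /// the semantics are no longer "precise":
  /// \code
  ///   #pragma clang fp reassociate(on)
  ///   float Sum = (A + B) + C;  // may be reassociated
  /// \endcode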
  bool isPreciseFPEnabled() {
    return !CurFPFeatures.getAllowFPReassociate() &&
           !CurFPFeatures.getNoSignedZero() &&
           !CurFPFeatures.getAllowReciprocal() &&
           !CurFPFeatures.getAllowApproxFunc();
  }

  /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
  void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                               PragmaFloatControlKind Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                         SourceLocation PragmaLoc);

  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo *VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo *WeakName, SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo *WeakName,
                                  IdentifierInfo *AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo *WeakName, IdentifierInfo *AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);

  /// Called on well formed
  /// \#pragma clang fp reassociate
  void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);

  /// ActOnPragmaFEnvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);

  /// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
  void ActOnPragmaFPExceptions(SourceLocation Loc,
                               LangOptions::FPExceptionModeKind);

  /// Called to set constant rounding mode for floating point operations.
  void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);

  /// Called to set exception behavior for floating point operations.
  void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                      bool IsPackExpansion);
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                      bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                            Expr *OE);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *ParamExpr);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

  /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
  void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
                         StringRef Annot, MutableArrayRef<Expr *> Args);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                           Expr *MaxThreads, Expr *MinBlocks);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                   bool InInstantiation = false);

  void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                           ParameterABI ABI);

  enum class RetainOwnershipKind { NS, CF, OS };
  void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                      Expr *Min, Expr *Max);

  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
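  /// A usage sketch (the kernel name is hypothetical); in source the
  /// attribute is spelled:
  /// \code
  ///   __attribute__((amdgpu_waves_per_eu(2, 4))) void kern(void);
  /// \endcode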
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. 
/// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. 
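  /// A minimal sketch of such a directive (the callee is hypothetical):
  /// \code
  ///   #pragma omp metadirective when(device={kind(gpu)}: parallel) \
  ///       default(single)
  ///   { compute(); }
  /// \endcode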
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. 
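  /// A minimal sketch (type and member names are hypothetical):
  /// \code
  ///   struct Vec { int Len; double *Data; };
  ///   #pragma omp declare mapper(struct Vec V) map(V.Len, V.Data[0:V.Len])
  /// \endcode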
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
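  /// For illustration (the callee is hypothetical):
  /// \code
  ///   #pragma omp parallel num_threads(4)
  ///   { work(); }  // the associated statement, \p AStmt
  /// \endcode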
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. 
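  /// For illustration (the callees are hypothetical):
  /// \code
  ///   #pragma omp parallel sections
  ///   {
  ///     #pragma omp section
  ///     taskA();
  ///     #pragma omp section
  ///     taskB();
  ///   }
  /// \endcode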
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp interop'.
  StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp dispatch' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp masked' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp loop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPGenericLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);

  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type,
                             bool IsDeclareSimd = false);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks '\#pragma omp declare variant' variant function and original
  /// functions after parsing of the associated method/function.
  /// \param DG Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The trait info object representing the match clause.
  /// \param NumAppendArgs The number of omp_interop_t arguments to account for
  /// in checking.
  /// \returns None if the function/variant function are not compatible with
  /// the pragma, otherwise a pair of the original function and the variant
  /// ref expression.
  Optional<std::pair<FunctionDecl *, Expr *>>
  checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                    OMPTraitInfo &TI, unsigned NumAppendArgs,
                                    SourceRange SR);

  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The context traits associated with the function variant.
  /// \param AdjustArgsNothing The list of 'nothing' arguments.
  /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
  /// \param AppendArgs The list of 'append_args' arguments.
  /// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
  /// \param AppendArgsLoc The Location of an 'append_args' clause.
  /// \param SR The SourceRange of the 'declare variant' directive.
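  ///
  /// A minimal sketch (function names are hypothetical):
  /// \code
  ///   int base_gpu(int X);
  ///   #pragma omp declare variant(base_gpu) match(device={kind(gpu)})
  ///   int base(int X);
  /// \endcode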
  void ActOnOpenMPDeclareVariantDirective(
      FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
      ArrayRef<Expr *> AdjustArgsNothing,
      ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
      ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
      SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
      SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'align' clause.
  OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'sizes' clause.
  OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'full' clause.
  OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'partial' clause.
  OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'detach' clause.
  OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. 
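  /// A minimal sketch of the 'init' clause on an interop directive (the
  /// variable name is hypothetical):
  /// \code
  ///   omp_interop_t Obj = omp_interop_none;
  ///   #pragma omp interop init(targetsync : Obj)
  /// \endcode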
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'filter' clause. OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause.
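/// Example (illustrative): for
///
/// \code
/// #pragma omp parallel private(a, b)
/// \endcode
///
/// \p VarList carries one reference expression per listed variable, and this
/// callback builds the resulting OMPPrivateClause; a hedged sketch of
/// typical input.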
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. 
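/// Example (illustrative): for
///
/// \code
/// #pragma omp target device(device_num : 1)
/// \endcode
///
/// \p Modifier encodes the parsed 'device_num' modifier and \p Device is the
/// expression '1'; a hedged sketch of typical input.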
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. 
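/// Example (illustrative): for
///
/// \code
/// #pragma omp target uses_allocators(omp_default_mem_alloc)
/// \endcode
///
/// \p Data holds one UsesAllocatorsData entry whose Allocator names
/// 'omp_default_mem_alloc' and whose AllocatorTraits is null; a hedged
/// sketch of typical input.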
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there.
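/// For example (illustrative), in 'const X &r = X();' the X() prvalue is
/// materialized so that the reference has an object to bind to.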
ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension.
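/// For example (illustrative), C code such as 'int *p = 42;' is accepted
/// with a warning and classified with this value.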
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointer types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ, e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values.
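/// For example (illustrative), given 'enum E { A, B };', assigning the
/// constant 4 to an lvalue of type E falls outside the value range of E
/// and is diagnosed here.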
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
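/// For example (illustrative), an addition whose operands are a struct and
/// an int has no valid interpretation, so the corresponding checker below
/// diagnoses the operands and returns a null QualType.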
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
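/// Example (illustrative): given
///
/// \code
/// typedef float float4 __attribute__((ext_vector_type(4)));
/// float4 a, b;
/// \endcode
///
/// 'a + b' is checked here and yields the common vector type float4; a
/// hedged sketch of typical input.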
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrices of the same dimensions, i.e. when they // have the same number of rows and columns. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size.
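// For example (illustrative), with 'typedef int v4i
// __attribute__((vector_size(16)));' a cast between v4i and a 128-bit
// integer type is accepted, while a cast to a 64-bit integer is rejected.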
// returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened.
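/// (Illustrative: e.g. returning a plain 'NSString *' from a method whose
/// family gives it a related result type, such as an 'init' method, can
/// reach this path.)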
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; QualType PreferredConditionType(ConditionKind K) const { return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy; } ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK, bool MissingOK = false); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic.
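/// For example (illustrative), narrowing the constant 300 to an unsigned
/// 8-bit quantity overflows (300 > 255) and emits \p DiagID.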
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map.
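/// (Illustrative: when a deferred diagnostic finally fires, this map can be
/// walked caller-by-caller to attach notes for the call chain that forced
/// the function to be emitted.)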
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null.
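/// For example (illustrative), a '__global__' kernel is identified as
/// CFT_Global, and a function marked both '__host__' and '__device__' as
/// CFT_HostDevice.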
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are by default __host__ __device__ functions unless they /// have an explicit __host__ or __device__ attribute.
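/// For example (illustrative), a plain '[](){ ... }' lambda written in CUDA
/// code becomes __host__ __device__, while '[] __device__ () { ... }' keeps
/// its explicit target.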
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure a kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration.
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
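/// For example (illustrative), with 'struct Point { int x, y; };' and the
/// partial initializer 'Point p = {.x = 1, .' the completion point offers
/// the not-yet-designated field 'y'; a hedged sketch of the intended use.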
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
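// Example (illustrative): a client such as clangd reaches the entry points
// above through the code-completion consumer; e.g. completing after '.' in
// a member access funnels into CodeCompleteMemberReferenceExpr with the
// base expression and the contextually preferred type.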
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. 
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. 
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, llvm::DenseSet<QualType> Visited, ValueDecl *DeclToCheck); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
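The EnterExpressionEvaluationContext class above is a plain push/pop RAII guard: the constructor pushes an evaluation-context record and the destructor pops it, so contexts stay balanced across early returns. A minimal standalone sketch of the same pattern, with all names (ContextStack, ContextGuard) hypothetical and no dependency on clang:

#include <cassert>
#include <vector>

// Standalone sketch of the push/pop RAII pattern used by
// EnterExpressionEvaluationContext: push a context record on
// construction, pop it on destruction, so the stack stays balanced
// across early returns.
struct ContextStack {
  std::vector<int> contexts;
  void push(int c) { contexts.push_back(c); }
  void pop() { contexts.pop_back(); }
};

class ContextGuard {
  ContextStack &S;
  bool Entered;
public:
  ContextGuard(ContextStack &S, int Ctx, bool ShouldEnter = true)
      : S(S), Entered(ShouldEnter) {
    if (Entered) S.push(Ctx);
  }
  ~ContextGuard() {
    if (Entered) S.pop();
  }
};

int main() {
  ContextStack S;
  {
    ContextGuard G(S, /*Unevaluated=*/1);
    assert(S.contexts.size() == 1);
  } // guard pops here
  assert(S.contexts.empty());
}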
8.race1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 200
#define N 200
int main() {
  double A[M], B[M][N], C[N], sum0 = 0.0;
  for (int i = 0; i < M; i++) {
#pragma omp parallel for // reduction(+:sum0)
    for (int j = 0; j < N; j++) {
      sum0 += B[i][j] * C[j];
    }
    A[i] = sum0;
  }
}
// CHECK: Data Race detected
// END
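The race reported above is on sum0: every iteration of the parallel j-loop updates it without synchronization. Enabling the reduction clause that the test leaves commented out removes the race while preserving the running-total semantics, since each thread accumulates a zero-initialized private copy that OpenMP adds back into sum0 after the loop. A minimal fixed variant:

// Fixed variant of the kernel above: reduction(+:sum0) gives each thread
// a private sum0 and combines the copies after the loop, so the
// cross-row running total is preserved without a data race.
#include <omp.h>
#define M 200
#define N 200
int main() {
  double A[M], B[M][N], C[N], sum0 = 0.0;
  for (int i = 0; i < M; i++) {
#pragma omp parallel for reduction(+ : sum0)
    for (int j = 0; j < N; j++) {
      sum0 += B[i][j] * C[j];
    }
    A[i] = sum0;
  }
}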
core_math.h
// == mojo ==================================================================== // // Copyright (c) [email protected]. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. // // ============================================================================ // core_math.h: defines matrix class and math functions // ==================================================================== mojo == #pragma once #include <math.h> #include <string.h> #include <string> #include <cstdlib> #include <random> #include <algorithm> #include <immintrin.h> namespace mojo { enum pad_type { zero = 0, edge = 1, median_edge = 2 }; inline float dot(const float *x1, const float *x2, const int size) { switch (size) { case 1: return x1[0] * x2[0]; case 2: return x1[0] * x2[0] + x1[1] * x2[1]; case 3: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]; case 4: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3]; case 5: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3] + x1[4] * x2[4]; default: float v = 0; for (int i = 0; i<size; i++) v += x1[i] * x2[i]; return v; }; } inline float unwrap_2d_dot(const float *x1, const float *x2, const int size, int stride1, int stride2) { float v=0; for(int j=0; j<size; j++) v+= dot(&x1[stride1*j],&x2[stride2*j],size); return v; } // second item is rotated 180 (this is a convolution) inline float dot_rot180(const float *x1, const float *x2, const int size) { switch(size) { case 1: return x1[0]*x2[0]; case 2: return x1[0]*x2[1]+x1[1]*x2[0]; case 3: return x1[0]*x2[2]+x1[1]*x2[1]+x1[2]*x2[0]; case 4: return x1[0]*x2[3]+x1[1]*x2[2]+x1[2]*x2[1]+x1[3]*x2[0]; case 5: return x1[0]*x2[4]+x1[1]*x2[3]+x1[2]*x2[2]+x1[3]*x2[1]+x1[4]*x2[0]; default: float v=0; for(int i=0; i<size; i++) { v+=x1[i]*x2[size-i-1]; } return v; }; } inline float unwrap_2d_dot_rot180(const float *x1, const float *x2, const int size, int stride1, int stride2) { float v=0; for(int j=0; j<size; j++) { v+= dot_rot180(&x1[stride1*j],&x2[stride2*(size-j-1)],size); } return v; } inline void unwrap_aligned_NxN(const int N, float *aligned_out, const float *in, const int in_size, const int stride = 1) { const int node_size = (in_size - N)/stride + 1; int c1 = 0; int off = 0; const int inc_off = N*N*8; for (int j = 0; j < node_size; j += 1) // intput h { for (int i = 0; i < node_size; i += 1) // intput w { const float *tn = in + j*in_size + i; if(N==5) { for (int k = 0; k < 5; k++) { aligned_out[c1 + 0 + k * 40 + off] = tn[0 + 0 + 
in_size*k]; aligned_out[c1 + 8 + k * 40 + off] = tn[0 + 1 + in_size*k]; aligned_out[c1 + 16 + k * 40 + off] = tn[0 + 2 + in_size*k]; aligned_out[c1 + 24 + k * 40 + off] = tn[0 + 3 + in_size*k]; aligned_out[c1 + 32 + k * 40 + off] = tn[0 + 4 + in_size*k]; } } else if(N==3) { aligned_out[c1 + off] = tn[0]; aligned_out[c1 + 8 + off] = tn[0 + 1]; aligned_out[c1 + 16 + off] = tn[0 + 2]; aligned_out[c1 + 24 + off] = tn[0 + in_size]; aligned_out[c1 + 32 + off] = tn[0 + 1 + in_size]; aligned_out[c1 + 40 + off] = tn[0 + 2 + in_size]; aligned_out[c1 + 48 + off] = tn[0 + 2 * in_size]; aligned_out[c1 + 56 + off] = tn[0 + 1 + 2 * in_size]; aligned_out[c1 + 64 + off] = tn[0 + 2 + 2 * in_size]; } else { int cnt=0; for (int k = 0; k < N; k++) { for (int m = 0; m < N; m++) { aligned_out[c1 + cnt*8 + off] = tn[0 + m + in_size*k]; cnt++; } } } off++; if (off > 7) { off = 0; c1 += inc_off; } } } } inline void dotsum_unwrapped_NxN(const int N, const float *im, const float *filter_ptr, float *out, const int outsize) { const int NN=N*N; for (int j = 0; j < outsize; j += 8) { float *c = out+j; for(int i=0; i<NN; i++) { const float f = filter_ptr[i]; c[0]+=im[0]*f; c[1]+=im[1]*f; c[2]+=im[2]*f; c[3]+=im[3]*f; c[4]+=im[4]*f; c[5]+=im[5]*f; c[6]+=im[6]*f; c[7]+=im[7]*f; im+=8; } } } #ifdef MOJO_AVX inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; // multiply filter a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0); a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1); // add result to output a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 32; } _mm256_zeroupper(); } inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]); const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]); const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]); const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); for (int j = 0; j < outsize; j += 8)//stride) // intput w { __m256 a, c0, c1; // multiply filter a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0); a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1); a 
= _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1); // add result to output a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 72; } _mm256_zeroupper(); } inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]); const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]); const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]); const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]); const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]); const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]); const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]); const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]); const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]); const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]); for (int j = 0; j < outsize; j += 8)//stride) // intput w { __m256 a, c0, c1; // multiply filter a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0); a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1); // add result to output a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 128; } _mm256_zeroupper(); } inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]); const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]); const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]); const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]); const __m256 f10 = 
_mm256_broadcast_ss(&filter_ptr[10]); const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]); const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]); const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]); const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]); const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]); const __m256 f16 = _mm256_broadcast_ss(&filter_ptr[16]); const __m256 f17 = _mm256_broadcast_ss(&filter_ptr[17]); const __m256 f18 = _mm256_broadcast_ss(&filter_ptr[18]); const __m256 f19 = _mm256_broadcast_ss(&filter_ptr[19]); const __m256 f20 = _mm256_broadcast_ss(&filter_ptr[20]); const __m256 f21 = _mm256_broadcast_ss(&filter_ptr[21]); const __m256 f22 = _mm256_broadcast_ss(&filter_ptr[22]); const __m256 f23 = _mm256_broadcast_ss(&filter_ptr[23]); const __m256 f24 = _mm256_broadcast_ss(&filter_ptr[24]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0); a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 128); c1 = _mm256_mul_ps(a, f16); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 136); c1 = _mm256_mul_ps(a, f17); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 144); c1 = _mm256_mul_ps(a, f18); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 152); c1 = _mm256_mul_ps(a, f19); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 160); c1 = _mm256_mul_ps(a, f20); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 168); c1 = _mm256_mul_ps(a, f21); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 176); c1 = _mm256_mul_ps(a, f22); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 184); c1 = _mm256_mul_ps(a, f23); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 192); c1 = _mm256_mul_ps(a, f24); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 200; } _mm256_zeroupper(); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); __m256 f[49];//=new __m256(s); for(int i=0; i<49; i++) f[i]= _mm256_broadcast_ss(&filter_ptr[i]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f[0]); for(int i=1; i<49;i++) { a = _mm256_load_ps(_img + 
8*i); c1 = _mm256_mul_ps(a, f[i]); c0 = _mm256_add_ps(c0, c1); } a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 49*8; } _mm256_zeroupper(); //delete [] f; } #else // no AVX inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(2, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(3, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(4, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(5, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(7, _img, filter_ptr, out, outsize); } #endif // matrix class --------------------------------------------------- // should use opencv if available // class matrix { int _size; int _capacity; float *_x_mem; void delete_x() { delete[] _x_mem; x = NULL; _x_mem = NULL; } // 4 extra for alignment and 4 for 3 padding for SSE //float *new_x(const int size) { _x_mem = new float[size + 4+3]; x = (float *)(((uintptr_t)_x_mem + 16) & ~(uintptr_t)0x0F); return x; } // avx mem aligment float *new_x(const int size) { _x_mem = new float[size + 8 + 7]; x = (float *)(((uintptr_t)_x_mem + 32) & ~(uintptr_t)0x1F); return x; } public: std::string _name; int cols, rows, chans; int chan_stride; int chan_aligned; float *x; // size must be divisible by 8 for AVX virtual int calc_chan_stride(int w, int h) { if (chan_aligned) { int s = w*h; const int remainder = s % 8; if (remainder > 0) s += 8 - remainder; return s; } else return w*h; } matrix( ): cols(0), rows(0), chans(0), _size(0), _capacity(0), chan_stride(0), x(NULL), chan_aligned(0)/*, empty_chan(NULL)*/{} matrix( int _w, int _h, int _c=1, const float *data=NULL, int align_chan=0): cols(_w), rows(_h), chans(_c) { chan_aligned = align_chan; chan_stride = calc_chan_stride(cols, rows); _size= chan_stride*chans; _capacity=_size; x = new_x(_size); if(data!=NULL) memcpy(x,data,_size*sizeof(float)); } // copy constructor - deep copy matrix( const matrix &m) : cols(m.cols), rows(m.rows), chan_aligned(m.chan_aligned), chans(m.chans), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size) {x = new_x(_size); memcpy(x,m.x,sizeof(float)*_size); /*empty_chan = new unsigned char[chans]; memcpy(empty_chan, m.empty_chan, chans);*/} // { v=m.v; x=(float*)v.data();} // copy and pad constructor matrix( const matrix &m, int pad_cols, int pad_rows, mojo::pad_type padding= mojo::zero, int threads=1) : cols(m.cols), rows(m.rows), chans(m.chans), chan_aligned(m.chan_aligned), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size) { x = new_x(_size); memcpy(x, m.x, sizeof(float)*_size); *this = pad(pad_cols, pad_rows, padding, threads); } ~matrix() { if (x) delete_x(); } matrix get_chans(int start_channel, int num_chans=1) const { return matrix(cols,rows,num_chans,&x[start_channel*chan_stride]); } // if edge_pad==0, then the padded area is just 0. 
// if edge_pad==1 it fills with edge pixel colors // if edge_pad==2 it fills with median edge pixel color matrix pad(int dx, int dy, mojo::pad_type edge_pad = mojo::zero, int threads=1) const { return pad(dx, dy, dx, dy, edge_pad, threads); } matrix pad(int dx, int dy, int dx_right, int dy_bottom, mojo::pad_type edge_pad = mojo::zero, int threads=1) const { matrix v(cols+dx+dx_right,rows+dy+dy_bottom,chans);//,NULL,this->chan_aligned); v.fill(0); //float *new_x = new float[chans*w*h]; #pragma omp parallel for num_threads(threads) for(int k=0; k<chans; k++) { const int v_chan_offset=k*v.chan_stride; const int chan_offset=k*chan_stride; // find median color of perimeter float median = 0.f; if (edge_pad == mojo::median_edge) { int perimeter = 2 * (cols + rows - 2); std::vector<float> d(perimeter); for (int i = 0; i < cols; i++) { d[i] = x[i+ chan_offset]; d[i + cols] = x[i + cols*(rows - 1)+ chan_offset]; } for (int i = 1; i < (rows - 1); i++) { d[i + cols * 2] = x[cols*i+ chan_offset]; // file from back so i dont need to cal index d[perimeter - i] = x[cols - 1 + cols*i+ chan_offset]; } std::nth_element(d.begin(), d.begin() + perimeter / 2, d.end()); median = d[perimeter / 2]; //for (int i = 0; i < v.rows*v.cols; i++) v.x[v_chan_offset + i] = solid_fill; } for(int j=0; j<rows; j++) { memcpy(&v.x[dx+(j+dy)*v.cols+v_chan_offset], &x[j*cols+chan_offset], sizeof(float)*cols); if(edge_pad== mojo::edge) { // do left/right side for(int i=0; i<dx; i++) v.x[i+(j+dy)*v.cols+v_chan_offset]=x[0+j*cols+chan_offset]; for (int i = 0; i<dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = x[(cols - 1) + j*cols + chan_offset]; } else if (edge_pad == mojo::median_edge) { for (int i = 0; i < dx; i++) v.x[i + (j + dy)*v.cols + v_chan_offset] = median; for (int i = 0; i < dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = median; } } // top bottom pad if(edge_pad== mojo::edge) { for(int j=0; j<dy; j++) memcpy(&v.x[(j)*v.cols+v_chan_offset],&v.x[(dy)*v.cols+v_chan_offset], sizeof(float)*v.cols); for (int j = 0; j<dy_bottom; j++) memcpy(&v.x[(j + dy + rows)*v.cols + v_chan_offset], &v.x[(rows - 1 + dy)*v.cols + v_chan_offset], sizeof(float)*v.cols); } if (edge_pad == mojo::median_edge) { for (int j = 0; j<dy; j++) for (int i = 0; i<v.cols; i++) v.x[i + j*v.cols + v_chan_offset] = median; for (int j = 0; j<dy_bottom; j++) for (int i = 0; i<v.cols; i++) v.x[i + (j + dy + rows)*v.cols + v_chan_offset] = median; } } return v; } matrix crop(int dx, int dy, int w, int h, int threads=1) const { matrix v(w,h,chans); #pragma omp parallel for num_threads(threads) for(int k=0; k<chans; k++) { for(int j=0; j<h; j++) { memcpy(&v.x[j*w+k*v.chan_stride], &x[dx+(j+dy)*cols+k*chan_stride], sizeof(float)*w); } } return v; } mojo::matrix shift(int dx, int dy, mojo::pad_type edge_pad=mojo::zero) { int orig_cols=cols; int orig_rows=rows; int off_x=abs(dx); int off_y=abs(dy); mojo::matrix shifted= pad(off_x, off_y, edge_pad); return shifted.crop(off_x-dx, off_y-dy,orig_cols,orig_rows); } mojo::matrix flip_cols() { mojo::matrix v(cols,rows,chans); for(int k=0; k<chans; k++) for(int j=0; j<rows; j++) for(int i=0; i<cols; i++) v.x[i+j*cols+k*chan_stride]=x[(cols-i-1)+j*cols+k*chan_stride]; return v; } mojo::matrix flip_rows() { mojo::matrix v(cols, rows, chans); for (int k = 0; k<chans; k++) for (int j = 0; j<rows; j++) memcpy(&v.x[(rows-1-j)*cols + k*chan_stride],&x[j*cols + k*chan_stride], cols*sizeof(float)); return v; } void clip(float min, float max) { int s = chan_stride*chans; for (int i = 0; i < 
s; i++) { if (x[i] < min) x[i] = min; if (x[i] > max) x[i]=max; } } void min_max(float *min, float *max, int *min_i=NULL, int *max_i=NULL) { int s = rows*cols; int mini = 0; int maxi = 0; for (int c = 0; c < chans; c++) { const int t = chan_stride*c; for (int i = t; i < t+s; i++) { if (x[i] < x[mini]) mini = i; if (x[i] > x[maxi]) maxi = i; } } *min = x[mini]; *max = x[maxi]; if (min_i) *min_i = mini; if (max_i) *max_i = maxi; } float mean() { const int s = rows*cols; int cnt = 0;// channel*s; float average = 0; for (int c = 0; c < chans; c++) { const int t = chan_stride*c; for (int i = 0; i < s; i++) average += x[i + t]; } average = average / (float)(s*chans); return average; } float remove_mean(int channel) { int s = rows*cols; int offset = channel*chan_stride; float average=0; for(int i=0; i<s; i++) average+=x[i+offset]; average= average/(float)s; for(int i=0; i<s; i++) x[i+offset]-=average; return average; } float remove_mean() { float m=mean(); int s = chan_stride*chans; //int offset = channel*s; for(int i=0; i<s; i++) x[i]-=m; return m; } void fill(float val) { for(int i=0; i<_size; i++) x[i]=val; } void fill_random_uniform(float range) { std::mt19937 gen(0); std::uniform_real_distribution<float> dst(-range, range); for (int i = 0; i<_size; i++) x[i] = dst(gen); } void fill_random_normal(float std) { std::mt19937 gen(0); std::normal_distribution<float> dst(0, std); for (int i = 0; i<_size; i++) x[i] = dst(gen); } // deep copy inline matrix& operator =(const matrix &m) { resize(m.cols, m.rows, m.chans, m.chan_aligned); memcpy(x,m.x,sizeof(float)*_size); // memcpy(empty_chan, m.empty_chan, chans); return *this; } int size() const {return _size;} void resize(int _w, int _h, int _c, int align_chans=0) { chan_aligned = align_chans; int new_stride = calc_chan_stride(_w,_h); int s = new_stride*_c; if(s>_capacity) { if(_capacity>0) delete_x(); _size = s; _capacity=_size; x = new_x(_size); } cols = _w; rows = _h; chans = _c; _size = s; chan_stride = new_stride; } // dot vector to 2d mat inline matrix dot_1dx2d(const matrix &m_2d) const { mojo::matrix v(m_2d.rows, 1, 1); for(int j=0; j<m_2d.rows; j++) v.x[j]=dot(x,&m_2d.x[j*m_2d.cols],_size); return v; } // += inline matrix& operator+=(const matrix &m2){ for(int i = 0; i < _size; i++) x[i] += m2.x[i]; return *this; } // -= inline matrix& operator-=(const matrix &m2) { for (int i = 0; i < _size; i++) x[i] -= m2.x[i]; return *this; } #ifndef MOJO_AVX // *= float inline matrix operator *=(const float v) { for (int i = 0; i < _size; i++) x[i] = x[i] * v; return *this; } #else inline matrix operator *=(const float v) { __m128 b; b = _mm_set_ps(v, v, v, v); for (int j = 0; j < _size; j += 4) _mm_store_ps(x + j, _mm_mul_ps(_mm_load_ps(x + j), b)); return *this; } #endif // *= matrix inline matrix operator *=(const matrix &v) { for (int i = 0; i < _size; i++) x[i] = x[i] * v.x[i]; return *this; } inline matrix operator *(const matrix &v) { matrix T(cols, rows, chans); for (int i = 0; i < _size; i++) T.x[i] = x[i] * v.x[i]; return T; } // * float inline matrix operator *(const float v) { matrix T(cols, rows, chans); for (int i = 0; i < _size; i++) T.x[i] = x[i] * v; return T; } // + float inline matrix operator +(const float v) { matrix T(cols, rows, chans); for (int i = 0; i < _size; i++) T.x[i] = x[i] + v; return T; } // + inline matrix operator +(matrix m2) { matrix T(cols,rows,chans); for(int i = 0; i < _size; i++) T.x[i] = x[i] + m2.x[i]; return T; } }; }// namespace
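For orientation, a minimal usage sketch of the matrix class defined above, assuming this header is included as "core_math.h": it builds a small single-channel matrix, pads it with edge replication via pad(), and reads back the mean.

// Illustrative sketch only: exercises the mojo::matrix constructor,
// fill(), pad() with mojo::edge padding, and mean() as defined above.
#include "core_math.h"
#include <cstdio>

int main() {
  mojo::matrix m(4, 4, 1);                  // 4x4 image, one channel
  m.fill(1.0f);                             // constant data
  mojo::matrix p = m.pad(2, 2, mojo::edge); // replicate edge pixels outward
  std::printf("padded %dx%d, mean=%f\n", p.cols, p.rows, p.mean());
  return 0;
}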
omp_fdtd-2d.c
#include "fdtd-2d.h" #include <omp.h> double bench_t_start, bench_t_end; static double rtclock(){ struct timeval Tp; int stat; stat = gettimeofday(&Tp, NULL); if(stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } void bench_timer_start(){ bench_t_start = omp_get_wtime(); } void bench_timer_stop(){ bench_t_end = omp_get_wtime(); } void bench_timer_print(){ printf("Time in seconds = %0.6lf\n", bench_t_end - bench_t_start); } static void init_array(int tmax, int nx, int ny, float ex[nx][ny], float ey[nx][ny], float hz[nx][ny], float _fict_[tmax]){ int i, j; for(i = 0; i < tmax; i++){ //printf("ThreadId: %d --- i: %d\n", omp_get_thread_num(), i); _fict_[i] = (float) i; } for(i = 0; i < nx; i++) for(j = 0; j < ny; j++){ ex[i][j] = ((float) i * (j + 1)) / nx; ey[i][j] = ((float) i * (j + 2)) / ny; hz[i][j] = ((float) i * (j + 3)) / nx; } } static void print_array(int nx, int ny, float ex[nx][ny], float ey[nx][ny], float hz[nx][ny]){ int i, j; fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "ex"); for(i = 0; i < nx; i++) for(j = 0; j < ny; j++){ if((i * nx + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2f ", ex[i][j]); } fprintf(stderr, "\nend dump: %s\n", "ex"); fprintf(stderr, "==END DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "ey"); for(i = 0; i < nx; i++) for(j = 0; j < ny; j++){ if((i * nx + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2f ", ey[i][j]); } fprintf(stderr, "\nend dump: %s\n", "ey"); fprintf(stderr, "begin dump: %s", "hz"); for(i = 0; i < nx; i++) for(j = 0; j < ny; j++){ if((i * nx + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2f ", hz[i][j]); } fprintf(stderr, "\nend dump: %s\n", "hz"); } static void kernel_fdtd_2d(int tmax, int nx, int ny, float ex[nx][ny], float ey[nx][ny], float hz[nx][ny], const float _fict_[tmax]){ int t, i, j; #pragma omp parallel private(t, i, j) { for(t = 0; t < tmax; t++){ #pragma omp for for(j = 0; j < ny; j++) ey[0][j] = _fict_[t]; #pragma omp for collapse(2) for(i = 1; i < nx; i++) for(j = 0; j < ny; j++) ey[i][j] = ey[i][j] - 0.5f * (hz[i][j] - hz[i - 1][j]); #pragma omp for collapse(2) for(i = 0; i < nx; i++) for(j = 1; j < ny; j++) ex[i][j] = ex[i][j] - 0.5f * (hz[i][j] - hz[i][j - 1]); #pragma omp for collapse(2) for(i = 0; i < nx - 1; i++) for(j = 0; j < ny - 1; j++) hz[i][j] = hz[i][j] - 0.7f * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]); } } } int main(int argc, char **argv){ int tmax = TMAX; int nx = NX; int ny = NY; float (*ex)[nx][ny]; ex = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float)); float (*ey)[nx][ny]; ey = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float)); float (*hz)[nx][ny]; hz = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float)); float (*_fict_)[tmax]; _fict_ = (float (*)[tmax]) malloc((tmax) * sizeof(float)); //int num_threads = argc > 1 ? atoi(argv[1]) : 1; //printf("num_threads: %d\n", num_threads); //omp_set_num_threads(num_threads); init_array(tmax, nx, ny, *ex, *ey, *hz, *_fict_); bench_timer_start(); kernel_fdtd_2d(tmax, nx, ny, *ex, *ey, *hz, *_fict_); //omp_get_max_threads() bench_timer_stop(); printf("==========================\n"); printf("THREADS: %d\n", omp_get_max_threads()); bench_timer_print(); printf("==========================\n\n"); if(argc > 42 && !strcmp(argv[0], "")) print_array(nx, ny, *ex, *ey, *hz); free((void *) ex); free((void *) ey); free((void *) hz); free((void *) _fict_); return 0; }
cafe_tree.c
/*! \page Tree Tree * \code{.sh} # tree NEWICK-formatted tree * \endcode * * \code{.sh} # tree -i treefile * \endcode * A NEWICK-formatted tree containing branch lengths and taxon names as they are specified in the \ref Load "input file". Branch lengths should be integer units and the tree should be ultrametric (all paths from root to tip should have the same length). If the tree is not ultrametric to a tolerance of .01%, a warning will be logged. Please note that there should be no spaces in the tree string, nor semicolons at the end of the line. Here is an example of an ultrametric tree followed by its NEWICK notation: \htmlonly <style>div.image img[src="simple_tree.png"]{width:600px;}</style> \endhtmlonly \image html simple_tree.png "(((chimp:6,human:6):81,(mouse:17,rat:17):70):6,dog:93)" width=\\textwidth */ #include "cafe.h" #include<stdlib.h> #include<math.h> #include<mathfunc.h> #include <chooseln_cache.h> #include "time.h" extern void __phylogeny_free_node(pTree ptree, pTreeNode ptnode, va_list ap1); extern pBirthDeathCacheArray probability_cache; extern struct chooseln_cache cache; pTreeNode cafe_tree_new_empty_node(pTree ptree) { pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)memory_new(1, sizeof(CafeNode) ); pcnode->likelihoods = (double*)memory_new(pcafe->size_of_factor,sizeof(double)); pcnode->viterbi = (int*)memory_new(pcafe->size_of_factor,sizeof(int)); pcnode->birth_death_probabilities.lambda = pcafe->lambda; pcnode->birth_death_probabilities.mu = pcafe->mu; pcnode->familysize = -1; pcnode->errormodel = NULL; phylogeny_clear_node((pPhylogenyNode)pcnode); return (pTreeNode)pcnode; } void cafe_tree_set_parameters(pCafeTree pcafe, family_size_range* range, double lambda) { int i; pArrayList nlist = pcafe->super.nlist; copy_range_to_tree(pcafe, range); pcafe->lambda = lambda; int rsize = pcafe->rfsize; int fsize = range->max - range->min + 1; int max_size = rsize > fsize ? 
rsize : fsize; if ( pcafe->size_of_factor < max_size ) { pcafe->size_of_factor = max_size; for ( i = 0; i < nlist->size ; i++ ) { pCafeNode pcnode = (pCafeNode)nlist->array[i]; pcnode->likelihoods = (double*)memory_realloc(pcnode->likelihoods,pcafe->size_of_factor,sizeof(double)); pcnode->viterbi = (int*)memory_realloc(pcnode->viterbi,pcafe->size_of_factor,sizeof(int)); } } } void __cafe_tree_free_node(pTree ptree, pTreeNode ptnode, va_list ap1) { va_list ap; if (ap1) va_copy(ap, ap1); pCafeNode pcnode = (pCafeNode)ptnode; if (pcnode->likelihoods) memory_free(pcnode->likelihoods); pcnode->likelihoods = NULL; if (pcnode->viterbi) memory_free(pcnode->viterbi); pcnode->viterbi = NULL; __phylogeny_free_node(ptree, ptnode, ap); if (ap1) va_end(ap1); } void cafe_tree_free(pCafeTree pcafe) { if ( pcafe->super.nlist ) { int i; for ( i = 0 ; i < pcafe->super.nlist->size ; i++ ) { __cafe_tree_free_node((pTree)pcafe, (pTreeNode)pcafe->super.nlist->array[i], NULL ); } } else { tree_traveral_prefix((pTree)pcafe,__cafe_tree_free_node); } pTree ptree = (pTree)pcafe; if ( *ptree->count == 0 && ptree->data ) vector_free( ((pVector)ptree->data), free ); tree_free(ptree); } /******************************************************************************* * Tree Output *******************************************************************************/ void cafe_tree_string_name(pString pstr, pPhylogenyNode ptnode) { char buf[STRING_BUF_SIZE]; int familysize = ((pCafeNode)ptnode)->familysize; int idx = 0; if ( ptnode->name || familysize >= 0 ) { if ( ptnode->name ) idx = sprintf(buf,"%s", ptnode->name); if ( familysize >= 0) sprintf(&buf[idx],"_%d", familysize ); string_add(pstr,buf); } } void cafe_tree_string_familysize_lambda(pString pstr, pPhylogenyNode ptnode) { int familysize = ((pCafeNode)ptnode)->familysize; if ( ptnode->branchlength <= 0 ) return; if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name ); if ( familysize >= 0) string_fadd(pstr,"<%d>", familysize ); double lambda = ((pCafeNode)ptnode)->birth_death_probabilities.lambda; string_fadd(pstr,"_%lf", lambda ); } void cafe_tree_string_familysize(pString pstr, pPhylogenyNode ptnode) { int familysize = ((pCafeNode)ptnode)->familysize; if ( ptnode->branchlength <= 0 ) return; if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name ); if ( familysize >= 0) string_fadd(pstr,"< %d >", familysize ); } void cafe_tree_string_lambda(pString pstr, pPhylogenyNode ptnode) { if ( ptnode->branchlength <= 0 ) return; if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name ); string_fadd(pstr,"_%lf", ((pCafeNode)ptnode)->birth_death_probabilities.lambda ); } void cafe_tree_string_id(pString pstr, pPhylogenyNode pnode) { if ( pnode->name ) string_fadd(pstr,"%s", pnode->name ); string_fadd(pstr,"<%d>", pnode->super.id ); } pString cafe_tree_string_with_familysize_lambda(pCafeTree pcafe) { return phylogeny_string((pTree)pcafe,cafe_tree_string_familysize_lambda); } pString cafe_tree_string_with_lambda(pCafeTree pcafe) { return phylogeny_string((pTree)pcafe,cafe_tree_string_lambda); } pString cafe_tree_string_with_familysize(pCafeTree pcafe) { return phylogeny_string((pTree)pcafe,cafe_tree_string_familysize); } pString cafe_tree_string_with_id(pCafeTree pcafe) { return phylogeny_string((pTree)pcafe,cafe_tree_string_id); } pString cafe_tree_string(pCafeTree pcafe) { return phylogeny_string((pTree)pcafe,cafe_tree_string_name); } void cafe_tree_string_print(pCafeTree pcafe) { pString pstr = cafe_tree_string(pcafe); printf("%s\n", pstr->buf ); string_free(pstr); } /** * 
\brief Set likelihood to 1 for actual value, 0 otherwise, or copy values from an existing errormodel * Copies likelihood values from an errormodel if one exists, * otherwise sets all likelihoods to 0 except for familysize, which is set to 1 */ void initialize_leaf_likelihoods(pTree ptree, pTreeNode ptnode) { pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)ptnode; if (pcnode->errormodel) { memset((void*)pcnode->likelihoods, 0, pcafe->size_of_factor*sizeof(double)); for (int j = 0; j<pcafe->size_of_factor; j++) { // conditional probability of measuring i=familysize when true count is j pcnode->likelihoods[j] = pcnode->errormodel->errormatrix[pcnode->familysize][j]; } } else { // number of likelihoods should be set from the tree's size_of_factor, // therefore the familysize must be less than this assert(pcnode->familysize >= 0 && pcnode->familysize < pcafe->size_of_factor); memset((void*)pcnode->likelihoods, 0, pcafe->size_of_factor*sizeof(double)); pcnode->likelihoods[pcnode->familysize] = 1; } } void compute_child_factor(pCafeTree pcafe, pCafeNode child, family_size_range* range, double *factors) { // p(node=c,child|s) = p(node=c|s)p(child|node=c) integrated over all c // remember child likelihood[c]'s never sum up to become 1 because they are likelihoods conditioned on c's. // incoming nodes to don't sum to 1. outgoing nodes sum to 1 if (!child->birthdeath_matrix) node_set_birthdeath_matrix(child, probability_cache, pcafe->k); assert(child->birthdeath_matrix != NULL); square_matrix_multiply(child->birthdeath_matrix, child->likelihoods, range->root_min, range->root_max, range->min, range->max, factors); } void compute_internal_node_likelihood(pTree ptree, pTreeNode ptnode) { pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)ptnode; family_size_range range; range.min = pcafe->range.min; range.max = pcafe->range.max; if (tree_is_root(ptree, ptnode)) { range.root_min = pcafe->range.root_min; range.root_max = pcafe->range.root_max; } else { range.root_min = pcafe->range.min; range.root_max = pcafe->range.max; } double *left_factor = memory_new(pcafe->size_of_factor, sizeof(double)); double *right_factor = memory_new(pcafe->size_of_factor, sizeof(double)); pCafeNode child1 = (pCafeNode)((pTreeNode)pcnode)->children->head->data; pCafeNode child2 = (pCafeNode)((pTreeNode)pcnode)->children->tail->data; #if _OPENMP > 201307 // tasks aren't supported by older OpenMP versions #pragma omp parallel #endif #pragma omp single nowait { #pragma omp task compute_child_factor(pcafe, child1, &range, left_factor); #pragma omp task compute_child_factor(pcafe, child2, &range, right_factor); #pragma omp taskwait int size = range.root_max - range.root_min + 1; assert(size <= pcafe->size_of_factor); for (int i = 0; i < size; i++) { pcnode->likelihoods[i] = left_factor[i] * right_factor[i]; } } memory_free(left_factor); memory_free(right_factor); } void free_probabilities(struct probabilities *probs) { if (probs->param_lambdas) { memory_free(probs->param_lambdas); probs->param_lambdas = NULL; } if (probs->param_mus) { memory_free(probs->param_mus); probs->param_mus = NULL; } } void compute_node_likelihoods(pTree ptree, pTreeNode ptnode, va_list ap1) { if (tree_is_leaf(ptnode)) { initialize_leaf_likelihoods(ptree, ptnode); } else { compute_internal_node_likelihood(ptree, ptnode); } } void compute_node_likelihoods_recursive(pTree ptree, pTreeNode ptnode) { if (!tree_is_leaf(ptnode)) { #pragma omp task { pTreeNode child1 = (pTreeNode)(ptnode)->children->head->data; 
compute_node_likelihoods_recursive(ptree, child1); } #pragma omp task { pTreeNode child2 = (pTreeNode)(ptnode)->children->tail->data; compute_node_likelihoods_recursive(ptree, child2); } } #pragma omp taskwait compute_node_likelihoods(ptree, ptnode, NULL); } void compute_tree_likelihoods(pCafeTree pcafe) { compute_node_likelihoods_recursive((pTree)pcafe, pcafe->super.root); } double* get_likelihoods(const pCafeTree pcafe) { return ((pCafeNode)pcafe->super.root)->likelihoods; } /** * \brief Initialize node with probability values that it may need. * If multiple lambdas are set, k_bd is set to an arraylist of matrices with probability values * In this case, values are set up to the value of num_lambdas * otherwise the value birthdeath_matrix is used * probability values are drawn from the cache argument, which should hold a variety * of possible values */ void node_set_birthdeath_matrix(pCafeNode pcnode, pBirthDeathCacheArray cache, int num_lambdas) { if (pcnode->super.branchlength <= 0) return; if (pcnode->birth_death_probabilities.param_lambdas) { if (pcnode->birth_death_probabilities.param_mus) { if (num_lambdas > 0) { for (int k = 0; k<num_lambdas; k++) { struct square_matrix* bd = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.param_mus[k]); arraylist_add(pcnode->k_bd, bd); } } else { pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.param_mus[0]); } } else { if (num_lambdas > 0) { for (int k = 0; k<num_lambdas; k++) { struct square_matrix* bd = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.mu); arraylist_add(pcnode->k_bd, bd); } } else { pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.mu); } } } else { pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.lambda, pcnode->birth_death_probabilities.mu); } } void add_key(pArrayList arr, double branchlength, double lambda, double mu) { for (int i = 0; i < arr->size; ++i) { struct BirthDeathCacheKey *key = (struct BirthDeathCacheKey *)arraylist_get(arr, i); if (key->branchlength == branchlength && key->lambda == lambda && key->mu == mu) return; } struct BirthDeathCacheKey *key = malloc(sizeof(struct BirthDeathCacheKey)); memset(key, 0, sizeof(struct BirthDeathCacheKey)); key->branchlength = branchlength; key->lambda = lambda; key->mu = mu; arraylist_add(arr, key); } void get_keys_from_node(pCafeNode pcnode, pArrayList arr, int num_lambdas) { if (pcnode->super.branchlength <= 0) return; if (pcnode->birth_death_probabilities.param_lambdas) { if (pcnode->birth_death_probabilities.param_mus) { if (num_lambdas > 0) { for (int k = 0; k<num_lambdas; k++) { add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.param_mus[k]); } } else { add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.param_mus[0]); } } else { if (num_lambdas > 0) { for (int k = 0; k<num_lambdas; k++) { add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], 
pcnode->birth_death_probabilities.mu); } } else { add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.mu); } } } else { add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.lambda, pcnode->birth_death_probabilities.mu); } } void do_node_set_birthdeath(pTree ptree, pTreeNode ptnode, va_list ap1) { va_list ap; va_copy(ap, ap1); pBirthDeathCacheArray cache = va_arg(ap, pBirthDeathCacheArray); va_end(ap); pCafeTree pcafe = (pCafeTree)ptree; node_set_birthdeath_matrix((pCafeNode)ptnode, cache, pcafe->k); } void gather_keys(pTree ptree, pTreeNode ptnode, va_list ap1) { va_list ap; va_copy(ap, ap1); pArrayList arr = va_arg(ap, pArrayList); va_end(ap); pCafeTree pcafe = (pCafeTree)ptree; get_keys_from_node((pCafeNode)ptnode, arr, pcafe->k); } /** * Set each node's birthdeath matrix based on its values of branchlength, lambdas, and mus **/ void cafe_tree_set_birthdeath(pCafeTree pcafe, int max_family_size) { pArrayList arr = arraylist_new(40); tree_traveral_prefix((pTree)pcafe, gather_keys, arr); pBirthDeathCacheArray bd_cache = birthdeath_cache_init(max_family_size, &cache); #pragma omp parallel #pragma omp for for (int i = 0; i < arr->size; ++i) { struct BirthDeathCacheKey* key = (struct BirthDeathCacheKey*)arraylist_get(arr, i); struct square_matrix *matrix = compute_birthdeath_rates(key->branchlength, key->lambda, key->mu, max_family_size); #pragma omp critical hash_table_add(bd_cache->table, key, sizeof(struct BirthDeathCacheKey), matrix, sizeof(struct square_matrix*)); } tree_traveral_prefix((pTree)pcafe, do_node_set_birthdeath, bd_cache); // free the cache without deleting the matrices void** keys = NULL; hash_table_get_keys(bd_cache->table, &keys); free(keys); hash_table_delete(bd_cache->table); memory_free(bd_cache); } void cafe_tree_node_copy(pTreeNode psrc, pTreeNode pdest) { pCafeNode pcsrc, pcdest; pcsrc = (pCafeNode)psrc; pcdest = (pCafeNode)pdest; phylogeny_node_copy(psrc,pdest); pcdest->birth_death_probabilities.lambda = pcsrc->birth_death_probabilities.lambda; pcdest->familysize = pcsrc->familysize; pcdest->birthdeath_matrix = pcsrc->birthdeath_matrix; } void __cafe_tree_copy_new_fill(pCafeTree psrc, pCafeTree pdest ) { pdest->size_of_factor = psrc->size_of_factor; pdest->range.min = psrc->range.min; pdest->range.max = psrc->range.max; pdest->range.root_min = psrc->range.root_min; pdest->range.root_max = psrc->range.root_max; pdest->lambda = psrc->lambda; pdest->rfsize = psrc->rfsize; } pCafeTree cafe_tree_copy(pCafeTree psrc) { pCafeTree pcafe = (pCafeTree)tree_copy((pTree)psrc, cafe_tree_new_empty_node, cafe_tree_node_copy ); __cafe_tree_copy_new_fill(psrc,pcafe); tree_build_node_list((pTree)pcafe); return pcafe; } pCafeTree cafe_tree_split(pCafeTree pcafe, int idx ) { pCafeTree psub = (pCafeTree)phylogeny_split_tree((pTree)pcafe,idx, __cafe_tree_free_node ); if ( psub ) { __cafe_tree_copy_new_fill(pcafe,psub); } cafe_tree_set_birthdeath(pcafe, probability_cache->maxFamilysize); cafe_tree_set_birthdeath(psub, probability_cache->maxFamilysize); return psub; } /******************************************************************************* * Random family size *******************************************************************************/ void __cafe_tree_node_random_familysize(pTree ptree, pTreeNode pnode, va_list ap1) { va_list ap; va_copy(ap, ap1); int *max = va_arg(ap, int*); int max_family_size = va_arg(ap, int); va_end(ap); if ( tree_is_root(ptree,pnode) ) return; double rnd = 
unifrnd(); double cumul = 0; pCafeNode pcnode = (pCafeNode)pnode; int parent_family_size = ((pCafeNode)pnode->parent)->familysize; int c = 0; for (; c < max_family_size-1; c++ ) { cumul += square_matrix_get(pcnode->birthdeath_matrix, parent_family_size, c); if ( cumul >= rnd ) break; } pcnode->familysize = c; if (*max < pcnode->familysize) { *max = pcnode->familysize; } } /** * Sets the family size of each node to random value between 0 and the tree's pbdc_array maxFamilySize. **/ int cafe_tree_random_familysize(pCafeTree pcafe, int rootFamilysize, int maxFamilySize) { int max = 0; ((pCafeNode)pcafe->super.root)->familysize = rootFamilysize; tree_traveral_prefix( (pTree)pcafe, __cafe_tree_node_random_familysize, &max, maxFamilySize); return max; } void initialize_leaf_likelihood_clustered(pTree ptree, pTreeNode ptnode) { int root_min, root_max; pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)ptnode; int s, i, j, k; if (tree_is_root(ptree, ptnode->parent)) { root_min = pcafe->range.root_min; root_max = pcafe->range.root_max; } else { root_min = pcafe->range.min; root_max = pcafe->range.max; } for (s = root_min, i = 0; s <= root_max; s++, i++) { for (k = 0; k < pcafe->k; k++) { if (pcnode->familysize < 0) { //fprintf(stderr, "family size not set\n"); pcnode->k_likelihoods[k][i] = 1; } else { if (pcnode->errormodel) { memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double)); for (j = 0; j<pcafe->size_of_factor; j++) { // conditional probability of measuring i=familysize when true count is j pcnode->k_likelihoods[k][j] = pcnode->errormodel->errormatrix[pcnode->familysize][j]; } } else { memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double)); pcnode->k_likelihoods[k][pcnode->familysize] = 1; } } } } } void compute_internal_node_likelihood_clustered(pTree ptree, pTreeNode ptnode) { family_size_range family_size; pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)ptnode; double *tree_factors[2]; tree_factors[0] = memory_new(pcafe->size_of_factor, sizeof(double)); tree_factors[1] = memory_new(pcafe->size_of_factor, sizeof(double)); int s, i, k; double lambda = -1; double mu = -1; if (tree_is_root(ptree, ptnode)) { family_size.root_min = pcafe->range.root_min; family_size.root_max = pcafe->range.root_max; family_size.min = pcafe->range.min; family_size.max = pcafe->range.max; } else { family_size.root_min = pcafe->range.min; family_size.root_max = pcafe->range.max; family_size.min = pcafe->range.min; family_size.max = pcafe->range.max; } int idx; double *factors[2] = { NULL, NULL }; pCafeNode child[2] = { (pCafeNode)((pTreeNode)pcnode)->children->head->data, (pCafeNode)((pTreeNode)pcnode)->children->tail->data }; for (k = 0; k < pcafe->k; k++) { lambda = pcnode->birth_death_probabilities.param_lambdas[k]; if (pcnode->birth_death_probabilities.param_mus) { mu = pcnode->birth_death_probabilities.param_mus[k]; } // for each child for (idx = 0; idx < 2; idx++) { factors[idx] = tree_factors[idx]; memset(factors[idx], 0, pcafe->size_of_factor * sizeof(double)); for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++) { for (int c = family_size.min, j = 0; c <= family_size.max; c++, j++) { factors[idx][i] += birthdeath_likelihood_with_s_c(s, c, child[idx]->super.branchlength, lambda, mu, NULL) * child[idx]->k_likelihoods[k][j]; } } } int size = family_size.root_max - family_size.root_min + 1; for (i = 0; i < size; i++) { pcnode->k_likelihoods[k][i] = factors[0][i] * factors[1][i]; } } 
memory_free(tree_factors[0]); memory_free(tree_factors[1]); } void compute_node_clustered_likelihood(pTree ptree, pTreeNode ptnode, va_list ap1) { va_list ap; va_copy(ap, ap1); struct chooseln_cache *ln_cache = va_arg(ap, struct chooseln_cache *); va_end(ap); pCafeTree pcafe = (pCafeTree)ptree; int maxFamilySize = MAX(pcafe->range.root_max, pcafe->range.max); if (!chooseln_is_init2(ln_cache)) { chooseln_cache_init2(ln_cache, maxFamilySize); } else if (get_chooseln_cache_size2(ln_cache) < maxFamilySize) { chooseln_cache_resize2(ln_cache, maxFamilySize); } if (tree_is_leaf(ptnode)) { initialize_leaf_likelihood_clustered(ptree, ptnode); } else { compute_internal_node_likelihood_clustered(ptree, ptnode); } } void __cafe_tree_node_compute_clustered_likelihood_using_cache(pTree ptree, pTreeNode ptnode, va_list ap1) { va_list ap; va_copy(ap, ap1); struct chooseln_cache *ln_cache = va_arg(ap, struct chooseln_cache *); va_end(ap); pCafeTree pcafe = (pCafeTree)ptree; pCafeNode pcnode = (pCafeNode)ptnode; double *tree_factors[2]; tree_factors[0] = memory_new(pcafe->size_of_factor, sizeof(double)); tree_factors[1] = memory_new(pcafe->size_of_factor, sizeof(double)); int size; int s, c, i, j, k; family_size_range family_size; double** bd = NULL; int maxFamilySize = MAX(pcafe->range.root_max, pcafe->range.max); if (!chooseln_is_init2(ln_cache)) { chooseln_cache_init2(ln_cache, maxFamilySize); } else if (get_chooseln_cache_size2(ln_cache) < maxFamilySize) { chooseln_cache_resize2(ln_cache, maxFamilySize); } if (tree_is_leaf(ptnode)) { if (tree_is_root(ptree, ptnode->parent)) { family_size.root_min = pcafe->range.root_min; family_size.root_max = pcafe->range.root_max; } else { family_size.root_min = pcafe->range.min; family_size.root_max = pcafe->range.max; } for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++) { // pcnode->likelihoods[i] = pcnode->bd[s][pcnode->familysize]; for (k = 0; k < pcafe->k; k++) { if (pcnode->familysize < 0) { //fprintf(stderr, "family size not set\n"); pcnode->k_likelihoods[k][i] = 1; } else { if (pcnode->errormodel) { memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double)); for (j = 0; j<pcafe->size_of_factor; j++) { // conditional probability of measuring i=familysize when true count is j pcnode->k_likelihoods[k][j] = pcnode->errormodel->errormatrix[pcnode->familysize][j]; } } else { //bd = pcnode->k_bd->array[k]; //pcnode->k_likelihoods[k][i] = bd[s][pcnode->familysize]; memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double)); pcnode->k_likelihoods[k][pcnode->familysize] = 1; } } } } } else { if (tree_is_root(ptree, ptnode)) { family_size.root_min = pcafe->range.root_min; family_size.root_max = pcafe->range.root_max; family_size.min = pcafe->range.min; family_size.max = pcafe->range.max; } else { family_size.root_min = pcafe->range.min; family_size.root_max = pcafe->range.max; family_size.min = pcafe->range.min; family_size.max = pcafe->range.max; } int idx; double *factors[2] = { NULL, NULL }; pCafeNode child[2] = { (pCafeNode)((pTreeNode)pcnode)->children->head->data, (pCafeNode)((pTreeNode)pcnode)->children->tail->data }; for (k = 0; k < pcafe->k; k++) { // for each child for (idx = 0; idx < 2; idx++) { { factors[idx] = tree_factors[idx]; memset(factors[idx], 0, pcafe->size_of_factor * sizeof(double)); bd = child[idx]->k_bd->array[k]; for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++) { for (c = family_size.min, j = 0; c <= family_size.max; c++, j++) { factors[idx][i] += 
bd[s][c] * child[idx]->k_likelihoods[k][j]; } } } } size = family_size.root_max - family_size.root_min + 1; for (i = 0; i < size; i++) { pcnode->k_likelihoods[k][i] = factors[0][i] * factors[1][i]; } } } memory_free(tree_factors[0]); memory_free(tree_factors[1]); } void cafe_tree_node_free_clustered_likelihoods(pCafeParam param) { int i; pArrayList nlist = param->pcafe->super.nlist; pTree tlambda = param->lambda_tree; if (tlambda == NULL) { for (i = 0; i < nlist->size; i++) { pCafeNode pcnode = (pCafeNode)nlist->array[i]; free_probabilities(&pcnode->birth_death_probabilities); if (pcnode->k_likelihoods) { memory_free(pcnode->k_likelihoods); pcnode->k_likelihoods = NULL; } if (pcnode->k_bd) { arraylist_free(pcnode->k_bd, NULL); pcnode->k_bd = NULL; } } } } double** cafe_tree_clustered_likelihood(pCafeTree pcafe, struct chooseln_cache *ln_cache) { if (probability_cache) { tree_traveral_postfix((pTree)pcafe, __cafe_tree_node_compute_clustered_likelihood_using_cache, ln_cache); } else { tree_traveral_postfix((pTree)pcafe, compute_node_clustered_likelihood, ln_cache); } return ((pCafeNode)pcafe->super.root)->k_likelihoods; }
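The post-order task recursion above (compute_node_likelihoods_recursive) only runs in parallel if some caller has already opened a parallel region with a single construct; compute_tree_likelihoods as written invokes it directly, in which case the tasks execute immediately on the encountering thread. Below is a minimal self-contained sketch of the same fork-join pattern, with a hypothetical Node type and a scalar product standing in for pCafeNode and its likelihood vectors:

#include <cstdio>

struct Node {            // hypothetical stand-in for pCafeNode
  Node *left, *right;
  double value;          // stand-in for the likelihood vector
};

// Post-order reduction: children first (as tasks), then the parent,
// mirroring compute_node_likelihoods_recursive above. Trees are binary,
// as in the head/tail child access used by the CAFE code.
static double reduce(Node *n) {
  if (!n->left && !n->right) return n->value;   // leaf
  double l = 0.0, r = 0.0;
  #pragma omp task shared(l)
  l = reduce(n->left);
  #pragma omp task shared(r)
  r = reduce(n->right);
  #pragma omp taskwait          // both children finish before the parent combines
  return n->value = l * r;      // parent multiplies the child factors
}

int main(void) {
  Node leaves[2] = {{nullptr, nullptr, 2.0}, {nullptr, nullptr, 3.0}};
  Node root = {&leaves[0], &leaves[1], 0.0};
  double result = 0.0;
  #pragma omp parallel          // tasks need an enclosing parallel region...
  #pragma omp single            // ...entered by exactly one thread
  result = reduce(&root);
  printf("%f\n", result);       // prints 6.0
  return 0;
}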
sort.c
/*
 * sort.c
 *
 * @author: phdenzel
 *
 * Quicksort algorithms for DYAMA
 *
 * Parallel quicksort can cause bus error 10, because
 * some systems set the non-main stack size to < 1MB,
 * which is reached at N ~ 4096
 *
 */
#include "sort.h"
#include "world.h"

void swapP(particle_t *p1, particle_t *p2) {
  // swap the particles the two pointers refer to
  particle_t temp;
  temp = *p1;
  *p1 = *p2;
  *p2 = temp;
}

void swap(universe *u, int i, int j) {
  // swap elements i and j in particles
  particle_t temp;
  temp = u->particles[i];
  u->particles[i] = u->particles[j];
  u->particles[j] = temp;
}

int xpartition(universe *u, int left, int right) {
  // partition particles from left to right in x
  double pivot = u->particles[left].r.x;
  int i = left;
  int j = right+1;
  while (1) {
    // test the bound before the access so the scan cannot read past `right`
    do ++i; while (i <= right && u->particles[i].r.x <= pivot);
    do --j; while (u->particles[j].r.x > pivot);
    if (i >= j) break;
    swap(u, i, j);
  }
  swap(u, left, j);
  return j;
}

int ypartition(universe *u, int left, int right) {
  // partition particles from left to right in y
  double pivot = u->particles[left].r.y;
  int i = left;
  int j = right+1;
  while (1) {
    do ++i; while (i <= right && u->particles[i].r.y <= pivot);
    do --j; while (u->particles[j].r.y > pivot);
    if (i >= j) break;
    swap(u, i, j);
  }
  swap(u, left, j);
  return j;
}

int zpartition(universe *u, int left, int right) {
  // partition particles from left to right in z
  double pivot = u->particles[left].r.z;
  int i = left;
  int j = right+1;
  while (1) {
    do ++i; while (i <= right && u->particles[i].r.z <= pivot);
    do --j; while (u->particles[j].r.z > pivot);
    if (i >= j) break;
    swap(u, i, j);
  }
  swap(u, left, j);
  return j;
}

void xQuickSort(universe *u, int left, int right) {
  // quicksort particles by their x coordinate
  int j;
  if (left < right) {
    // divide and conquer; note that every recursion level opens a fresh
    // parallel region (see the stack-size caveat in the header comment)
    j = xpartition(u, left, right);
#pragma omp parallel sections
    {
#pragma omp section
      {
        xQuickSort(u, left, j-1);
      }
#pragma omp section
      {
        xQuickSort(u, j+1, right);
      }
    }
  }
}

void yQuickSort(universe *u, int left, int right) {
  // quicksort particles by their y coordinate
  int j;
  if (left < right) {
    // divide and conquer
    j = ypartition(u, left, right);
#pragma omp parallel sections
    {
#pragma omp section
      {
        yQuickSort(u, left, j-1);
      }
#pragma omp section
      {
        yQuickSort(u, j+1, right);
      }
    }
  }
}

void zQuickSort(universe *u, int left, int right) {
  // quicksort particles by their z coordinate
  int j;
  if (left < right) {
    // divide and conquer
    j = zpartition(u, left, right);
#pragma omp parallel sections
    {
#pragma omp section
      {
        zQuickSort(u, left, j-1);
      }
#pragma omp section
      {
        zQuickSort(u, j+1, right);
      }
    }
  }
}
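The header comment above warns that the nested parallel sections recursion can exhaust the per-thread stack around N ~ 4096. A common alternative is a task-based quicksort with a serial cutoff, sketched below on a plain double array; the function names and the CUTOFF value are illustrative, not part of sort.c:

#include <utility>

// Task-based quicksort with a cutoff: below the threshold the recursion
// stays serial, so task overhead and nesting depth stay bounded. This
// avoids opening a new `parallel sections` region at every level.
static void quicksort_tasks(double *a, int left, int right) {
  if (left >= right) return;
  double pivot = a[(left + right) / 2];
  int i = left, j = right;
  while (i <= j) {                        // Hoare-style partition
    while (a[i] < pivot) ++i;
    while (a[j] > pivot) --j;
    if (i <= j) std::swap(a[i++], a[j--]);
  }
  const int CUTOFF = 4096;                // echoes the N ~ 4096 note above
  if (right - left < CUTOFF) {
    quicksort_tasks(a, left, j);          // small range: plain recursion
    quicksort_tasks(a, i, right);
  } else {
    #pragma omp task
    quicksort_tasks(a, left, j);
    #pragma omp task
    quicksort_tasks(a, i, right);
    #pragma omp taskwait
  }
}

void quicksort(double *a, int n) {
  #pragma omp parallel                    // one team for the whole sort
  #pragma omp single nowait               // one thread seeds the task tree
  quicksort_tasks(a, 0, n - 1);
}                                         // implicit barrier completes all tasks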
dcsc.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.6 -------------------------------------------------*/ /* date: 6/15/2017 ---------------------------------------------*/ /* authors: Ariful Azad, Aydin Buluc --------------------------*/ /****************************************************************/ /* Copyright (c) 2010-2017, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _DCSC_H #define _DCSC_H #include <cstdlib> #include <vector> #include <limits> #include <cassert> #include "SpDefs.h" #include "SpHelper.h" #include "StackEntry.h" #include "MemoryPool.h" #include "promote.h" namespace combblas { template <class IT, class NT> class Dcsc { public: typedef NT value_type; typedef IT index_type; Dcsc (); Dcsc (IT nnz, IT nzcol); Dcsc (IT nnz, const std::vector<IT> & indices, bool isRow); //!< Create a logical matrix from (row/column) indices vector Dcsc (StackEntry<NT, std::pair<IT,IT> > * multstack, IT mdim, IT ndim, IT nnz); Dcsc (const Dcsc<IT,NT> & rhs); // copy constructor Dcsc<IT,NT> & operator=(const Dcsc<IT,NT> & rhs); // assignment operator Dcsc<IT,NT> & operator+=(const Dcsc<IT,NT> & rhs); // add and assign operator ~Dcsc(); bool operator==(const Dcsc<IT,NT> & rhs); template <typename NNT> operator Dcsc<IT,NNT>() const; //<! numeric type conversion template <typename NIT, typename NNT> operator Dcsc<NIT,NNT>() const; //<! index+numeric type conversion void EWiseMult(const Dcsc<IT,NT> & rhs, bool exclude); void SetDifference(const Dcsc<IT,NT> & rhs); //<! Aydin (June 2021): generalize this to any rhs NT type; as it isn't used anyway void EWiseScale(NT ** scaler); //<! 
scale elements of "this" with the elements dense rhs matrix template <typename IU, typename NU1, typename NU2> friend Dcsc<IU, typename promote_trait<NU1,NU2>::T_promote> EWiseMult(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B, bool exclude); // Note that the second parameter is a POINTER template <typename IU, typename NU1, typename NU2> friend Dcsc<IU, typename promote_trait<NU1,NU2>::T_promote> SetDifference(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B); // Note that the second parameter is a POINTER template <typename _UnaryOperation> void Apply(_UnaryOperation __unary_op) { //transform(numx, numx+nz, numx, __unary_op); #ifdef _OPENMP #pragma omp parallel for #endif for(IT i=0; i < nz; ++i) numx[i] = __unary_op(numx[i]); } template <typename _UnaryOperation, typename GlobalIT> Dcsc<IT,NT>* PruneI(_UnaryOperation __unary_op, bool inPlace, GlobalIT rowOffset, GlobalIT colOffset); template <typename _UnaryOperation> Dcsc<IT,NT>* Prune(_UnaryOperation __unary_op, bool inPlace); template <typename _BinaryOperation> Dcsc<IT,NT>* PruneColumn(NT* pvals, _BinaryOperation __binary_op, bool inPlace); template <typename _BinaryOperation> Dcsc<IT,NT>* PruneColumn(IT* pinds, NT* pvals, _BinaryOperation __binary_op, bool inPlace); IT AuxIndex(const IT colind, bool & found, IT * aux, IT csize) const; void RowSplit(int numsplits); void ColSplit(std::vector< Dcsc<IT,NT>* > & parts, std::vector<IT> & cuts); void ColConcatenate(std::vector< Dcsc<IT,NT>* > & parts, std::vector<IT> & offsets); void Split(Dcsc<IT,NT> * & A, Dcsc<IT,NT> * & B, IT cut); //! \todo{special case of ColSplit, to be deprecated...} void Merge(const Dcsc<IT,NT> * Adcsc, const Dcsc<IT,NT> * B, IT cut); //! \todo{special case of ColConcatenate, to be deprecated...} IT ConstructAux(IT ndim, IT * & aux) const; void Resize(IT nzcnew, IT nznew); template<class VT> void FillColInds(const VT * colnums, IT nind, std::vector< std::pair<IT,IT> > & colinds, IT * aux, IT csize) const; Dcsc<IT,NT> & AddAndAssign (StackEntry<NT, std::pair<IT,IT> > * multstack, IT mdim, IT ndim, IT nnz); template <typename _BinaryOperation> void UpdateDense(NT ** array, _BinaryOperation __binary_op) const; // update dense 2D array's entries with __binary_op using elements of "this" //! wrap object around pre-allocated arrays (possibly RDMA registered) Dcsc (IT * _cp, IT * _jc, IT * _ir, NT * _numx, IT _nz, IT _nzc, bool _memowned = true) : cp(_cp), jc(_jc), ir(_ir), numx(_numx), nz(_nz), nzc(_nzc), memowned(_memowned) {}; IT * cp; //!< The master array, size nzc+1 (keeps column pointers) IT * jc ; //!< col indices, size nzc IT * ir ; //!< row indices, size nz NT * numx; //!< generic values, size nz IT nz; IT nzc; //!< number of columns with at least one non-zero in them bool memowned; private: void getindices (StackEntry<NT, std::pair<IT,IT> > * multstack, IT & rindex, IT & cindex, IT & j, IT nnz); }; } #include "dcsc.cpp" #endif
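Of the members declared above, Apply() is the one defined inline in this header: it rewrites every stored value through a unary operator, with an OpenMP parallel for over the nz entries when compiled with OpenMP. A small usage sketch built on the array-wrapping constructor, compiled against CombBLAS, and assuming (as the memowned flag suggests) that the destructor then leaves the caller-owned arrays alone:

#include <cstdint>
#include <cstdio>
#include "dcsc.h"

int main() {
  // 2 nonzero columns, 3 nonzeros: column 0 holds rows {0,2}, column 4 holds row 1
  int64_t cp[]   = {0, 2, 3};        // column pointers, size nzc+1
  int64_t jc[]   = {0, 4};           // indices of the nonzero columns, size nzc
  int64_t ir[]   = {0, 2, 1};        // row indices, size nz
  double  numx[] = {1.0, 2.0, 3.0};  // stored values, size nz

  // Wrap the pre-allocated arrays; memowned=false signals they are ours.
  combblas::Dcsc<int64_t, double> A(cp, jc, ir, numx,
                                    /*nz=*/3, /*nzc=*/2, /*memowned=*/false);

  // Apply() maps the lambda over every stored value (parallel for over nz).
  A.Apply([](double x) { return 2.0 * x; });

  for (int i = 0; i < 3; ++i) printf("%g ", numx[i]);  // prints: 2 4 6
  printf("\n");
  return 0;
}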
util.c
/* This file contains functions for memory initialization and allocation and other helper functions. */ #include <stdio.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "mytypes.h" #include "util.h" #include "lecuyer.h" #include "pf3dbench.h" #ifdef OMP45_BUILD #include <cuda_runtime.h> #endif #include "runparm.h" #include "pf3dbenchvars.h" extern void *base_malloc(size_t nbytes); extern void *um_malloc(size_t nbytes); extern int ngrd; /* number of guard cells */ extern int nxa, nya, nza; /* allocated box dimensions */ extern long nthreads; int nthr_reported= 0; extern char RUN_TYPE[100]; static int real_report=0; static int complex_report=0; long errcnt= 0; size_t totalloc= 0; double *dtmp= 0; long dtmp_size= 0; double tpi= 6.28318530717959; real *tmp_ramp=0; int nthr_kern; void init_random(int flag) { if(flag) { use_random= 1; if(mp_rank == 0 || mp_rank == 1) puts("Will initialize using random numbers"); } else { use_random= 0; if(mp_rank == 0 || mp_rank == 1) puts("Will initialize using a linear span of the range"); } } void parse_args(int argc, char **argv) { /* Set the size of the domain, the number of threads, etc. using command line arguments, if supplied. */ nxl= 512; nyl= 384; nzl= 24; num_thr= 4; strcpy(RUN_TYPE, ""); /* Command line arguments take precedence in setting nx, ny, nz */ if(argc > 1) { /* have at least nxl */ sscanf(argv[1], "%d", &nxl); if(argc > 2) { /* have nyl */ sscanf(argv[2], "%d", &nyl); if(argc > 3) { /* have nzl */ sscanf(argv[3], "%d", &nzl); if(argc > 4) { /* have numthread */ sscanf(argv[4], "%d", &num_thr); if(mp_rank == 0) printf("requested %d OMP threads\n", num_thr); if(argc > 5) { /* have flag for random initialization */ sscanf(argv[5], "%d", &use_random); if(argc > 6) { /* have flag for random initialization */ sscanf(argv[6], "%s", RUN_TYPE); } } } } } } if(mp_rank == 0) printf("The RUN_TYPE is %s\n", RUN_TYPE); if(use_random) { if(mp_rank == 0) printf("Will initialize using random numbers\n"); } else { if(mp_rank == 0) printf("Will initialize using strided values\n"); } } void c_print(rcomplex v) { sprintf(cbuf, "%e + I*%e", creal(v), cimag(v)); } long cache_adjust(long size) { /* adjust size upward (if necessary) so that it is an integral multiple of a cache line */ long newsize; newsize= (size+CACHE_LINE-1)/CACHE_LINE; newsize *= CACHE_LINE; return newsize; } void *base_malloc(size_t nbytes) { void *ptr; totalloc += nbytes; /* printf("Total bytes allocated so far %ld\n", totalloc); */ ptr= malloc(nbytes); return ptr; } void *um_malloc(size_t nbytes) { void *ptr; totalloc += nbytes; /* printf("Total bytes allocated so far %ld\n", totalloc); */ #ifdef OMP45_BUILD cudaMallocManaged(&ptr, nbytes, 0); #else ptr= malloc(nbytes); #endif return ptr; } /* this function randomly initializes an integer variable with one a number from lo to hi, inclusive.*/ int *init_int(long n, int lo, int hi) { int ii; int *var; real delta; real rlo, rhi, rvar; var= (int *)wmalloc(sizeof(int)*n); if(dtmp_size < sizeof(double)*n) { if(dtmp) free(dtmp); dtmp_size= sizeof(double)*n; dtmp= (double *)wmalloc(dtmp_size); } le_nrandom(0, n, dtmp); rlo= lo-0.5; rhi= hi+0.5; #ifdef _OPENMP #pragma omp for private(ii) #endif for(ii= 0; ii < n; ii++) { rvar= rlo+(rhi-rlo+1)*dtmp[ii]+0.5; var[ii]= (int)rvar; if(var[ii] > hi) var[ii]= hi; if(var[ii] < lo) var[ii]= lo; } return var; } /* This function creates a real variable, but does not initialize it. 
*/ real *make_real(long n) { real *var; var= (real *)wmalloc(sizeof(real)*n); return var; } void linear_real(int num, real *var, real low, real high) { int i; real delta; delta= (high-low)/(num-1.0); for(i= 0; i < num; i++) { var[i]= low+delta*i; } } void linear_rcomp(int num, rcomplex *var, rcomplex low, rcomplex high) { int i; rcomplex delta; delta= (high-low)/(num-1.0); for(i= 0; i < num; i++) { var[i]= low+delta*i; } } /* this function initializes a variable with uniformly distributed random values between specified lo and hi values.*/ real *init_real(long n, double lo, double hi) { int ii; real *var; var= (real *)wmalloc(sizeof(real)*n); real_set(n, lo, hi, var); return var; } /* this function initializes a variable with uniformly distributed random values between specified lo and hi values.*/ real *init_real_ramp(int numx, int numy, int numz, double lo, double hi) { int ii; real *var; /* Assume init_real_ramp is only used for 3D arrays and allocate space for guard zones on all sides (often not needed) */ var= (real *)wmalloc(sizeof(real)*ngtot); real_set_ramp(numx, numy, numz, lo, hi, var); return var; } void real_set(long n, double lo, double hi, real *var) { int ii; real delta; if(use_random) { if(dtmp_size < sizeof(double)*n) { if(dtmp) free(dtmp); dtmp_size= sizeof(double)*n; dtmp= (double *)wmalloc(dtmp_size); } le_nrandom(0, n, dtmp); #ifdef _OPENMP #pragma omp for private(ii) #endif for(ii= 0; ii < n; ii++) { var[ii]= lo+(hi-lo)*dtmp[ii]; } } else { delta= (hi-lo)/n; if(real_report) { puts("using scaled complex init"); puts("using strided initialization"); real_report= 0; } #ifdef _OPENMP #pragma omp for private(ii) #endif for(ii= 0; ii < n; ii++) { var[ii]= lo+delta*ii; } } } void real_set_ramp(int numx, int numy, int numz, double lo, double hi, real *var) { int ii, jj, kk, tmp_size, ndx; real modulus, phase, ampl, rampval; if(!tmp_ramp) { tmp_size= sizeof(real)*nzl; tmp_ramp= (real *)wmalloc(tmp_size); } /* create a 1D ramp from lo to hi */ linear_real(numz, tmp_ramp, lo, hi); ampl= 0.1; /* The base value for var(ii,jj,kk) is tmp_ramp(kk). dtmp contains random numbers between zero and one. Use them to add fluctuations to the "ramp value". */ #ifdef _OPENMP #pragma omp for private(ii) #endif for(kk= 0; kk < numz; kk++) { rampval= tmp_ramp[kk]; for(jj= 0; jj < numy; jj++) { for(ii= 0; ii < numx; ii++) { ndx= ii+numx*jj+numx*numy*kk; var[ndx]= (1.0 + ampl*(1.0-2.0*le_random(0)) )*rampval; } } } } rcomplex *init_complex(long n, double lo, double hi) { int ii; rcomplex *var; real *dat; dat= init_real(2*n, 0.0, 1.0); var= (rcomplex *)dat; complex_set(n, lo, hi, var); return var; } rcomplex *init_complex_ramp(int numx, int numy, int numz, double lo, double hi) { int ii; rcomplex *var; real *dat; /* Assume init_complex_ramp is only used for 3D arrays and allow space for guard zones on all sides (often not needed) */ dat= init_real(2*ngtot, 0.0, 1.0); var= (rcomplex *)dat; complex_set_ramp(numx, numy, numz, lo, hi, var); return var; } void complex_set(long n, double lo, double hi, rcomplex *var) { int ii; real modulus, phase; #ifdef USE_MODULUS /* Treat a pair of values as the modulus and phase for a complex number. */ #ifdef _OPENMP #pragma omp for private(ii) #endif for(ii= 0; ii < n; ii++) { modulus= CREAL(var[ii]); phase= tpi*CIMAG(var[ii]); var[ii]= (lo+(hi-lo)*modulus)*(COS(phase)+IREAL*SIN(phase)); } #else if(complex_report) { puts("using scaled complex init"); complex_report= 0; } /* Treat a pair of values as a point in a unit complex square. Scale to the requested range. 
*/ #ifdef _OPENMP #pragma omp for private(ii) #endif for(ii= 0; ii < n; ii++) { var[ii]= lo+(hi-lo)*var[ii]; } #endif } void complex_set_ramp(int numx, int numy, int numz, double lo, double hi, rcomplex *var) { int ii, jj, kk, tmp_size, ndx; real modulus, phase, ampl, rampval; if(!tmp_ramp) { tmp_size= sizeof(real)*nzl; tmp_ramp= (real *)wmalloc(tmp_size); } /* create a 1D ramp from lo to hi */ linear_real(numz, tmp_ramp, lo, hi); ampl= 0.02; /* On entry, var contains random numbers between zero and one. Treat the first one as a multiplier for the "ramp value" in tmp and the second as a random angle. */ #ifdef _OPENMP #pragma omp for private(ii) #endif for(kk= 0; kk < numz; kk++) { rampval= tmp_ramp[kk]; for(jj= 0; jj < numy; jj++) { for(ii= 0; ii < numx; ii++) { ndx= ii+numx*jj+numx*numy*kk; modulus= ( 1.0+ampl*(1.0-2.0*le_random(0)) )*rampval; phase= tpi*le_random(0); var[ndx]= modulus*(COS(phase)+IREAL*SIN(phase)); } } } } void kernel_get_time(int nthr, double *tstart, double *tstop) { int i, nhi; nhi= nthr; if(nhi > MAX_NTHREAD) nhi= MAX_NTHREAD; for(i= 0; i < nhi; i++) { tstart[i]= startWallTime[i]; tstop[i]= endWallTime[i]; } } int kernel_get_nthr(void) { /* printf("nthr_kern=%d\n", nthr_kern); */ return nthr_kern; } void start_omp_time(void) { int threadID; #ifdef _OPENMP #pragma omp parallel shared(startWallTime, nthr_kern) private(threadID) { threadID = omp_get_thread_num(); #pragma omp single { nthr_kern = omp_get_num_threads(); } if(!nthr_reported && threadID == 0) { if(mp_rank == 0) printf("number of threads per MPI process is %d\n", nthr_kern); nthr_reported= nthr_kern; } startWallTime[threadID] = omp_get_wtime(); } #else threadID = 0; startWallTime[threadID] = 0.0; #endif } void stop_omp_time(void) { int threadID; #ifdef _OPENMP #pragma omp parallel shared(startWallTime,endWallTime,totalWallTime) private(threadID) { threadID = omp_get_thread_num(); endWallTime[threadID] = omp_get_wtime(); totalWallTime[threadID] = endWallTime[threadID] - startWallTime[threadID]; #ifdef FULLDBG printf("thread %d took time %e\n", threadID, totalWallTime[threadID]); #endif } #else threadID = 0; endWallTime[threadID] = 0.0; totalWallTime[threadID] = 0.0; #endif }
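start_omp_time() and stop_omp_time() above implement a simple per-thread timing scheme: every thread stamps omp_get_wtime() into an array slot indexed by its thread id, and a single construct records the team size. A stripped-down sketch of the same pattern follows; the globals here are local stand-ins for the pf3dbench variables declared elsewhere, and the kernel body is a placeholder:

#include <stdio.h>
#include <omp.h>

#define MAX_NTHREAD 256   /* placeholder limit, not the pf3dbench value */

static double startWallTime[MAX_NTHREAD], endWallTime[MAX_NTHREAD];
static int nthr_kern = 1;

void timed_region(void) {
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    #pragma omp single
    nthr_kern = omp_get_num_threads();  /* one thread records the team size */
    /* the implicit barrier after `single` lines the threads up */
    startWallTime[tid] = omp_get_wtime();
    /* ... kernel work would run here ... */
    endWallTime[tid] = omp_get_wtime();
  }
  for (int i = 0; i < nthr_kern && i < MAX_NTHREAD; i++)
    printf("thread %d took %e s\n", i, endWallTime[i] - startWallTime[i]);
}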
GB_unaryop__identity_uint8_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_fp64
// op(A') function:  GB_tran__identity_uint8_fp64

// C type:   uint8_t
// A type:   double
// cast:     uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (x, aij) ;                       \
    GB_OP (GB_CX (pC), x) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint8_fp64
(
    uint8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
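With the macros above expanded by hand, GB_unop__identity_uint8_fp64 reduces to a typecasting copy loop. The standalone sketch below spells that out; the clamping cast is a plausible stand-in for GB_CAST_UNSIGNED, whose actual definition lives elsewhere in GraphBLAS and is not copied here:

#include <cstdint>
#include <cmath>

// Plausible stand-in for GB_CAST_UNSIGNED(z,x,8): NaN and negative values
// clamp to 0, values at or above the type maximum clamp to 255, the rest
// truncate. The real GraphBLAS definition may differ in detail.
static inline uint8_t cast_to_uint8(double x) {
  if (std::isnan(x) || x <= 0.0) return 0;
  if (x >= 255.0)                return 255;
  return (uint8_t)x;
}

// GB_unop__identity_uint8_fp64 with GB_CAST_OP(p,p) expanded: one parallel
// pass over the anz entries, casting each double into the uint8 output.
void unop_identity_uint8_fp64(uint8_t *Cx, const double *Ax,
                              int64_t anz, int nthreads) {
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (int64_t p = 0; p < anz; p++) {
    Cx[p] = cast_to_uint8(Ax[p]);
  }
}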
Mrpt.h
#ifndef CPP_MRPT_H_ #define CPP_MRPT_H_ #include <algorithm> #include <cmath> #include <functional> #include <map> #include <numeric> #include <random> #include <set> #include <stdexcept> #include <string> #include <utility> #include <vector> #include <Eigen/Dense> #include <Eigen/SparseCore> struct Mrpt_Parameters { int n_trees = 0; /**< Number of trees in the index. */ int depth = 0; /**< Depth of the trees in the index. */ int k = 0; /**< Number of nearest neighbors searched for (if the index is autotuned; otherwise 0). */ int votes = 0; /**< Optimal vote threshold (if the index is autotuned and the target recall is set; otherwise 0). */ double estimated_qtime = 0.0; /**< Estimated query time (if the index is autotuned and the target recall is set; otherwise 0.0). */ double estimated_recall = 0.0; /**< Estimated recall (if the index is autotuned and the target recall is set; otherwise 0.0). */ }; class Mrpt { public: /** @name Constructors * The constructor does not actually build the index. The building is done * by the function grow() which has to be called before queries can be made. * There are two different versions of the constructor which differ only * by the type of the input data. The first version takes the data set * as `Ref` to `MatrixXf`, which means that the argument * can be either `MatrixXf` or `Map<MatrixXf>` (also certain blocks of `MatrixXf` * may be accepted, see [Eigen::Ref](https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html) * for more information). The second version takes a float * pointer to an array containing the data set, and the dimension and * the sample size of the data. There are also corresponding versions * of all the member functions which take input data. In all cases the data * is assumed to be stored in column-major order such that each data point * is stored contiguously in memory. In all cases no copies are made of * the original data matrix. */ /** * @param X_ Eigen ref to the data set, stored as one data point per column */ Mrpt(const Eigen::Ref<const Eigen::MatrixXf> &X_) : X(Eigen::Map<const Eigen::MatrixXf>(X_.data(), X_.rows(), X_.cols())), n_samples(X_.cols()), dim(X_.rows()) {} /** * @param X_ a float array containing the data set with each data point * stored contiguously in memory * @param dim_ dimension of the data * @param n_samples_ number of data points */ Mrpt(const float *X_, int dim_, int n_samples_) : X(Eigen::Map<const Eigen::MatrixXf>(X_, dim_, n_samples_)), n_samples(n_samples_), dim(dim_) {} /**@}*/ /** @name Normal index building. * Build a normal (not autotuned) index. */ /** * Build a normal index. * * @param n_trees_ number of trees to be grown * @param depth_ depth of the trees; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ is the number * of data points * @param density_ expected proportion of non-zero components in the * random vectors; on the interval \f$(0,1]\f$; default value sets density to * \f$ 1 / \sqrt{d} \f$, where \f$d\f$ is the dimension of the data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device */ void grow(int n_trees_, int depth_, float density_ = -1.0, int seed = 0) { if (!empty()) { throw std::logic_error("The index has already been grown."); } if (n_trees_ <= 0) { throw std::out_of_range("The number of trees must be positive."); } if (depth_ <= 0 || depth_ > std::log2(n_samples)) { throw std::out_of_range("The depth must belong to the set {1, ... 
, log2(n)}."); } if (density_ < -1.0001 || density_ > 1.0001 || (density_ > -0.9999 && density_ < -0.0001)) { throw std::out_of_range("The density must be on the interval (0,1]."); } n_trees = n_trees_; depth = depth_; n_pool = n_trees_ * depth_; n_array = 1 << (depth_ + 1); if (density_ < 0) { density = 1.0 / std::sqrt(dim); } else { density = density_; } density < 1 ? build_sparse_random_matrix(sparse_random_matrix, n_pool, dim, density, seed) : build_dense_random_matrix(dense_random_matrix, n_pool, dim, seed); split_points = Eigen::MatrixXf(n_array, n_trees); tree_leaves = std::vector<std::vector<int>>(n_trees); count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth); leaf_first_indices = leaf_first_indices_all[depth]; #pragma omp parallel for for (int n_tree = 0; n_tree < n_trees; ++n_tree) { Eigen::MatrixXf tree_projections; if (density < 1) tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * X; else tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * X; tree_leaves[n_tree] = std::vector<int>(n_samples); std::vector<int> &indices = tree_leaves[n_tree]; std::iota(indices.begin(), indices.end(), 0); grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections); } } /**@}*/ /** @name Autotuned index building * Builds an index by autotuning such that the parameters giving the fastest * query time at the target recall level are found. If the target recall level * is not reached at all, then an index giving the highest recall level * is built. The parameters() function can be used to retrieve these optimal * parameter values and the estimated query time and the estimated recall. * There is a version which uses a separate set of test queries (`grow`), * and a version which samples a test set from the data set (`grow_autotune`). */ /** * Build an autotuned index. * * @param target_recall target recall level; on the range [0,1] * @param Q Eigen ref to the the test queries (col = data point, row = dimension). * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. 
* @param depth_max maximum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density expected proportion of non-zero components in the random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device */ void grow(double target_recall, const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density = -1.0, int seed = 0) { if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) { throw std::out_of_range("Target recall must be on the interval [0,1]."); } grow(Q, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed); prune(target_recall); } /** Build an autotuned index. * * @param target_recall target recall level; on the range [0,1] * @param Q float array containing the test queries * @param n_test number of test queries * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. * @param depth_max maximum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density expected proportion of non-zero components in the random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device * @param indices_test parameter used by the version which uses no * separate test set, leave empty. 
*/ void grow(double target_recall, const float *Q, int n_test, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density = -1.0, int seed = 0, const std::vector<int> &indices_test = {}) { if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) { throw std::out_of_range("Target recall must be on the interval [0,1]."); } grow(Q, n_test, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed, indices_test); prune(target_recall); } /** Build an autotuned index sampling test queries from the training set. * * @param target_recall target recall level; on the range [0,1] * @param n_test number of test queries * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. * @param depth_max maximum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density_ expected proportion of non-zero components in the random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device * @param n_test number of test queries sampled from the training set. */ void grow_autotune(double target_recall, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) { if (n_test < 1) { throw std::out_of_range("Test set size must be > 0."); } n_test = n_test > n_samples ? n_samples : n_test; std::vector<int> indices_test(sample_indices(n_test, seed)); const Eigen::MatrixXf Q(subset(indices_test)); grow(target_recall, Q.data(), Q.cols(), k_, trees_max, depth_max, depth_min_, votes_max_, density_, seed, indices_test); } /** * Get the optimal parameters and the estimated recall and query time found * by autotuning. If the index is autotuned without preset recall level, * `estimated_recall`, `estimated_qtime` and `votes` are set to their * default value 0, and `n_trees` and `depth` are set to `trees_max` and * `depth_max, respectively. If the index is not autotuned, * `estimated_recall`, `estimated_qtime`, `votes` and `k` are all set to * their default value 0. * * @return parameters of the index */ Mrpt_Parameters parameters() const { if (index_type == normal || index_type == autotuned_unpruned) { Mrpt_Parameters p; p.n_trees = n_trees; p.depth = depth; p.k = par.k; return p; } return par; } /** * Get whether the index has been autotuned. * * @return true if the index has been autotuned, false otherwise. 
*/ bool is_autotuned() const { return index_type == autotuned; } /**@}*/ /** @name Autotuned index building without preset recall level * Build an autotuned index. This version does not require prespecifying * a target recall level, but an index generated by this function can be used * to subset different indices with different recall levels. This is done by * subset(). The function optimal_parameters() can be used to retrieve a * pareto frontier of optimal parameters. There is a version which uses a * separate set of test queries (`grow`), and a version which samples a * test set from the data set (`grow_autotune`). */ /**@{*/ /** Build an autotuned index without prespecifying a recall level. * * @param data a float array containing the test queries. * @param n_test number of test queries * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. * @param depth_max maximum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density_ expected proportion of non-zero components in the random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device * @param indices_test parameter used by the version which uses no * separate test set, leave empty. **/ void grow(const float *data, int n_test, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0, const std::vector<int> &indices_test = {}) { if (trees_max == - 1) { trees_max = std::min(std::sqrt(n_samples), 1000.0); } if (depth_min_ == -1) { depth_min_ = std::max(static_cast<int>(std::log2(n_samples) - 11), 5); } if (depth_max == -1) { depth_max = std::max(static_cast<int>(std::log2(n_samples) - 4), depth_min_); } if (votes_max_ == -1) { votes_max_ = std::max(trees_max / 10, std::min(trees_max, 10)); } if (density_ > -1.0001 && density_ < -0.9999) { density_ = 1.0 / std::sqrt(dim); } if (!empty()) { throw std::logic_error("The index has already been grown."); } if (k_ <= 0 || k_ > n_samples) { throw std::out_of_range("k_ must belong to the set {1, ..., n}."); } if (trees_max <= 0) { throw std::out_of_range("trees_max must be positive."); } if (depth_max <= 0 || depth_max > std::log2(n_samples)) { throw std::out_of_range("depth_max must belong to the set {1, ... , log2(n)}."); } if (depth_min_ <= 0 || depth_min_ > depth_max) { throw std::out_of_range("depth_min_ must belong to the set {1, ... , depth_max}"); } if (votes_max_ <= 0 || votes_max_ > trees_max) { throw std::out_of_range("votes_max_ must belong to the set {1, ... 
, trees_max}."); } if (density_ < 0.0 || density_ > 1.0001) { throw std::out_of_range("The density must be on the interval (0,1]."); } if(n_samples < 101) { throw std::out_of_range("Sample size must be at least 101 to autotune an index."); } depth_min = depth_min_; votes_max = votes_max_; k = k_; const Eigen::Map<const Eigen::MatrixXf> Q(data, dim, n_test); grow(trees_max, depth_max, density_, seed); Eigen::MatrixXi exact(k, n_test); compute_exact(Q, exact, indices_test); std::vector<Eigen::MatrixXd> recalls(depth_max - depth_min + 1); cs_sizes = std::vector<Eigen::MatrixXd>(depth_max - depth_min + 1); for (int d = depth_min; d <= depth_max; ++d) { recalls[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max); cs_sizes[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max); } for (int i = 0; i < n_test; ++i) { std::vector<Eigen::MatrixXd> recall_tmp(depth_max - depth_min + 1); std::vector<Eigen::MatrixXd> cs_size_tmp(depth_max - depth_min + 1); count_elected(Q.col(i), Eigen::Map<Eigen::VectorXi>(exact.data() + i * k, k), votes_max, recall_tmp, cs_size_tmp); for (int d = depth_min; d <= depth_max; ++d) { recalls[d - depth_min] += recall_tmp[d - depth_min]; cs_sizes[d - depth_min] += cs_size_tmp[d - depth_min]; } } for (int d = depth_min; d <= depth_max; ++d) { recalls[d - depth_min] /= (k * n_test); cs_sizes[d - depth_min] /= n_test; } fit_times(Q); std::set<Mrpt_Parameters,decltype(is_faster)*> pars = list_parameters(recalls); opt_pars = pareto_frontier(pars); index_type = autotuned_unpruned; par.k = k_; } /** Build an autotuned index without prespecifying a recall level. * * @param Q Eigen ref to the test queries. * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. * @param depth_max depth of trees grown; ; on the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters on the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density_ expected proportion of non-zero components of random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device */ void grow(const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0) { if (Q.rows() != dim) { throw std::invalid_argument("Dimensions of the data and the validation set do not match."); } grow(Q.data(), Q.cols(), k_, trees_max, depth_max, depth_min_, votes_max_, density_, seed); } /** Build an autotuned index sampling test queries from the training set * and without prespecifying a recall level. 
* * @param k_ number of nearest neighbors searched for * @param trees_max number of trees grown; default value -1 sets this to * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points. * @param depth_max depth of trees grown; in the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ * is the number of data points; default value -1 sets this to * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points * @param depth_min_ minimum depth of trees considered when searching for * optimal parameters on the set * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1 * sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$ * @param votes_max_ maximum number of votes considered when searching for * optimal parameters; a default value -1 sets this to * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, * \mathrm{min}(10, \mathrm{trees\_max})) \f$ * @param density_ expected proportion of non-zero components of random vectors; * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is * the dimension of data * @param seed seed given to a rng when generating random vectors; * a default value 0 initializes the rng randomly with std::random_device * @param n_test number of test queries sampled from the training set. */ void grow_autotune(int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) { if (n_test < 1) { throw std::out_of_range("Test set size must be > 0."); } n_test = n_test > n_samples ? n_samples : n_test; std::vector<int> indices_test(sample_indices(n_test, seed)); const Eigen::MatrixXf Q(subset(indices_test)); grow(Q.data(), Q.cols(), k_, trees_max, depth_max, depth_min_, votes_max_, density_, seed, indices_test); } /** Create a new index by copying trees from an autotuned index grown * without a prespecified recall level. The index is created so that * it gives a fastest query time at the recall level given as the parameter. * If this recall level is not met, then it creates an index with a * highest possible recall level. 
* * @param target_recall target recall level; on the range [0,1] * @return an autotuned Mrpt index with a recall level at least as high as * target_recall */ Mrpt subset(double target_recall) const { if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) { throw std::out_of_range("Target recall must be on the interval [0,1]."); } Mrpt index2(X); index2.par = parameters(target_recall); int depth_max = depth; index2.n_trees = index2.par.n_trees; index2.depth = index2.par.depth; index2.votes = index2.par.votes; index2.n_pool = index2.depth * index2.n_trees; index2.n_array = 1 << (index2.depth + 1); index2.tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2.n_trees); index2.leaf_first_indices_all = leaf_first_indices_all; index2.density = density; index2.k = k; index2.split_points = split_points.topLeftCorner(index2.n_array, index2.n_trees); index2.leaf_first_indices = leaf_first_indices_all[index2.depth]; if (index2.density < 1) { index2.sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2.n_pool, index2.dim); for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree) index2.sparse_random_matrix.middleRows(n_tree * index2.depth, index2.depth) = sparse_random_matrix.middleRows(n_tree * depth_max, index2.depth); } else { index2.dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2.n_pool, index2.dim); for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree) index2.dense_random_matrix.middleRows(n_tree * index2.depth, index2.depth) = dense_random_matrix.middleRows(n_tree * depth_max, index2.depth); } index2.index_type = autotuned; return index2; } /** Create a new index by copying trees from an autotuned index grown * without a prespecified recall level. The index is created so that * it gives a fastest query time at the recall level given as the parameter. * If this recall level is not met, then it creates an index with a * highest possible recall level. This function differs from subset() only * by the return value. 
  *
  * @param target_recall target recall level; in the range [0,1]
  * @return pointer to a dynamically allocated autotuned Mrpt index with
  * a recall level at least as high as target_recall
  */
  Mrpt *subset_pointer(double target_recall) const {
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }

    Mrpt *index2 = new Mrpt(X);
    index2->par = parameters(target_recall);

    int depth_max = depth;

    index2->n_trees = index2->par.n_trees;
    index2->depth = index2->par.depth;
    index2->votes = index2->par.votes;
    index2->n_pool = index2->depth * index2->n_trees;
    index2->n_array = 1 << (index2->depth + 1);
    index2->tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2->n_trees);
    index2->leaf_first_indices_all = leaf_first_indices_all;
    index2->density = density;
    index2->k = k;
    index2->split_points = split_points.topLeftCorner(index2->n_array, index2->n_trees);
    index2->leaf_first_indices = leaf_first_indices_all[index2->depth];

    if (index2->density < 1) {
      index2->sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2->n_pool, index2->dim);
      for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
        index2->sparse_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
            sparse_random_matrix.middleRows(n_tree * depth_max, index2->depth);
    } else {
      index2->dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2->n_pool, index2->dim);
      for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
        index2->dense_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
            dense_random_matrix.middleRows(n_tree * depth_max, index2->depth);
    }

    index2->index_type = autotuned;
    return index2;
  }

  /**
  * Return the Pareto frontier of optimal parameters for an index which
  * is autotuned without setting a recall level. Each parameter combination
  * in the returned vector is optimal in the sense that it is the fastest
  * (measured by query time) parameter combination that reaches at least
  * as high a recall level as it does.
  *
  * @return vector of optimal parameters
  */
  std::vector<Mrpt_Parameters> optimal_parameters() const {
    if (index_type == normal) {
      throw std::logic_error("The list of optimal parameters cannot be retrieved for the non-autotuned index.");
    }
    if (index_type == autotuned) {
      throw std::logic_error("The list of optimal parameters cannot be retrieved for an index which has already been subsetted or pruned to the target recall level.");
    }

    std::vector<Mrpt_Parameters> new_pars;
    std::copy(opt_pars.begin(), opt_pars.end(), std::back_inserter(new_pars));
    return new_pars;
  }

  /**@}*/

  /** @name Approximate k-nn search
  * A query using a non-autotuned index. Finds k approximate nearest neighbors
  * from a data set X for a query point q. Because the index is not autotuned,
  * k and the vote threshold have to be set manually. The indices of the k
  * nearest neighbors are written to a buffer out, which has to be
  * preallocated to have at least length k. Optionally, the Euclidean
  * distances to these k nearest points are written to a buffer
  * out_distances. If there are fewer than k points in the candidate set,
  * -1 is written to the remaining locations of the output buffers.
  */

  /**
  * Approximate k-nn search using a normal index.
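  *
  * A hedged usage sketch (q is assumed to point to a dim-dimensional query
  * point and the index to have been grown already):
  * @code
  * std::vector<int> nn(10);
  * index.query(q, 10, 2, nn.data());   // k = 10, vote threshold = 2
  * @endcode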
  *
  * @param data pointer to an array containing the query point
  * @param k number of nearest neighbors searched for
  * @param vote_threshold number of votes required for a data point to be included in the candidate set
  * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
  * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
  * @param out_n_elected optional output parameter (size = 1) for the candidate set size
  */
  void query(const float *data, int k, int vote_threshold, int *out,
      float *out_distances = nullptr, int *out_n_elected = nullptr) const {
    if (k <= 0 || k > n_samples) {
      throw std::out_of_range("k must belong to the set {1, ..., n}.");
    }

    if (vote_threshold <= 0 || vote_threshold > n_trees) {
      throw std::out_of_range("vote_threshold must belong to the set {1, ..., n_trees}.");
    }

    if (empty()) {
      throw std::logic_error("The index must be built before making queries.");
    }

    const Eigen::Map<const Eigen::VectorXf> q(data, dim);

    Eigen::VectorXf projected_query(n_pool);
    if (density < 1)
      projected_query.noalias() = sparse_random_matrix * q;
    else
      projected_query.noalias() = dense_random_matrix * q;

    std::vector<int> found_leaves(n_trees);

    /*
    * The following loops over all trees, and routes the query to exactly one
    * leaf in each.
    */
    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int idx_tree = 0;
      for (int d = 0; d < depth; ++d) {
        const int j = n_tree * depth + d;
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
      }
      found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
    }

    int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
    Eigen::VectorXi elected(n_trees * max_leaf_size);
    Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);

    // count votes
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
      int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
      const std::vector<int> &indices = tree_leaves[n_tree];
      for (int i = leaf_begin; i < leaf_end; ++i) {
        int idx = indices[i];
        if (++votes(idx) == vote_threshold)
          elected(n_elected++) = idx;
      }
    }

    if (out_n_elected) {
      *out_n_elected = n_elected;
    }

    exact_knn(q, k, elected, n_elected, out, out_distances);
  }

  /**
  * Approximate k-nn search using a normal index.
  *
  * @param q Eigen ref to the query point
  * @param k number of nearest neighbors searched for
  * @param vote_threshold number of votes required for a data point to be included in the candidate set
  * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
  * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
  * @param out_n_elected optional output parameter (size = 1) for the candidate set size
  */
  void query(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int vote_threshold, int *out,
      float *out_distances = nullptr, int *out_n_elected = nullptr) const {
    query(q.data(), k, vote_threshold, out, out_distances, out_n_elected);
  }

  /**@}*/

  /** @name Approximate k-nn search using autotuned index
  * Approximate k-nn search using an autotuned index. Finds k approximate
  * nearest neighbors from a data set X for a query point q.
  * Because the index is autotuned, no parameters other than a query point
  * and an output buffer are required: k is preset, and the optimal vote
  * count is used automatically. The indices of the k nearest neighbors are
  * written to a buffer out, which has to be preallocated to have at least
  * length k. Optionally, the Euclidean distances to these k nearest points
  * are written to a buffer out_distances. If there are fewer than k points
  * in the candidate set, -1 is written to the remaining locations of the
  * output buffers.
  */

  /**
  * Approximate k-nn search using an autotuned index.
  *
  * @param q pointer to an array containing the query point
  * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
  * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
  * @param out_n_elected optional output parameter (size = 1) for the candidate set size
  */
  void query(const float *q, int *out, float *out_distances = nullptr,
      int *out_n_elected = nullptr) const {
    if (index_type == normal) {
      throw std::logic_error("The index is not autotuned: k and the vote threshold have to be specified.");
    }

    if (index_type == autotuned_unpruned) {
      throw std::logic_error("The target recall level has to be set before making queries.");
    }

    query(q, k, votes, out, out_distances, out_n_elected);
  }

  /**
  * Approximate k-nn search using an autotuned index.
  *
  * @param q Eigen ref to the query point
  * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
  * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
  * @param out_n_elected optional output parameter (size = 1) for the candidate set size
  */
  void query(const Eigen::Ref<const Eigen::VectorXf> &q, int *out,
      float *out_distances = nullptr, int *out_n_elected = nullptr) const {
    query(q.data(), out, out_distances, out_n_elected);
  }

  /**@}*/

  /** @name Exact k-nn search
  * Functions for fast exact k-nn search: find the k nearest neighbors of a
  * query point q in a data set X_. The indices of the k nearest neighbors
  * are written to a buffer out, which has to be preallocated to have at
  * least length k. Optionally, the Euclidean distances to these k nearest
  * points are written to a buffer out_distances. There are both static and
  * member versions.
  */

  /**
  * @param q_data pointer to an array containing the query point
  * @param X_data pointer to an array containing the data set
  * @param dim dimension of the data
  * @param n_samples number of points in the data set
  * @param k number of neighbors searched for
  * @param out output buffer (size = k) for the indices of k nearest neighbors
  * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
  */
  static void exact_knn(const float *q_data, const float *X_data, int dim, int n_samples,
      int k, int *out, float *out_distances = nullptr) {
    const Eigen::Map<const Eigen::MatrixXf> X(X_data, dim, n_samples);
    const Eigen::Map<const Eigen::VectorXf> q(q_data, dim);

    if (k < 1 || k > n_samples) {
      throw std::out_of_range("k must be positive and no greater than the sample size of data X.");
    }

    Eigen::VectorXf distances(n_samples);

    #pragma omp parallel for
    for (int i = 0; i < n_samples; ++i)
      distances(i) = (X.col(i) - q).squaredNorm();

    if (k == 1) {
      Eigen::MatrixXf::Index index;
      distances.minCoeff(&index);
      out[0] = index;

      if (out_distances)
        out_distances[0] = std::sqrt(distances(index));

      return;
    }

    Eigen::VectorXi idx(n_samples);
    std::iota(idx.data(), idx.data() + n_samples, 0);
    std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_samples,
        [&distances](int i1, int i2) { return distances(i1) < distances(i2); });

    for (int i = 0; i < k; ++i)
      out[i] = idx(i);

    if (out_distances) {
      for (int i = 0; i < k; ++i)
        out_distances[i] = std::sqrt(distances(idx(i)));
    }
  }

  /**
  * @param q Eigen ref to a query point
  * @param X Eigen ref to a data set
  * @param k number of neighbors searched for
  * @param out output buffer (size = k) for the indices of k nearest neighbors
  * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
  */
  static void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q,
      const Eigen::Ref<const Eigen::MatrixXf> &X, int k, int *out,
      float *out_distances = nullptr) {
    Mrpt::exact_knn(q.data(), X.data(), X.rows(), X.cols(), k, out, out_distances);
  }

  /**
  * @param q pointer to an array containing the query point
  * @param k number of neighbors searched for
  * @param out output buffer (size = k) for the indices of k nearest neighbors
  * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
  */
  void exact_knn(const float *q, int k, int *out, float *out_distances = nullptr) const {
    Mrpt::exact_knn(q, X.data(), dim, n_samples, k, out, out_distances);
  }

  /**
  * @param q Eigen ref to the query point
  * @param k number of neighbors searched for
  * @param out output buffer (size = k) for the indices of k nearest neighbors
  * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
  */
  void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int *out,
      float *out_distances = nullptr) const {
    Mrpt::exact_knn(q.data(), X.data(), dim, n_samples, k, out, out_distances);
  }

  /**@}*/

  /** @name Utility functions
  * Saving and loading an index and checking whether it has been constructed.
  * Saving and loading work for both autotuned and non-autotuned indices, and
  * load() also retrieves the optimal parameters found by autotuning.
  * The same data set used to build a saved index has to be used to
  * construct the index into which it is loaded.
  */

  /**
  * Saves the index to a file.
  *
  * @param path filepath to the output file
  * @return true if saving succeeded, false otherwise.
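  *
  * A hedged round-trip sketch (the file name is hypothetical; the second
  * index must be constructed on the same data set):
  * @code
  * index.save("index.bin");
  * Mrpt index2(X);            // X: the same data set that built `index`
  * index2.load("index.bin");  // restores trees and autotuning parameters
  * @endcode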
  */
  bool save(const char *path) const {
    FILE *fd;
    if ((fd = fopen(path, "wb")) == NULL)
      return false;

    int i = index_type;
    fwrite(&i, sizeof(int), 1, fd);

    if (index_type == autotuned_unpruned) {
      write_parameter_list(opt_pars, fd);
    }

    write_parameters(&par, fd);
    fwrite(&n_trees, sizeof(int), 1, fd);
    fwrite(&depth, sizeof(int), 1, fd);
    fwrite(&density, sizeof(float), 1, fd);

    fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd);

    // save tree leaves
    for (int i = 0; i < n_trees; ++i) {
      int sz = tree_leaves[i].size();
      fwrite(&sz, sizeof(int), 1, fd);
      fwrite(&tree_leaves[i][0], sizeof(int), sz, fd);
    }

    // save random matrix
    if (density < 1) {
      int non_zeros = sparse_random_matrix.nonZeros();
      fwrite(&non_zeros, sizeof(int), 1, fd);
      for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) {
        for (Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) {
          float val = it.value();
          int row = it.row(), col = it.col();
          fwrite(&row, sizeof(int), 1, fd);
          fwrite(&col, sizeof(int), 1, fd);
          fwrite(&val, sizeof(float), 1, fd);
        }
      }
    } else {
      fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
    }

    fclose(fd);
    return true;
  }

  /**
  * Loads an index from a file.
  *
  * @param path filepath to the index file
  * @return true if loading succeeded, false otherwise.
  */
  bool load(const char *path) {
    FILE *fd;
    if ((fd = fopen(path, "rb")) == NULL)
      return false;

    int i;
    fread(&i, sizeof(int), 1, fd);
    index_type = static_cast<itype>(i);

    if (index_type == autotuned_unpruned) {
      read_parameter_list(fd);
    }

    read_parameters(&par, fd);
    fread(&n_trees, sizeof(int), 1, fd);
    fread(&depth, sizeof(int), 1, fd);
    fread(&density, sizeof(float), 1, fd);

    n_pool = n_trees * depth;
    n_array = 1 << (depth + 1);

    count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
    leaf_first_indices = leaf_first_indices_all[depth];

    split_points = Eigen::MatrixXf(n_array, n_trees);
    fread(split_points.data(), sizeof(float), n_array * n_trees, fd);

    // load tree leaves
    tree_leaves = std::vector<std::vector<int>>(n_trees);
    for (int i = 0; i < n_trees; ++i) {
      int sz;
      fread(&sz, sizeof(int), 1, fd);
      std::vector<int> leaves(sz);
      fread(&leaves[0], sizeof(int), sz, fd);
      tree_leaves[i] = leaves;
    }

    // load random matrix
    if (density < 1) {
      int non_zeros;
      fread(&non_zeros, sizeof(int), 1, fd);
      sparse_random_matrix = Eigen::SparseMatrix<float>(n_pool, dim);
      std::vector<Eigen::Triplet<float>> triplets;
      for (int k = 0; k < non_zeros; ++k) {
        int row, col;
        float val;
        fread(&row, sizeof(int), 1, fd);
        fread(&col, sizeof(int), 1, fd);
        fread(&val, sizeof(float), 1, fd);
        triplets.push_back(Eigen::Triplet<float>(row, col, val));
      }
      sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
      sparse_random_matrix.makeCompressed();
    } else {
      dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_pool, dim);
      fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
    }

    fclose(fd);

    k = par.k;
    votes = par.votes;
    return true;
  }

  /**
  * Has the index been constructed?
  *
  * @return true if the index is empty (no trees grown), false otherwise
  */
  bool empty() const {
    return n_trees == 0;
  }

  /**@}*/

  /** @name
  * Friend declarations for test fixtures. Tests are located at
  * https://github.com/vioshyvo/RP-test.
  */
  friend class MrptTest;
  friend class UtilityTest;

  /**@}*/

 private:

  /**
  * Builds a single random projection tree. The tree is constructed by
  * recursively projecting the data on a random vector and splitting into
  * two by the median.
  */
  void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end,
      int tree_level, int i, int n_tree, const Eigen::MatrixXf &tree_projections) {
    int n = end - begin;
    int idx_left = 2 * i + 1;
    int idx_right = idx_left + 1;

    if (tree_level == depth) return;

    std::nth_element(begin, begin + n / 2, end,
        [&tree_projections, tree_level] (int i1, int i2) {
          return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
        });
    auto mid = end - n / 2;

    if (n % 2) {
      split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
    } else {
      auto left_it = std::max_element(begin, mid,
          [&tree_projections, tree_level] (int i1, int i2) {
            return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
          });
      split_points(i, n_tree) = (tree_projections(tree_level, *mid) +
                                 tree_projections(tree_level, *left_it)) / 2.0;
    }

    grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
    grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
  }

  /**
  * Find k nearest neighbors from data for the query point
  */
  void exact_knn(const Eigen::Map<const Eigen::VectorXf> &q, int k,
      const Eigen::VectorXi &indices, int n_elected, int *out,
      float *out_distances = nullptr) const {
    if (!n_elected) {
      for (int i = 0; i < k; ++i)
        out[i] = -1;

      if (out_distances) {
        for (int i = 0; i < k; ++i)
          out_distances[i] = -1;
      }

      return;
    }

    Eigen::VectorXf distances(n_elected);

    #pragma omp parallel for
    for (int i = 0; i < n_elected; ++i)
      distances(i) = (X.col(indices(i)) - q).squaredNorm();

    if (k == 1) {
      Eigen::MatrixXf::Index index;
      distances.minCoeff(&index);
      out[0] = n_elected ? indices(index) : -1;

      if (out_distances)
        out_distances[0] = n_elected ? std::sqrt(distances(index)) : -1;

      return;
    }

    int n_to_sort = n_elected > k ? k : n_elected;
    Eigen::VectorXi idx(n_elected);
    std::iota(idx.data(), idx.data() + n_elected, 0);
    std::partial_sort(idx.data(), idx.data() + n_to_sort, idx.data() + n_elected,
        [&distances](int i1, int i2) { return distances(i1) < distances(i2); });

    for (int i = 0; i < k; ++i)
      out[i] = i < n_elected ? indices(idx(i)) : -1;
    if (out_distances) {
      for (int i = 0; i < k; ++i)
        out_distances[i] = i < n_elected ? std::sqrt(distances(idx(i))) : -1;
    }
  }

  void prune(double target_recall) {
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }

    par = parameters(target_recall);
    if (!par.n_trees) {
      return;
    }

    int depth_max = depth;

    n_trees = par.n_trees;
    depth = par.depth;
    votes = par.votes;
    n_pool = depth * n_trees;
    n_array = 1 << (depth + 1);

    tree_leaves.resize(n_trees);
    tree_leaves.shrink_to_fit();
    split_points.conservativeResize(n_array, n_trees);
    leaf_first_indices = leaf_first_indices_all[depth];

    if (density < 1) {
      Eigen::SparseMatrix<float, Eigen::RowMajor> srm_new(n_pool, dim);
      for (int n_tree = 0; n_tree < n_trees; ++n_tree)
        srm_new.middleRows(n_tree * depth, depth) =
            sparse_random_matrix.middleRows(n_tree * depth_max, depth);
      sparse_random_matrix = srm_new;
    } else {
      Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> drm_new(n_pool, dim);
      for (int n_tree = 0; n_tree < n_trees; ++n_tree)
        drm_new.middleRows(n_tree * depth, depth) =
            dense_random_matrix.middleRows(n_tree * depth_max, depth);
      dense_random_matrix = drm_new;
    }

    index_type = autotuned;
  }

  void count_elected(const Eigen::VectorXf &q, const Eigen::Map<Eigen::VectorXi> &exact,
      int votes_max, std::vector<Eigen::MatrixXd> &recalls,
      std::vector<Eigen::MatrixXd> &cs_sizes) const {
    Eigen::VectorXf projected_query(n_pool);
    if (density < 1)
      projected_query.noalias() = sparse_random_matrix * q;
    else
      projected_query.noalias() = dense_random_matrix * q;

    int depth_min = depth - recalls.size() + 1;
    std::vector<std::vector<int>> start_indices(n_trees);

    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      start_indices[n_tree] = std::vector<int>(depth - depth_min + 1);
      int idx_tree = 0;
      for (int d = 0; d < depth; ++d) {
        const int j = n_tree * depth + d;
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
        if (d >= depth_min - 1)
          start_indices[n_tree][d - depth_min + 1] = idx_tree - (1 << (d + 1)) + 1;
      }
    }

    const int *exact_begin = exact.data();
    const int *exact_end = exact.data() + exact.size();

    for (int depth_crnt = depth_min; depth_crnt <= depth; ++depth_crnt) {
      Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
      const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];

      Eigen::MatrixXd recall(votes_max, n_trees);
      Eigen::MatrixXd candidate_set_size(votes_max, n_trees);
      recall.col(0) = Eigen::VectorXd::Zero(votes_max);
      candidate_set_size.col(0) = Eigen::VectorXd::Zero(votes_max);

      // count votes
      for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
        std::vector<int> &found_leaves = start_indices[n_tree];
        if (n_tree) {
          recall.col(n_tree) = recall.col(n_tree - 1);
          candidate_set_size.col(n_tree) = candidate_set_size.col(n_tree - 1);
        }

        int leaf_begin = leaf_first_indices[found_leaves[depth_crnt - depth_min]];
        int leaf_end = leaf_first_indices[found_leaves[depth_crnt - depth_min] + 1];

        const std::vector<int> &indices = tree_leaves[n_tree];
        for (int i = leaf_begin; i < leaf_end; ++i) {
          int idx = indices[i];
          int v = ++votes(idx);
          if (v <= votes_max) {
            candidate_set_size(v - 1, n_tree)++;
            if (std::find(exact_begin, exact_end, idx) != exact_end)
              recall(v - 1, n_tree)++;
          }
        }
      }

      recalls[depth_crnt - depth_min] = recall;
      cs_sizes[depth_crnt - depth_min] = candidate_set_size;
    }
  }

  /**
  * Builds a random sparse matrix for use in random projection.
  * The components of the matrix are drawn from the distribution
  *
  *   0        w.p. 1 - a
  *   N(0, 1)  w.p. a
  *
  * where a = density.
  */
  static void build_sparse_random_matrix(Eigen::SparseMatrix<float, Eigen::RowMajor> &sparse_random_matrix,
      int n_row, int n_col, float density, int seed = 0) {
    sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(n_row, n_col);

    std::random_device rd;
    int s = seed ? seed : rd();
    std::mt19937 gen(s);
    std::uniform_real_distribution<float> uni_dist(0, 1);
    std::normal_distribution<float> norm_dist(0, 1);

    std::vector<Eigen::Triplet<float>> triplets;
    for (int j = 0; j < n_row; ++j) {
      for (int i = 0; i < n_col; ++i) {
        if (uni_dist(gen) > density) continue;
        triplets.push_back(Eigen::Triplet<float>(j, i, norm_dist(gen)));
      }
    }

    sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
    sparse_random_matrix.makeCompressed();
  }

  /*
  * Builds a random dense matrix for use in random projection. The components of
  * the matrix are drawn from the standard normal distribution.
  */
  static void build_dense_random_matrix(Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> &dense_random_matrix,
      int n_row, int n_col, int seed = 0) {
    dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_row, n_col);

    std::random_device rd;
    int s = seed ? seed : rd();
    std::mt19937 gen(s);
    std::normal_distribution<float> normal_dist(0, 1);

    std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_row * n_col,
        [&normal_dist, &gen] { return normal_dist(gen); });
  }

  void compute_exact(const Eigen::Map<const Eigen::MatrixXf> &Q, Eigen::MatrixXi &out_exact,
      const std::vector<int> &indices_test = {}) const {
    int n_test = Q.cols();

    Eigen::VectorXi idx(n_samples);
    std::iota(idx.data(), idx.data() + n_samples, 0);

    for (int i = 0; i < n_test; ++i) {
      if (!indices_test.empty()) {
        std::remove(idx.data(), idx.data() + n_samples, indices_test[i]);
      }
      exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + i * dim, dim), k, idx,
                (indices_test.empty() ? n_samples : n_samples - 1),
                out_exact.data() + i * k);
      std::sort(out_exact.data() + i * k, out_exact.data() + i * k + k);
      if (!indices_test.empty()) {
        idx[n_samples - 1] = indices_test[i];
      }
    }
  }

  static bool is_faster(const Mrpt_Parameters &par1, const Mrpt_Parameters &par2) {
    return par1.estimated_qtime < par2.estimated_qtime;
  }

  void vote(const Eigen::VectorXf &projected_query, int vote_threshold, Eigen::VectorXi &elected,
      int &n_elected, int n_trees, int depth_crnt) {
    std::vector<int> found_leaves(n_trees);
    const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];

    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int idx_tree = 0;
      for (int d = 0; d < depth_crnt; ++d) {
        const int j = n_tree * depth + d;
        const int idx_left = 2 * idx_tree + 1;
        const int idx_right = idx_left + 1;
        const float split_point = split_points(idx_tree, n_tree);
        if (projected_query(j) <= split_point) {
          idx_tree = idx_left;
        } else {
          idx_tree = idx_right;
        }
      }
      found_leaves[n_tree] = idx_tree - (1 << depth_crnt) + 1;
    }

    int max_leaf_size = n_samples / (1 << depth_crnt) + 1;
    elected = Eigen::VectorXi(n_trees * max_leaf_size);
    Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);

    // count votes
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
      int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
      const std::vector<int> &indices = tree_leaves[n_tree];
      for (int i = leaf_begin; i < leaf_end; ++i) {
        int idx = indices[i];
        if (++votes(idx) == vote_threshold)
          elected(n_elected++) = idx;
      }
    }
  }

  std::pair<double,double> fit_projection_times(const Eigen::Map<const Eigen::MatrixXf> &Q,
      std::vector<int> &exact_x) {
    std::vector<double> projection_times, projection_x;
    long double idx_sum = 0;

    std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
    generate_x(tested_trees, n_trees, 10, n_trees);

    for (int d = depth_min; d <= depth; ++d) {
      for (int i = 0; i < (int) tested_trees.size(); ++i) {
        int t = tested_trees[i];
        int n_random_vectors = t * d;
        projection_x.push_back(n_random_vectors);
        Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_mat;
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_mat;

        if (density < 1) {
          build_sparse_random_matrix(sparse_mat, n_random_vectors, dim, density);
        } else {
          build_dense_random_matrix(dense_mat, n_random_vectors, dim);
        }

        double start_proj = omp_get_wtime();
        Eigen::VectorXf projected_query(n_random_vectors);

        if (density < 1) {
          projected_query.noalias() = sparse_mat * Q.col(0);
        } else {
          projected_query.noalias() = dense_mat * Q.col(0);
        }

        double end_proj = omp_get_wtime();
        projection_times.push_back(end_proj - start_proj);
        idx_sum += projected_query.norm();

        int votes_index = votes_max < t ? votes_max : t;
        for (int v = 1; v <= votes_index; ++v) {
          int cs_size = get_candidate_set_size(t, d, v);
          if (cs_size > 0) exact_x.push_back(cs_size);
        }
      }
    }

    // use results to ensure that the compiler does not optimize away the timed code.
    projection_x[0] += idx_sum > 1.0 ? 0.0000 : 0.0001;
    return fit_theil_sen(projection_x, projection_times);
  }

  std::vector<std::map<int,std::pair<double,double>>> fit_voting_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
    int n_test = Q.cols();
    std::random_device rd;
    std::mt19937 rng(rd());
    std::uniform_int_distribution<int> uni(0, n_test - 1);

    std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
    generate_x(tested_trees, n_trees, 10, n_trees);

    std::vector<int> vote_thresholds_x {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
    generate_x(vote_thresholds_x, votes_max, 10, votes_max);

    beta_voting = std::vector<std::map<int,std::pair<double,double>>>();

    for (int d = depth_min; d <= depth; ++d) {
      std::map<int,std::pair<double,double>> beta;

      for (const auto &v : vote_thresholds_x) {
        long double idx_sum = 0;
        std::vector<double> voting_times, voting_x;

        for (int i = 0; i < (int) tested_trees.size(); ++i) {
          int t = tested_trees[i];
          int n_el = 0;
          Eigen::VectorXi elected;
          auto ri = uni(rng);

          Eigen::VectorXf projected_query(n_trees * depth);
          if (density < 1) {
            projected_query.noalias() = sparse_random_matrix * Q.col(ri);
          } else {
            projected_query.noalias() = dense_random_matrix * Q.col(ri);
          }

          double start_voting = omp_get_wtime();
          vote(projected_query, v, elected, n_el, t, d);
          double end_voting = omp_get_wtime();

          voting_times.push_back(end_voting - start_voting);
          voting_x.push_back(t);

          for (int i = 0; i < n_el; ++i)
            idx_sum += elected(i);
        }

        voting_x[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
        beta[v] = fit_theil_sen(voting_x, voting_times);
      }

      beta_voting.push_back(beta);
    }

    return beta_voting;
  }

  static void generate_x(std::vector<int> &x, int max_generated, int n_tested, int max_val) {
    n_tested = max_generated > n_tested ? n_tested : max_val;
    int increment = max_generated / n_tested;
    for (int i = 1; i <= n_tested; ++i) {
      if (std::find(x.begin(), x.end(), i * increment) == x.end() && i * increment <= max_generated) {
        x.push_back(i * increment);
      }
    }

    auto end = std::remove_if(x.begin(), x.end(), [max_val](int t) { return t > max_val; });
    x.erase(end, x.end());
  }

  std::pair<double,double> fit_exact_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
    std::vector<int> s_tested {1,2,5,10,20,35,50,75,100,150,200,300,400,500};
    generate_x(s_tested, n_samples / 20, 20, n_samples);

    int n_test = Q.cols();
    std::vector<double> exact_times;
    long double idx_sum = 0;

    std::random_device rd;
    std::mt19937 rng(rd());
    std::uniform_int_distribution<int> uni(0, n_test - 1);
    std::uniform_int_distribution<int> uni2(0, n_samples - 1);

    std::vector<double> ex;
    int n_sim = 20;
    for (int i = 0; i < (int) s_tested.size(); ++i) {
      double mean_exact_time = 0;
      int s_size = s_tested[i];
      ex.push_back(s_size);

      for (int m = 0; m < n_sim; ++m) {
        auto ri = uni(rng);
        Eigen::VectorXi elected(s_size);
        for (int j = 0; j < elected.size(); ++j)
          elected(j) = uni2(rng);

        double start_exact = omp_get_wtime();
        std::vector<int> res(k);
        exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + ri * dim, dim), k, elected, s_size, &res[0]);
        double end_exact = omp_get_wtime();
        mean_exact_time += (end_exact - start_exact);

        for (int l = 0; l < k; ++l)
          idx_sum += res[l];
      }

      mean_exact_time /= n_sim;
      exact_times.push_back(mean_exact_time);
    }

    ex[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
    return fit_theil_sen(ex, exact_times);
  }

  std::set<Mrpt_Parameters,decltype(is_faster)*> list_parameters(const std::vector<Eigen::MatrixXd> &recalls) {
    std::set<Mrpt_Parameters,decltype(is_faster)*> pars(is_faster);
    std::vector<Eigen::MatrixXd> query_times(depth - depth_min + 1);

    for (int d = depth_min; d <= depth; ++d) {
      Eigen::MatrixXd query_time = Eigen::MatrixXd::Zero(votes_max, n_trees);

      for (int t = 1; t <= n_trees; ++t) {
        int votes_index = votes_max < t ? votes_max : t;
        for (int v = 1; v <= votes_index; ++v) {
          double qt = get_query_time(t, d, v);
          query_time(v - 1, t - 1) = qt;
          Mrpt_Parameters p;
          p.n_trees = t;
          p.depth = d;
          p.votes = v;
          p.k = k;
          p.estimated_qtime = qt;
          p.estimated_recall = recalls[d - depth_min](v - 1, t - 1);
          pars.insert(p);
        }
      }

      query_times[d - depth_min] = query_time;
    }

    return pars;
  }

  std::set<Mrpt_Parameters,decltype(is_faster)*> pareto_frontier(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars) {
    opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
    double best_recall = -1.0;

    // compute the Pareto frontier for query times and recalls
    for (const auto &p : pars) {
      if (p.estimated_recall > best_recall) {
        opt_pars.insert(p);
        best_recall = p.estimated_recall;
      }
    }

    return opt_pars;
  }

  void fit_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
    std::vector<int> exact_x;
    beta_projection = fit_projection_times(Q, exact_x);
    beta_voting = fit_voting_times(Q);
    beta_exact = fit_exact_times(Q);
  }

  static std::pair<double,double> fit_theil_sen(const std::vector<double> &x,
      const std::vector<double> &y) {
    int n = x.size();
    std::vector<double> slopes;
    for (int i = 0; i < n; ++i) {
      for (int j = 0; j < n; ++j) {
        if (i != j)
          slopes.push_back((y[j] - y[i]) / (x[j] - x[i]));
      }
    }

    int n_slopes = slopes.size();
    std::nth_element(slopes.begin(), slopes.begin() + n_slopes / 2, slopes.end());
    double slope = *(slopes.begin() + n_slopes / 2);

    std::vector<double> residuals(n);
    for (int i = 0; i < n; ++i)
      residuals[i] = y[i] - slope * x[i];

    std::nth_element(residuals.begin(), residuals.begin() + n / 2, residuals.end());
    double intercept = *(residuals.begin() + n / 2);

    return std::make_pair(intercept, slope);
  }

  void write_parameters(const Mrpt_Parameters *p, FILE *fd) const {
    if (!fd) {
      return;
    }

    fwrite(&p->n_trees, sizeof(int), 1, fd);
    fwrite(&p->depth, sizeof(int), 1, fd);
    fwrite(&p->votes, sizeof(int), 1, fd);
    fwrite(&p->k, sizeof(int), 1, fd);
    fwrite(&p->estimated_qtime, sizeof(double), 1, fd);
    fwrite(&p->estimated_recall, sizeof(double), 1, fd);
  }

  void read_parameters(Mrpt_Parameters *p, FILE *fd) {
    fread(&p->n_trees, sizeof(int), 1, fd);
    fread(&p->depth, sizeof(int), 1, fd);
    fread(&p->votes, sizeof(int), 1, fd);
    fread(&p->k, sizeof(int), 1, fd);
    fread(&p->estimated_qtime, sizeof(double), 1, fd);
    fread(&p->estimated_recall, sizeof(double), 1, fd);
  }

  void write_parameter_list(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars,
      FILE *fd) const {
    if (!fd) {
      return;
    }

    int par_sz = pars.size();
    fwrite(&par_sz, sizeof(int), 1, fd);

    for (const auto p : pars)
      write_parameters(&p, fd);
  }

  void read_parameter_list(FILE *fd) {
    if (!fd) {
      return;
    }

    opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
    int par_sz = 0;
    fread(&par_sz, sizeof(int), 1, fd);

    for (int i = 0; i < par_sz; ++i) {
      Mrpt_Parameters p;
      read_parameters(&p, fd);
      opt_pars.insert(p);
    }
  }

  Mrpt_Parameters parameters(double target_recall) const {
    double tr = target_recall - epsilon;
    for (const auto &p : opt_pars) {
      if (p.estimated_recall > tr) {
        return p;
      }
    }

    if (!opt_pars.empty()) {
      return *(opt_pars.rbegin());
    }

    return Mrpt_Parameters();
  }

  /**
  * Computes the leaf sizes of a tree assuming a median split and that
  * when the number of points is odd, the extra point is always assigned
  * to the left branch.
  */
  static void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
    if (level == tree_depth) {
      out_leaf_sizes.push_back(n);
      return;
    }

    count_leaf_sizes(n - n / 2, level + 1, tree_depth, out_leaf_sizes);
    count_leaf_sizes(n / 2, level + 1, tree_depth, out_leaf_sizes);
  }

  /**
  * Computes indices of the first elements of leaves in a vector containing
  * all the leaves of a tree concatenated. Assumes that a median split is
  * used and that when the number of points is odd, the extra point is
  * always assigned to the left branch.
  */
  static void count_first_leaf_indices(std::vector<int> &indices, int n, int depth) {
    std::vector<int> leaf_sizes;
    count_leaf_sizes(n, 0, depth, leaf_sizes);

    indices = std::vector<int>(leaf_sizes.size() + 1);
    indices[0] = 0;
    for (int i = 0; i < (int) leaf_sizes.size(); ++i)
      indices[i + 1] = indices[i] + leaf_sizes[i];
  }

  static void count_first_leaf_indices_all(std::vector<std::vector<int>> &indices, int n, int depth_max) {
    for (int d = 0; d <= depth_max; ++d) {
      std::vector<int> idx;
      count_first_leaf_indices(idx, n, d);
      indices.push_back(idx);
    }
  }

  static double predict_theil_sen(double x, std::pair<double,double> beta) {
    return beta.first + beta.second * x;
  }

  double get_candidate_set_size(int tree, int depth, int v) const {
    return cs_sizes[depth - depth_min](v - 1, tree - 1);
  }

  double get_projection_time(int n_trees, int depth, int v) const {
    return predict_theil_sen(n_trees * depth, beta_projection);
  }

  double get_voting_time(int n_trees, int depth, int v) const {
    const std::map<int,std::pair<double,double>> &beta = beta_voting[depth - depth_min];

    if (v <= 0 || beta.empty()) {
      return 0.0;
    }

    for (const auto &b : beta) {
      if (v <= b.first) {
        return predict_theil_sen(n_trees, b.second);
      }
    }

    return predict_theil_sen(n_trees, beta.rbegin()->second);
  }

  double get_exact_time(int n_trees, int depth, int v) const {
    return predict_theil_sen(get_candidate_set_size(n_trees, depth, v), beta_exact);
  }

  double get_query_time(int tree, int depth, int v) const {
    return get_projection_time(tree, depth, v) +
           get_voting_time(tree, depth, v) +
           get_exact_time(tree, depth, v);
  }

  std::vector<int> sample_indices(int n_test, int seed = 0) const {
    std::random_device rd;
    int s = seed ? seed : rd();
    std::mt19937 gen(s);

    std::vector<int> indices_data(n_samples);
    std::iota(indices_data.begin(), indices_data.end(), 0);
    std::shuffle(indices_data.begin(), indices_data.end(), gen);
    return std::vector<int>(indices_data.begin(), indices_data.begin() + n_test);
  }

  Eigen::MatrixXf subset(const std::vector<int> &indices) const {
    int n_test = indices.size();
    Eigen::MatrixXf Q = Eigen::MatrixXf(dim, n_test);
    for (int i = 0; i < n_test; ++i)
      Q.col(i) = X.col(indices[i]);

    return Q;
  }

  const Eigen::Map<const Eigen::MatrixXf> X; // the data matrix
  Eigen::MatrixXf split_points; // all split points in all trees
  std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees
  Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees
  Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees
  std::vector<std::vector<int>> leaf_first_indices_all; // first indices for each level
  std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves

  const int n_samples; // sample size of data
  const int dim; // dimension of data

  Mrpt_Parameters par;
  int n_trees = 0; // number of RP-trees
  int depth = 0; // depth of an RP-tree with median split
  float density = -1.0; // expected ratio of non-zero components in a projection matrix
  int n_pool = 0; // amount of random vectors needed for all the RP-trees
  int n_array = 0; // length of one RP-tree as an array
  int votes = 0; // optimal number of votes to use
  int k = 0;
  enum itype {normal, autotuned, autotuned_unpruned};
  itype index_type = normal;

  // Member variables used in autotuning:
  int depth_min = 0;
  int votes_max = 0;
  const double epsilon = 0.0001; // error bound for comparisons of recall levels
  std::vector<Eigen::MatrixXd> cs_sizes;
  std::pair<double,double> beta_projection, beta_exact;
  std::vector<std::map<int,std::pair<double,double>>> beta_voting;
  std::set<Mrpt_Parameters,decltype(is_faster)*> opt_pars;
};

#endif // CPP_MRPT_H_
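/*
 * A minimal end-to-end usage sketch (illustrative only: `X` is assumed to be
 * a dim x n Eigen::MatrixXf data set and `q` a pointer to a dim-dimensional
 * query point; neither is defined in this header):
 *
 *   Mrpt index(X);
 *   index.grow_autotune(10);            // autotune for k = 10
 *   Mrpt index90 = index.subset(0.9);   // fastest parameters with recall >= 0.9
 *   std::vector<int> nn(10);
 *   index90.query(q, nn.data());        // k and vote threshold are preset
 */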
GB_sort.c
//------------------------------------------------------------------------------
// GB_sort: sort all vectors in a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#include "GB_sort.h"
#include "GB_werk.h"
#include "GB_transpose.h"
#include "GB_ek_slice.h"

// macros:

// GB_SORT (func)   defined as GB_sort_func_TYPE_ascend or _descend,
//                  GB_msort_ISO_ascend or _descend,
//                  or GB_msort_func_UDT
// GB_TYPE          bool, int8_, ... or GB_void for UDT
// GB_ADDR(A,p)     A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE          size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i)    x = (op->xtype) X [i]
// GB_COPY(A,i,C,k) A [i] = C [k]
// GB_SWAP(A,i,k)   swap A [i] and A [k]
// GB_LT            compare two entries, x < y

//------------------------------------------------------------------------------
// macros for all built-in types
//------------------------------------------------------------------------------

#define GB_SORT_UDT 0
#define GB_ADDR(A,i) ((A) + (i))
#define GB_GET(x,A,i) GB_TYPE x = A [i]
#define GB_COPY(A,i,B,j) A [i] = B [j]
#define GB_SIZE sizeof (GB_TYPE)
#define GB_SWAP(A,i,j) { GB_TYPE t = A [i] ; A [i] = A [j] ; A [j] = t ; }

//------------------------------------------------------------------------------
// ascending sort for built-in types
//------------------------------------------------------------------------------

#define GB_LT(less,a,i,b,j) \
    less = (((a) < (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))

#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_BOOL)
#include "GB_sort_template.c"

#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT8)
#include "GB_sort_template.c"

#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT16)
#include "GB_sort_template.c"

#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT32)
#include "GB_sort_template.c"

#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT64)
#include "GB_sort_template.c"

#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT8)
#include "GB_sort_template.c"

#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT16)
#include "GB_sort_template.c"

#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT32)
#include "GB_sort_template.c"

#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT64)
#include "GB_sort_template.c"

#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP32)
#include "GB_sort_template.c"

#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP64)
#include "GB_sort_template.c"

//------------------------------------------------------------------------------
// descending sort for built-in types
//------------------------------------------------------------------------------

#undef  GB_LT
#define GB_LT(less,a,i,b,j) \
    less = (((a) > (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_BOOL)
#include "GB_sort_template.c"

#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT8)
#include "GB_sort_template.c"

#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT16)
#include "GB_sort_template.c"

#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT32)
#include "GB_sort_template.c"

#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT64)
#include "GB_sort_template.c"

#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT8)
#include "GB_sort_template.c"

#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT16)
#include "GB_sort_template.c"

#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT32)
#include "GB_sort_template.c"

#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT64)
#include "GB_sort_template.c"

#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP32)
#include "GB_sort_template.c"

#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP64)
#include "GB_sort_template.c"

//------------------------------------------------------------------------------
// macros for user-defined types and when typecasting is performed
//------------------------------------------------------------------------------

#undef GB_ADDR
#undef GB_GET
#undef GB_COPY
#undef GB_SIZE
#undef GB_SWAP
#undef GB_LT

#define GB_ADDR(A,i) ((A) + (i) * csize)
#define GB_GET(x,A,i) GB_void x [GB_VLA(xsize)] ; \
    fcast (x, GB_ADDR (A, i), csize)
#define GB_COPY(A,i,B,j) memcpy (GB_ADDR (A, i), GB_ADDR (B, j), csize)
#define GB_SIZE csize
#define GB_TYPE GB_void

#define GB_SWAP(A,i,j)                                                  \
{                                                                       \
    GB_void t [GB_VLA(csize)] ;         /* declare the scalar t */      \
    memcpy (t, GB_ADDR (A, i), csize) ; /* t = A [i] */                 \
    GB_COPY (A, i, A, j) ;              /* A [i] = A [j] */             \
    memcpy (GB_ADDR (A, j), t, csize) ; /* A [j] = t */                 \
}

#define GB_LT(less,a,i,b,j)                                             \
{                                                                       \
    flt (&less, a, b) ;         /* less = (a < b) */                    \
    if (!less)                                                          \
    {                                                                   \
        /* check for equality and tie-break on index */                 \
        bool more ;                                                     \
        flt (&more, b, a) ;     /* more = (b < a) */                    \
        less = (more) ? false : ((i) < (j)) ;                           \
    }                                                                   \
}

#undef  GB_SORT_UDT
#define GB_SORT_UDT 1
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _UDT)
#include "GB_sort_template.c"

//------------------------------------------------------------------------------
// GB_sort
//------------------------------------------------------------------------------

#undef  GB_FREE_WORKSPACE
#define GB_FREE_WORKSPACE                       \
{                                               \
    GB_WERK_POP (C_ek_slicing, int64_t) ;       \
    GB_Matrix_free (&T) ;                       \
}

#undef  GB_FREE_ALL
#define GB_FREE_ALL                             \
{                                               \
    GB_FREE_WORKSPACE ;                         \
    if (!C_is_NULL) GB_phbix_free (C) ;         \
    GB_phbix_free (P) ;                         \
}

// redefine to use the revised GB_FREE_ALL above:
#include "GB_static_header.h"

GrB_Info GB_sort
(
    // output:
    GrB_Matrix C,               // matrix with sorted vectors on output
    GrB_Matrix P,               // matrix with permutations on output
    // input:
    GrB_BinaryOp op,            // comparator for the sort
    GrB_Matrix A,               // matrix to sort
    const bool A_transpose,     // false: sort each row, true: sort each column
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (A, "A for GB_sort", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for GB_sort", GB0) ;

    GrB_Matrix T = NULL ;
    struct GB_Matrix_opaque T_header ;
    GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    bool C_is_NULL = (C == NULL) ;
    if (C_is_NULL && P == NULL)
    {
        // either C or P (or both) must be present
        return (GrB_NULL_POINTER) ;
    }

    GrB_Type atype = A->type ;
    GrB_Type ctype = (C_is_NULL) ? atype : C->type ;
    GrB_Type ptype = (P == NULL) ? GrB_INT64 : P->type ;

    if (op->ztype != GrB_BOOL || op->xtype != op->ytype || atype != ctype ||
        !(ptype == GrB_INT64 || ptype == GrB_UINT64) ||
        !GB_Type_compatible (atype, op->xtype))
    {
        // op must return bool, and its inputs x and y must have the same
        // type; the types of A and C must match exactly; P must be INT64 or
        // UINT64; and it must be possible to typecast A and C to the input
        // type of the op.
        return (GrB_DOMAIN_MISMATCH) ;
    }

    int64_t anrows = GB_NROWS (A) ;
    int64_t ancols = GB_NCOLS (A) ;
    if ((C != NULL && (GB_NROWS (C) != anrows || GB_NCOLS (C) != ancols)) ||
        (P != NULL && (GB_NROWS (P) != anrows || GB_NCOLS (P) != ancols)))
    {
        // C and P must have the same dimensions as A
        return (GrB_DIMENSION_MISMATCH) ;
    }

    bool A_iso = A->iso ;
    bool sort_in_place = (A == C) ;

    // free any prior content of C and P
    GB_phbix_free (P) ;
    if (!sort_in_place)
    {
        GB_phbix_free (C) ;
    }

    //--------------------------------------------------------------------------
    // make a copy of A, unless it is aliased with C
    //--------------------------------------------------------------------------

    if (C_is_NULL)
    {
        // C is a temporary matrix, which is freed when done
        GB_CLEAR_STATIC_HEADER (T, &T_header) ;
        C = T ;
    }

    if (A_transpose)
    {
        // ensure C is in sparse or hypersparse CSC format
        if (A->is_csc)
        {
            // A is already CSC
            if (!sort_in_place)
            {
                // C = A
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSR but C must be CSC
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, true, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, true, A, false, Context)) ;
            }
        }
    }
    else
    {
        // ensure C is in sparse or hypersparse CSR format
        if (!A->is_csc)
        {
            // A is already CSR
            if (!sort_in_place)
            {
                // C = A
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSC but C must be CSR
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, false, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, false, A, false, Context)) ;
            }
        }
    }

    // ensure C is sparse or hypersparse
    if (GB_IS_BITMAP (C) || GB_IS_FULL (C))
    {
        GB_OK (GB_convert_any_to_sparse (C, Context)) ;
    }

    //--------------------------------------------------------------------------
    // sort C in place
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    GB_Type_code acode = atype->code ;

    if ((op->xtype == atype) && (op->ytype == atype) &&
        (opcode == GB_LT_binop_code || opcode == GB_GT_binop_code) &&
        (acode < GB_UDT_code))
    {

        //----------------------------------------------------------------------
        // no typecasting, using built-in < or > operators, builtin types
        //----------------------------------------------------------------------

        if (opcode == GB_LT_binop_code)
        {
            // ascending sort
            switch (acode)
            {
                case GB_BOOL_code   : GB_OK (GB(sort_matrix_ascend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code   : GB_OK (GB(sort_matrix_ascend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code  : GB_OK (GB(sort_matrix_ascend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code  : GB_OK (GB(sort_matrix_ascend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code  : GB_OK (GB(sort_matrix_ascend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code  : GB_OK (GB(sort_matrix_ascend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code : GB_OK (GB(sort_matrix_ascend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code : GB_OK (GB(sort_matrix_ascend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code : GB_OK (GB(sort_matrix_ascend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code   : GB_OK (GB(sort_matrix_ascend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code   : GB_OK (GB(sort_matrix_ascend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
        else // opcode == GB_GT_binop_code
        {
            // descending sort
            switch (acode)
            {
                case GB_BOOL_code   : GB_OK (GB(sort_matrix_descend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code   : GB_OK (GB(sort_matrix_descend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code  : GB_OK (GB(sort_matrix_descend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code  : GB_OK (GB(sort_matrix_descend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code  : GB_OK (GB(sort_matrix_descend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code  : GB_OK (GB(sort_matrix_descend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code : GB_OK (GB(sort_matrix_descend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code : GB_OK (GB(sort_matrix_descend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code : GB_OK (GB(sort_matrix_descend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code   : GB_OK (GB(sort_matrix_descend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code   : GB_OK (GB(sort_matrix_descend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // typecasting, user-defined types, or unconventional operators
        //----------------------------------------------------------------------

        GB_OK (GB (sort_matrix_UDT) (C, op, Context)) ;
    }

    //--------------------------------------------------------------------------
    // construct the final indices
    //--------------------------------------------------------------------------

    int64_t cnz = GB_nnz (C) ;
    int64_t cnvec = C->nvec ;
    int64_t *restrict Ti = NULL ;

    if (P == NULL)
    {
        // P is not constructed; use C->i to construct the new indices
        Ti = C->i ;
    }
    else
    {
        // allocate P->i and use it to construct the new indices
        P->i = GB_MALLOC (cnz, int64_t, &(P->i_size)) ;
        if (P->i == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Ti = P->i ;
    }

    int C_nthreads, C_ntasks ;
    GB_SLICE_MATRIX (C, 1, chunk) ;
    int64_t *restrict Cp = C->p ;
    const int64_t cvlen = C->vlen ;
    int tid ;

    #pragma omp parallel for num_threads(C_nthreads) schedule(static,1)
    for (tid = 0 ; tid < C_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Cslice [tid] ;
        int64_t klast = klast_Cslice [tid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            const int64_t pC0 = Cp [k] ;
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, tid, k, kfirst, klast, pstart_Cslice, Cp, cvlen) ;
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                Ti [pC] = pC - pC0 ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // construct P
    //--------------------------------------------------------------------------

    bool C_is_hyper = GB_IS_HYPERSPARSE (C) ;
    if (P != NULL)
    {
        P->is_csc = C->is_csc ;
        P->nvec = C->nvec ;
        P->nvec_nonempty = C->nvec_nonempty ;
        P->iso = false ;
        P->vlen = C->vlen ;
        P->vdim = C->vdim ;

        if (C_is_NULL)
        {
            // the values of C are not needed. The indices of C become the
            // values of P, Cp becomes Pp, and Ch (if present) becomes Ph.
            P->x = C->i ; C->i = NULL ; P->x_size = C->i_size ;
            P->p = C->p ; C->p = NULL ; P->p_size = C->p_size ;
            P->h = C->h ; C->h = NULL ; P->h_size = C->h_size ;
            P->plen = C->plen ;
        }
        else
        {
            // C is required on output. The indices of C are copied and
            // become the values of P. Cp is copied to Pp, and Ch (if present)
            // is copied to Ph.
            P->plen = cnvec ;
            P->x = GB_MALLOC (cnz, int64_t, &(P->x_size)) ;     // x:OK
            P->p = GB_MALLOC (cnvec+1, int64_t, &(P->p_size)) ;
            P->h = NULL ;
            if (C_is_hyper)
            {
                P->h = GB_MALLOC (cnvec, int64_t, &(P->h_size)) ;
            }
            if (P->x == NULL || P->p == NULL || (C_is_hyper && P->h == NULL))
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            // copy from C to P
            GB_memcpy (P->x, C->i, cnz * sizeof (int64_t), nthreads_max) ;
            GB_memcpy (P->p, C->p, (cnvec+1) * sizeof (int64_t), nthreads_max) ;
            if (C_is_hyper)
            {
                GB_memcpy (P->h, C->h, cnvec * sizeof (int64_t), nthreads_max) ;
            }
        }
        P->magic = GB_MAGIC ;
    }

    //--------------------------------------------------------------------------
    // finalize the pattern of C
    //--------------------------------------------------------------------------

    if (!C_is_NULL && P != NULL)
    {
        // copy P->i into C->i
        GB_memcpy (C->i, P->i, cnz * sizeof (int64_t), nthreads_max) ;
    }

    //--------------------------------------------------------------------------
    // free workspace, and conform/return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    if (!C_is_NULL)
    {
        ASSERT_MATRIX_OK (C, "C output of GB_sort (before conform)", GB0) ;
        GB_OK (GB_conform (C, Context)) ;
        ASSERT_MATRIX_OK (C, "C output of GB_sort", GB0) ;
    }
    if (P != NULL)
    {
        ASSERT_MATRIX_OK (P, "P output of GB_sort (before conform)", GB0) ;
        GB_OK (GB_conform (P, Context)) ;
        ASSERT_MATRIX_OK (P, "P output of GB_sort", GB0) ;
    }
    return (GrB_SUCCESS) ;
}
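//------------------------------------------------------------------------------
// usage note
//------------------------------------------------------------------------------

// GB_sort is the internal kernel behind the user-level GxB_Matrix_sort.
// A hedged usage sketch (A, C, and P are assumed to be already-created
// GrB_Matrix objects with matching dimensions; with GrB_LT_FP64 each row
// of a double matrix is sorted in ascending order):
//
//      GxB_Matrix_sort (C, P, GrB_LT_FP64, A, NULL) ;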
integral.c
/**
 * integral.c
 *
 * Computing an integral with MPI or OpenMP
 *
 * @author pikryukov
 * @version 1.0
 *
 * e-mail: [email protected]
 *
 * Copyright (C) Kryukov Pavel 2012
 * for MIPT MPI course.
 */

/* C generic */
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* exit */
#include <math.h>   /* sin */
#include <assert.h>

#ifdef USE_MPI
/* If USE_MPI is defined, we use MPI; otherwise, OpenMP */
#include <mpi.h>
#define ERRORPRINT(...) \
    {if (!rank) fprintf(stderr, __VA_ARGS__); MPI_Finalize(); exit(1);}
#define PRINT(...) {if (!rank) printf(__VA_ARGS__);}
#else
#define ERRORPRINT(...) {fprintf(stderr, __VA_ARGS__); exit(1);}
#define PRINT(...) printf(__VA_ARGS__)
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define FUNC(x) (sin(1 / (x)))
#define ZERO(x) (1 / (M_PI * (x))) /* x-th zero of FUNC; #1 is the greatest. */

#define N 10000 /* Number of zeroes to integrate over */
#define M 10000 /* Number of nodes between zeroes */

/**
 * Computes the integral of FUNC from a to b using
 * the trapezoid method with M nodes
 * @param a left edge
 * @param b right edge
 * @return integral
 */
double monointegral(double a, double b)
{
    int i;
    double tau = (b - a) / M;
    double start = a;
    double sum = 0.;
    for (i = 0; i < M; ++i) {
        /* Accumulate with the trapezoid rule */
        sum += tau * (FUNC(start + tau) + FUNC(start)) * 0.5;

        /* Go to next node */
        start += tau;
    }
    return sum;
}

#ifdef USE_MPI
/**
 * Splits the N jobs among 'size' threads, assigning a contiguous chunk
 * of job numbers to thread 'rank'
 * @param rank rank of current thread
 * @param size size of pool
 * @param start pointer to start number of job
 * @param finish pointer to finish number of job
 */
void splitfine(int rank, int size, int* start, int* finish)
{
    /* We try to split numbers like this:

        0  1  2  3  4   (5 == amount)
        5  6  7  8  9
       10 11 12 13 14
       15 16 17 18 19
       20               <- resRank
       21 22 23 24 25 26
    */
    const unsigned resRank = size - N % size;
    const unsigned amount = N / size;

    *start = rank * amount;
    if (rank > resRank) *start += rank - resRank;

    *finish = *start + amount;
    if (rank >= resRank) ++(*finish);
}
#endif

/**
 * Computes the integral of FUNC from ZERO(N + 1) to ZERO(1)
 * in parallel threads
 * @param rank thread rank (if MPI)
 * @param size pool size (if MPI)
 */
void multiintegral(int rank, int size)
{
    double res, sum = 0.;
    int start, finish, i;
#ifdef USE_MPI
    splitfine(rank, size, &start, &finish);
#else
    start = 0;
    finish = N;
#pragma omp parallel for reduction (+: sum) private(i)
#endif /* USE_MPI */
    for (i = start; i < finish; ++i) {
        sum += monointegral(ZERO(i + 2), ZERO(i + 1));
    }
#ifdef USE_MPI
    MPI_Reduce(&sum, &res, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
#else
    res = sum;
#endif /* USE_MPI */
    PRINT("Integral of sin 1/x from %e to %e is %e\n", ZERO(N + 1), ZERO(1), res);
}

/**
 * Entry point of program
 * @param argc should be 1
 * @param argv no arguments needed
 * @return 0 on success, 1 on error
 */
int main(int argc, char** argv)
{
    int rank = 0, size = 0;
#ifdef USE_MPI
    MPI_Init(&argc, &argv);
    double t = -MPI_Wtime();
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif /* USE_MPI */
    if (argc != 1) ERRORPRINT("Syntax error.\nNo arguments are expected.\n");
    multiintegral(rank, size);
#ifdef USE_MPI
    PRINT("Time is %f s\n", t += MPI_Wtime());
    MPI_Finalize();
#endif /* USE_MPI */
    return 0;
}
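/*
 * Hedged build-and-run sketch (exact flags depend on the toolchain; shown
 * for GCC and a typical MPI compiler wrapper):
 *
 *   mpicc -DUSE_MPI -O2 integral.c -o integral_mpi -lm
 *   mpirun -np 4 ./integral_mpi
 *
 *   gcc -fopenmp -O2 integral.c -o integral_omp -lm
 *   ./integral_omp
 */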
GB_unaryop__abs_int64_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int64_uint16 // op(A') function: GB_tran__abs_int64_uint16 // C type: int64_t // A type: uint16_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int64_uint16 ( int64_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int64_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
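Tracing the macros, the generated apply kernel above expands to a loop of fetch, typecast, and absolute value. A plain-C sketch of that expansion (the function name is illustrative; this is not the generated code itself):

#include <stdint.h>

void unop_abs_int64_uint16 (int64_t *Cx, const uint16_t *Ax, int64_t anz,
    int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;         /* GB_GETA */
        int64_t x = (int64_t) aij ;     /* GB_CASTING */
        Cx [p] = (x < 0) ? (-x) : x ;   /* GB_IABS; never negative here, aij is unsigned */
    }
}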
gra.c
/***************************************************************************** * * Elmer, A Finite Element Software for Multiphysical Problems * * Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library (in file ../LGPL-2.1); if not, write * to the Free Software Foundation, Inc., 51 Franklin Street, * Fifth Floor, Boston, MA 02110-1301 USA * *****************************************************************************/ /******************************************************************************* * * MATC graphics main module. * ******************************************************************************* * * Author: Juha Ruokolainen * * Address: CSC - IT Center for Science Ltd. * Keilaranta 14, P.O. BOX 405 * 02101 Espoo, Finland * Tel. +358 0 457 2723 * Telefax: +358 0 457 2302 * EMail: [email protected] * * Date: 30 May 1996 * * Modified by: * * Date of modification: * ******************************************************************************/ /* * $Id: gra.c,v 1.1.1.1 2005/04/14 13:29:14 vierinen Exp $ * * $Log: gra.c,v $ * Revision 1.1.1.1 2005/04/14 13:29:14 vierinen * initial matc automake package * * Revision 1.2 1998/08/01 12:34:40 jpr * * Added Id, started Log. 
* * */ #include "elmer/matc.h" static double gra_vsx, gra_vsy, gra_vtx, gra_vty; #pragma omp threadprivate (gra_vsx, gra_vsy, gra_vtx, gra_vty) void gra_mult(GMATRIX gm1, GMATRIX gm2); void gra_ident(GMATRIX gm); void gra_init_matc(devtype, name) int devtype; char *name; { if ( gra_state.driver != 0 ) { GRA_CLOSE(); } if (name != NULL) { if ((gra_state.out_fp = fopen(name, "w")) == NULL) { error("gra: open: Can't open named output stream\n"); } } gra_funcs[G_VIEWPORT] = gra_set_viewport; gra_funcs[G_WINDOW] = gra_set_window; gra_funcs[G_PERSPECTIVE] = gra_perspective; gra_funcs[G_TRANSLATE] = gra_translate; gra_funcs[G_ROTATE] = gra_rotate; gra_funcs[G_SCALE] = gra_scale; gra_funcs[G_VIEWPOINT] = gra_viewpoint; gra_funcs[G_GETMATRIX] = gra_getmatrix; gra_funcs[G_SETMATRIX] = gra_setmatrix; gra_funcs[G_DBUFFER] = gra_dbuffer_null; gra_funcs[G_SBUFFER] = gra_dbuffer_null; gra_funcs[G_SWAPBUF] = gra_dbuffer_null; switch(devtype) { #ifdef GRA_DRV_IRIS case 1: case 2: gra_funcs[G_OPEN] = gra_iris_open; gra_funcs[G_CLOSE] = gra_iris_close; gra_funcs[G_CLEAR] = gra_iris_clear; gra_funcs[G_VIEWPORT] = gra_iris_viewport; gra_funcs[G_WINDOW] = gra_iris_window; gra_funcs[G_PERSPECTIVE] = gra_iris_perspective; gra_funcs[G_TRANSLATE] = gra_iris_translate; gra_funcs[G_ROTATE] = gra_iris_rotate; gra_funcs[G_SCALE] = gra_iris_scale; gra_funcs[G_VIEWPOINT] = gra_iris_viewpoint; gra_funcs[G_DEFCOLOR] = gra_iris_defcolor; gra_funcs[G_COLOR] = gra_iris_color; gra_funcs[G_POLYLINE] = gra_iris_polyline; gra_funcs[G_DRAW] = gra_iris_draw; gra_funcs[G_MOVE] = gra_iris_move; gra_funcs[G_POLYMARKER] = gra_iris_polymarker; gra_funcs[G_MARKER] = gra_iris_marker; gra_funcs[G_AREAFILL] = gra_iris_areafill; gra_funcs[G_IMAGE] = gra_iris_image; gra_funcs[G_TEXT] = gra_iris_text; gra_funcs[G_SETMATRIX] = gra_iris_setmatrix; gra_funcs[G_FLUSH] = gra_iris_flush; gra_funcs[G_RESET] = gra_iris_reset; gra_funcs[G_DBUFFER] = gra_iris_dbuffer; gra_funcs[G_SBUFFER] = gra_iris_sbuffer; gra_funcs[G_SWAPBUF] = gra_iris_swapbuf; gra_state.driver = GRA_DRV_IRIS; break; #endif #ifdef GRA_DRV_TEKLIB case 4105: case 4107: case 4111: case 4128: case 4129: gra_funcs[G_OPEN] = gra_teklib_open; gra_funcs[G_CLOSE] = gra_teklib_close; gra_funcs[G_CLEAR] = gra_teklib_clear; gra_funcs[G_DEFCOLOR] = gra_teklib_defcolor; gra_funcs[G_COLOR] = gra_teklib_color; gra_funcs[G_POLYLINE] = gra_teklib_polyline; gra_funcs[G_DRAW] = gra_teklib_draw; gra_funcs[G_MOVE] = gra_teklib_move; gra_funcs[G_POLYMARKER] = gra_teklib_polymarker; gra_funcs[G_MARKER] = gra_teklib_marker; gra_funcs[G_AREAFILL] = gra_teklib_areafill; gra_funcs[G_IMAGE] = gra_teklib_image; gra_funcs[G_TEXT] = gra_teklib_text; gra_funcs[G_FLUSH] = gra_teklib_flush; gra_funcs[G_RESET] = gra_teklib_reset; gra_state.driver = GRA_DRV_TEKLIB; break; #endif #ifdef GRA_DRV_PS case 4: gra_funcs[G_OPEN] = gra_ps_open; gra_funcs[G_CLOSE] = gra_ps_close; gra_funcs[G_CLEAR] = gra_ps_clear; gra_funcs[G_DEFCOLOR] = gra_ps_defcolor; gra_funcs[G_COLOR] = gra_ps_color; gra_funcs[G_POLYLINE] = gra_ps_polyline; gra_funcs[G_DRAW] = gra_ps_draw; gra_funcs[G_MOVE] = gra_ps_move; gra_funcs[G_POLYMARKER] = gra_ps_polymarker; gra_funcs[G_MARKER] = gra_ps_marker; gra_funcs[G_AREAFILL] = gra_ps_areafill; gra_funcs[G_IMAGE] = gra_ps_image; gra_funcs[G_TEXT] = gra_ps_text; gra_funcs[G_FLUSH] = gra_ps_flush; gra_funcs[G_RESET] = gra_ps_reset; gra_state.driver = GRA_DRV_PS; break; #endif default: error("gra: Unknown device selection\n"); break; } GRA_OPEN(devtype); gra_ident(gra_state.modelm); gra_ident(gra_state.viewm); 
gra_ident(gra_state.projm); gra_ident(gra_state.transfm); GRA_WINDOW(-1.0,1.0,-1.0,1.0,-1.0,1.0); GRA_VIEWPORT(0.0,1.0,0.0,1.0); gra_state.pratio = 0.0; } void gra_close_sys() { int i; if (gra_state.out_fp != NULL) { fclose(gra_state.out_fp); gra_state.out_fp = NULL; } for(i = 0; i < GRA_FUNCS; i++) { gra_funcs[i] = gra_error; } gra_state.driver = 0; } void gra_dbuffer_null() {}; void gra_getmatrix(gm) GMATRIX gm; { memcpy((char *)gm, (char *)gra_state.transfm,sizeof(GMATRIX)); } void gra_setmatrix(gm) GMATRIX gm; { memcpy((char *)gra_state.transfm,(char *)gm,sizeof(GMATRIX)); gra_ident(gra_state.modelm); gra_ident(gra_state.projm); gra_ident(gra_state.viewm); } void gra_set_transfm() { int i,j; for(i = 0; i < 4; i++) for(j = 0; j < 4; j++) { gra_state.transfm[i][j] = gra_state.modelm[i][j]; } gra_mult(gra_state.transfm,gra_state.viewm); gra_mult(gra_state.transfm,gra_state.projm); } void gra_mult(gm1, gm2) GMATRIX gm1, gm2; { int i,j,k; double s[4]; for(i = 0; i < 4; i++) { for(j = 0; j < 4; j++) { s[j] = 0.0; for(k = 0; k < 4; k++) { s[j] += gm1[i][k] * gm2[k][j]; } } for(j = 0; j < 4; j++) gm1[i][j] = s[j]; } } void gra_ident(gm) GMATRIX gm; { gm[0][0] = 1; gm[0][1] = 0; gm[0][2] = 0; gm[0][3] = 0; gm[1][0] = 0; gm[1][1] = 1; gm[1][2] = 0; gm[1][3] = 0; gm[2][0] = 0; gm[2][1] = 0; gm[2][2] = 1; gm[2][3] = 0; gm[3][0] = 0; gm[3][1] = 0; gm[3][2] = 0; gm[3][3] = 1; } void gra_viewpoint(xf,yf,zf,xt,yt,zt) double xf,yf,zf,xt,yt,zt; { GMATRIX gvm; double r1,r2; /* * translate(-vpf); */ gra_ident(gra_state.viewm); gra_state.viewm[3][0] = -xf; gra_state.viewm[3][1] = -yf; gra_state.viewm[3][2] = -zf; xf = xf - xt; yf = yf - yt; zf = zf - zt; /* * rotate(90 0 0) */ gra_ident(gvm); gvm[1][2] = -1; gvm[2][1] = 1; gvm[1][1] = 0; gvm[2][2] = 0; gra_mult(gra_state.viewm, gvm); r1 = sqrt(xf*xf + yf*yf); if (r1 != 0) { gra_ident(gvm); gvm[0][0] = -yf/r1; gvm[2][2] = gvm[0][0]; gvm[0][2] = xf/r1; gvm[2][0] = -gvm[0][2]; gra_mult(gra_state.viewm,gvm); } r2 = sqrt(yf*yf + zf*zf); if (r2 != 0) { gra_ident(gvm); gvm[1][1] = r1/r2; gvm[2][2] = gvm[1][1]; gvm[1][2] = zf/r2; gvm[2][1] = -gvm[1][2]; gra_mult(gra_state.viewm,gvm); } gra_ident(gvm); gvm[2][2] = -1.0; gra_mult(gra_state.viewm,gvm); gra_set_transfm(); } void gra_rotate(rx, ry, rz) double rx, ry, rz; { static double pip180 = 3.1415926535898/180.0; #pragma omp threadprivate (pip180) GMATRIX grm; rx *= pip180; gra_ident(grm); grm[1][1] = cos(rx); grm[1][2] = -sin(rx); grm[2][1] = sin(rx); grm[2][2] = cos(rx); gra_mult(gra_state.modelm,grm); ry *= pip180; gra_ident(grm); grm[0][0] = cos(ry); grm[0][2] = sin(ry); grm[2][0] = -sin(ry); grm[2][2] = cos(ry); gra_mult(gra_state.modelm,grm); rz *= pip180; gra_ident(grm); grm[0][0] = cos(rz); grm[0][1] = -sin(rz); grm[1][0] = sin(rz); grm[1][1] = cos(rz); gra_mult(gra_state.modelm,grm); gra_set_transfm(); } void gra_scale(sx, sy, sz) double sx, sy, sz; { GMATRIX gsm; gra_ident(gsm); gsm[0][0] = sx; gsm[1][1] = sy; gsm[2][2] = sz; gra_mult(gra_state.modelm,gsm); gra_set_transfm(); } void gra_translate(tx, ty, tz) double tx, ty, tz; { GMATRIX gtm; gra_ident(gtm); gtm[3][0] = tx; gtm[3][1] = ty; gtm[3][2] = tz; gra_mult(gra_state.modelm,gtm); gra_set_transfm(); } void gra_perspective(r) double r; { gra_ident(gra_state.projm); gra_state.projm[0][0] = r; gra_state.projm[1][1] = r; gra_state.pratio = r; gra_set_transfm(); } void gra_set_proj() { gra_vsx = (gra_state.viewport.xhigh - gra_state.viewport.xlow) / 2; gra_vsy = (gra_state.viewport.yhigh - gra_state.viewport.ylow) / 2; gra_vtx = gra_state.viewport.xlow + 
gra_vsx; gra_vty = gra_state.viewport.ylow + gra_vsy; } void gra_set_window(x1,x2,y1,y2,z1,z2) double x1,x2,y1,y2,z1,z2; { GMATRIX gvm; gra_state.window.xlow = x1; gra_state.window.xhigh = x2; gra_state.window.ylow = y1; gra_state.window.yhigh = y2; gra_state.window.zlow = z1; gra_state.window.zhigh = z2; gra_ident(gra_state.projm); gra_state.projm[0][0] = 2 / (x2-x1); gra_state.projm[1][1] = 2 / (y2-y1); gra_state.projm[2][2] = 2 / (z2-z1); gra_ident(gvm); gvm[3][0] = -1 - gra_state.projm[0][0] * x1; gvm[3][1] = -1 - gra_state.projm[1][1] * y1; gvm[3][2] = -1 - gra_state.projm[2][2] * z1; gra_mult(gra_state.projm, gvm); gra_state.pratio = 0.0; gra_set_transfm(); } void gra_set_viewport(x1,x2,y1,y2) double x1, x2, y1, y2; { gra_state.viewport.xlow = x1; gra_state.viewport.xhigh = x2; gra_state.viewport.ylow = y1; gra_state.viewport.yhigh = y2; gra_set_proj(); } void gra_mtrans(x,y,z,xe,ye,ze) double x,y,z,*xe,*ye,*ze; { *xe = x * gra_state.transfm[0][0] + y * gra_state.transfm[1][0] + z * gra_state.transfm[2][0] + gra_state.transfm[3][0]; *ye = x * gra_state.transfm[0][1] + y * gra_state.transfm[1][1] + z * gra_state.transfm[2][1] + gra_state.transfm[3][1]; *ze = x * gra_state.transfm[0][2] + y * gra_state.transfm[1][2] + z * gra_state.transfm[2][2] + gra_state.transfm[3][2]; if (gra_state.pratio > 0.0 && *ze != 0.0) { *xe /= *ze; *ye /= *ze; } } /* * Window to viewport transformation * * ViewportX = ScaleX * WindowX + TransX * ScaleX = (vp.xmax - vp.xmin) / (w.xmax - w.xmin) * TransX = (vp.xmin - ScaleX * w.xmin) */ void gra_window_to_viewport(x, y, z, xs, ys) double x, y, z, *xs, *ys; { /* double xe, ye, ze; gra_mtrans(x, y, z, &xe, &ye, &ze); */ *xs = gra_vsx * x + gra_vtx; *ys = gra_vsy * y + gra_vty; } void gra_error() { error("gra: graphics package not initialized\n"); }
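A worked numeric example of the window-to-viewport mapping documented above (a standalone sketch; gra_state is not involved). Note that for the default window [-1,1] set by gra_init_matc, ScaleX and TransX reduce to exactly the gra_vsx and gra_vtx computed by gra_set_proj:

#include <stdio.h>

int main(void)
{
    /* window x-range [-1, 1] mapped onto viewport x-range [0.2, 0.8] */
    double wxmin = -1.0, wxmax = 1.0, vxmin = 0.2, vxmax = 0.8;
    double scale = (vxmax - vxmin) / (wxmax - wxmin); /* ScaleX = 0.3 */
    double trans = vxmin - scale * wxmin;             /* TransX = 0.5 */
    for (double x = -1.0; x <= 1.0; x += 0.5)
        printf("window %5.2f -> viewport %5.2f\n", x, scale * x + trans);
    return 0; /* -1 -> 0.2, 0 -> 0.5, 1 -> 0.8 */
}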
micksort.c
/** * micksort - counting sort on MPI * * (C) 2018 by Micky Faas, LIACS Leiden * micky<at>eduktity<dot>org */ #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <stdbool.h> #include <inttypes.h> #include <assert.h> #include <string.h> #include <math.h> #include <getopt.h> #include <emmintrin.h> #include <pmmintrin.h> #define FAST_RAND // Use the old-fashioned POSIX PRNG instead of the new one from (G)LIBC #define HALF_BINS // Use bins of 4 bits instead of 8 to save memory and improve cache performance typedef uint8_t map_t; typedef map_t* mapptr_t; //#define N RAND_MAX+1UL // Number of possible values obtained from rand() #define N 2147483648UL //#define N 64 #ifdef HALF_BINS #define BUF_SIZE 1073741824UL #define BINS_PER_BYTE 2 #else #define BUF_SIZE N #define BINS_PER_BYTE 1 #endif #define N_SEEDS 5 static const uint32_t SEED[N_SEEDS] = { 0x1234abcd, 0x10203040, 0x40e8c724, 0x79cbba1d, 0xac7bd459 }; static uint32_t RSTATE =0; static void srand_simple( uint32_t seed ) { if( seed == 0 ) seed =1; /* the state must be nonzero */ RSTATE =seed; } static inline uint32_t rand_simple() { // Copied from the GLIBC source code for TYPE0 PRNGs RSTATE = ((RSTATE * 1103515245U) + 12345U) & 0x7fffffff; return RSTATE; } static void printMap( mapptr_t map, size_t n, int base ) { for( uint64_t i=0; i < n; i++ ) { if( map[i] ) printf( "%" PRIu64 "(%d), ", i+base, map[i] ); } printf( "\n" ); } static void randSeq2( mapptr_t map, uint64_t count, int seed, int rank ) { memset( map, 0, BUF_SIZE * sizeof(map_t) ); #ifdef FAST_RAND srand_simple( SEED[seed] + rank ); #else srand( SEED[seed] + rank ); #endif for( uint64_t i =0; i < count; i++ ) { #ifdef FAST_RAND uint32_t num = rand_simple() % 64; #else uint32_t num = rand()% 64; #endif #ifdef HALF_BINS uint32_t idx =(num & 0x7FFFFFFE) >> 1; // Remove the least bit, divide by two uint8_t f = map[idx]; // Get the corresponding pair of bins uint32_t pos =num & 0x1; // The least bit selects which bin of the pair to increment uint8_t mask =0xF << (pos ? 4 : 0); // Mask the bin we want // Write back the other bin + our bin incremented by one f = (f & ~mask) | ((f & mask) + (1 << (pos ? 4 : 0))); map[idx] =f; assert( (f & mask) != 0 ); // Check for overflow #else map[num]++; assert( map[num] != 0 ); #endif } } static void mergemm( mapptr_t restrict dst, mapptr_t restrict maps, int n, int stride ) { // The following is the multi-core SIMD variant of the commented code. /*for( int i =0; i < stride; i++ ) { for( int j =0; j < n; j++ ) { dst[i] += maps[j*stride+i]; } }*/ const int blockSize = stride / 64; #pragma omp parallel for for( int b =0; b < stride; b+= blockSize ) { for( int i =0; i < blockSize; i+=16 ) { __m128i sum = _mm_setzero_si128(); for( int j =0; j < n; j++ ) { __m128i a = _mm_load_si128( (const __m128i*) &maps[j*stride+i+b] ); sum = _mm_add_epi8( sum, a ); } _mm_store_si128( (__m128i*) &dst[i+b], sum ); } } } static mapptr_t micksort3( uint64_t count, int seed, int rank, int procs ) { /* Generation part */ const size_t npp = count / procs; // Numbers to generate per process const size_t c = rank==0 ?
count % procs : 0; // Remainder in case count % P != 0 // Generate and sort the local sequence mapptr_t map = _mm_malloc( BUF_SIZE*sizeof(map_t), 16 ); randSeq2( map, npp+c, seed, rank ); //printf( "Node %d: ", rank ); printMap( map, N, 0 ); /* Communication part */ const int bpp = N / procs; // Number of bins per process const int base = rank * bpp; mapptr_t sorted; if( procs > 1 ) { mapptr_t inmap = _mm_malloc( BUF_SIZE * sizeof(map_t), 16 ); MPI_Request reqs[procs]; for( int root =0; root < procs; root++ ) MPI_Igather( map+root*(bpp/BINS_PER_BYTE), bpp/BINS_PER_BYTE, MPI_BYTE, inmap, bpp/BINS_PER_BYTE, MPI_BYTE, root, MPI_COMM_WORLD, reqs+root ); MPI_Waitall( procs, reqs, MPI_STATUSES_IGNORE ); _mm_free( map ); sorted =_mm_malloc( (bpp/BINS_PER_BYTE)*sizeof(map_t), 16 ); mergemm( sorted, inmap, procs, bpp/BINS_PER_BYTE ); _mm_free( inmap ); } else sorted = map; return sorted; } void printParallel( mapptr_t restrict map, unsigned int skip, int rank, int procs ) { const unsigned int bpp = N / procs; // Number of bins per process const unsigned int base = rank * bpp; // Value of the first element in the map unsigned int skipCount =0; // Number of elements skipped so far for( int r =0; r < procs; r++ ) { if( rank == r ) { if( rank != 0 ) { MPI_Recv( &skipCount, 1, MPI_INT, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE ); } for( unsigned int i =0; i < bpp/BINS_PER_BYTE; i++ ) { uint8_t f = map[i]; // Get the corresponding pair of bins #ifdef HALF_BINS int bins[2] = { f & 0xF, (f >> 4) & 0xF }; #else int bins[1] = { f }; #endif for( int b =0; b < BINS_PER_BYTE; b++ ) { for( unsigned int j =0; j < bins[b]; j++ ) { if( skipCount == 0 ) { printf( "%d ", base + i*BINS_PER_BYTE + b ); } if( ++skipCount == skip ) skipCount =0; } } } if( rank != procs - 1 ) MPI_Send( &skipCount, 1, MPI_INT, rank+1, 0, MPI_COMM_WORLD ); else printf( "\n" ); fflush( stdout ); } MPI_Barrier( MPI_COMM_WORLD ); } } int main( int argc, char** argv ) { int procs, rank; int seed =0, skip =10000; uint64_t count = 0; int c; const char * short_opt = "hs:n:i:"; struct option long_opt[] = { {"help", no_argument, NULL, 'h'}, {"count", required_argument, NULL, 'n'}, {"seed", required_argument, NULL, 's'}, {"skip", required_argument, NULL, 'i'}, {NULL, 0, NULL, 0 } }; while( (c = getopt_long( argc, argv, short_opt, long_opt, NULL )) != -1 ) { switch( c ) { case -1: case 0: break; case 's': seed = atoi( optarg ); if( seed < 0 || seed >= N_SEEDS ) { fprintf( stderr, "(e) Invalid seed index.\n" ); return -2; } break; case 'n': count =atol( optarg ); break; case 'i': skip =atoi( optarg ); break; case 'h': printf( "(i) Usage: %s [OPTIONS]\n", argv[0] ); printf( " -s, --seed n seed index for pseudo random numbers 0..%d\n", N_SEEDS-1 ); printf( " -n, --count n total length of the random array\n" ); printf( " -i, --skip n print only every n-th number\n" ); printf( " -h, --help print this help and exit\n" ); printf( "\n"); return 0; case ':': case '?': fprintf( stderr, "(e) Try `%s --help' for more information.\n", argv[0] ); return -2; default: fprintf( stderr, "(e) %s: invalid option -- %c\n", argv[0], c ); fprintf( stderr, "(e) Try `%s --help' for more information.\n", argv[0] ); return -2; }; }; MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &procs ); #ifdef FAST_RAND const char* frstr = "ON"; #else const char* frstr = "OFF"; #endif if(rank==0) printf( "(i) Started.\n(i) Sorting %ld numbers on %d processes.\n(i) Seed is 0x%x\n(i) Fast random is %s\n(i) Bins are %d bits\n", count, procs, 
SEED[seed], frstr, 8 / BINS_PER_BYTE ); double start_time = MPI_Wtime(); mapptr_t map = micksort3( count, seed, rank, procs ); double end_time = MPI_Wtime(); if( skip != 0 ) printParallel( map, skip, rank, procs ); if( rank==0) printf( "(t) Sorting took %gs\n", end_time-start_time ); // Some stuff for debugging // if( rank==0) printMap( map ); /* if( rank==0 ) { unsigned char max =0; for( size_t i =0; i < N; i++ ) { if( map[i] > max ) max =map[i]; } printf( "Max count is %d\n", max ); }*/ _mm_free( map ); MPI_Finalize(); return 0; }
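The HALF_BINS packing in randSeq2 is the subtle part of the program: two 4-bit counters share each byte, and the low bit of the value selects the nibble. The standalone sketch below (8 possible values instead of 2^31; names illustrative) isolates the increment and unpack logic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void bump(uint8_t *map, uint32_t num)
{
    uint32_t idx  = num >> 1;                 /* byte holding the pair of bins */
    uint32_t pos  = num & 1;                  /* low bit selects the nibble    */
    uint8_t  mask = (uint8_t)(0xF << (pos ? 4 : 0));
    uint8_t  f    = map[idx];
    f = (f & ~mask) | ((f & mask) + (1 << (pos ? 4 : 0)));
    assert((f & mask) != 0);                  /* a nibble overflowed past 15   */
    map[idx] = f;
}

int main(void)
{
    uint8_t map[4] = {0};                     /* 8 bins for the values 0..7    */
    uint32_t data[6] = {1, 1, 1, 6, 0, 6};
    for (int i = 0; i < 6; ++i) bump(map, data[i]);
    for (uint32_t v = 0; v < 8; ++v)
        printf("count[%u] = %u\n", v, (map[v >> 1] >> ((v & 1) ? 4 : 0)) & 0xF);
    return 0;                                 /* counts: 0->1, 1->3, 6->2      */
}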
buggy_version.c
#include<stdio.h> int main(){ int sum = 1; int i =1; // increase sum by one each iteration using openmp // BUG: with private(i), each thread's copy of i is uninitialized, so 'for (i = i; ...)' reads an indeterminate value (undefined behavior) #pragma omp parallel for private(i) reduction( + : sum ) for (i = i; i < 10; i++) { sum +=1; } }
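A corrected version is sketched below (illustrative): declaring the loop variable in the for statement lets OpenMP privatize it automatically, the counter starts at zero, and the result is printed.

#include <stdio.h>

int main(void)
{
    int sum = 0;
    /* each thread accumulates a private sum; reduction(+) merges them */
    #pragma omp parallel for reduction(+ : sum)
    for (int i = 0; i < 10; i++) {
        sum += 1;
    }
    printf("sum = %d\n", sum);  /* prints 10 */
    return 0;
}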
GB_binop__bxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16) // C=scalar+B GB (_bind1st__bxor_int16) // C=scalar+B' GB (_bind1st_tran__bxor_int16) // C=A+scalar GB (_bind2nd__bxor_int16) // C=A'+scalar GB (_bind2nd_tran__bxor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
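Stripped of the bitmap test and the macro layers, the bind1st kernel above reduces to the following sketch for a full matrix (the function name is illustrative, not the generated code):

#include <stdint.h>

void bind1st_bxor_int16 (int16_t *Cx, int16_t x, const int16_t *Bx,
    int64_t bnz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        Cx [p] = x ^ Bx [p] ;   /* z = bxor(x,bij), with the scalar bound first */
    }
}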
transform.h
/*! * Copyright 2018 XGBoost contributors */ #ifndef XGBOOST_COMMON_TRANSFORM_H_ #define XGBOOST_COMMON_TRANSFORM_H_ #include <dmlc/omp.h> #include <xgboost/data.h> #include <utility> #include <vector> #include <type_traits> // enable_if #include "host_device_vector.h" #include "common.h" #include "span.h" #if defined (__CUDACC__) #include "device_helpers.cuh" #endif // defined (__CUDACC__) namespace xgboost { namespace common { constexpr size_t kBlockThreads = 256; namespace detail { #if defined(__CUDACC__) template <typename Functor, typename... SpanType> __global__ void LaunchCUDAKernel(Functor _func, Range _range, SpanType... _spans) { for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) { _func(i, _spans...); } } #endif // defined(__CUDACC__) } // namespace detail /*! \brief Do Transformation on HostDeviceVectors. * * \tparam CompiledWithCuda A bool parameter used to distinguish compilation * trajectories, users do not need to use it. * * Note: Using Transform is a VERY tricky thing to do. Transform uses template * argument to duplicate itself into two different types, one for CPU, * another for CUDA. The trick is not without its flaw: * * If you use it in a function that can be compiled by both nvcc and host * compiler, the behaviour is un-defined! Because your function is NOT * duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution * will merge functions with same signature. */ template <bool CompiledWithCuda = WITH_CUDA()> class Transform { private: template <typename Functor> struct Evaluator { public: Evaluator(Functor func, Range range, GPUSet devices, bool reshard) : func_(func), range_{std::move(range)}, reshard_{reshard}, distribution_{std::move(GPUDistribution::Block(devices))} {} Evaluator(Functor func, Range range, GPUDistribution dist, bool reshard) : func_(func), range_{std::move(range)}, reshard_{reshard}, distribution_{std::move(dist)} {} /*! * \brief Evaluate the functor with input pointers to HostDeviceVector. * * \tparam HDV... HostDeviceVectors type. * \param vectors Pointers to HostDeviceVector. */ template <typename... HDV> void Eval(HDV... vectors) const { bool on_device = !distribution_.IsEmpty(); if (on_device) { LaunchCUDA(func_, vectors...); } else { LaunchCPU(func_, vectors...); } } private: // CUDA UnpackHDV template <typename T> Span<T> UnpackHDV(HostDeviceVector<T>* _vec, int _device) const { auto span = _vec->DeviceSpan(_device); return span; } template <typename T> Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec, int _device) const { auto span = _vec->ConstDeviceSpan(_device); return span; } // CPU UnpackHDV template <typename T> Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const { return Span<T> {_vec->HostPointer(), static_cast<typename Span<T>::index_type>(_vec->Size())}; } template <typename T> Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const { return Span<T const> {_vec->ConstHostPointer(), static_cast<typename Span<T>::index_type>(_vec->Size())}; } // Recursive unpack for Reshard. template <typename T> void UnpackReshard(GPUDistribution dist, const HostDeviceVector<T>* vector) const { vector->Reshard(dist); } template <typename Head, typename... Rest> void UnpackReshard(GPUDistribution dist, const HostDeviceVector<Head>* _vector, const HostDeviceVector<Rest>*... _vectors) const { _vector->Reshard(dist); UnpackReshard(dist, _vectors...); } #if defined(__CUDACC__) template <typename std::enable_if<CompiledWithCuda>::type* = nullptr, typename... HDV> void LaunchCUDA(Functor _func, HDV*... 
_vectors) const { if (reshard_) UnpackReshard(distribution_, _vectors...); GPUSet devices = distribution_.Devices(); size_t range_size = *range_.end() - *range_.begin(); // Extract index to deal with possible old OpenMP. size_t device_beg = *(devices.begin()); size_t device_end = *(devices.end()); #pragma omp parallel for schedule(static, 1) if (devices.Size() > 1) for (omp_ulong device = device_beg; device < device_end; ++device) { // NOLINT // Ignore other attributes of GPUDistribution for splitting index. // This deals with situations like the multi-class setting, where // granularity is used in the data vector. size_t shard_size = GPUDistribution::Block(devices).ShardSize( range_size, devices.Index(device)); Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)}; dh::safe_cuda(cudaSetDevice(device)); const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(*(range_.end()), kBlockThreads)); detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>( _func, shard_range, UnpackHDV(_vectors, device)...); } } #else /*! \brief Dummy function defined when compiling for CPU. */ template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr, typename... HDV> void LaunchCUDA(Functor _func, HDV*... _vectors) const { LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA(); } #endif // defined(__CUDACC__) template <typename... HDV> void LaunchCPU(Functor func, HDV*... vectors) const { omp_ulong end = static_cast<omp_ulong>(*(range_.end())); #pragma omp parallel for schedule(static) for (omp_ulong idx = 0; idx < end; ++idx) { func(idx, UnpackHDV(vectors)...); } } private: /*! \brief Callable object. */ Functor func_; /*! \brief Range object specifying parallel threads index range. */ Range range_; /*! \brief Distribution of devices. */ GPUDistribution distribution_; /*! \brief Whether resharding for vectors is required. */ bool reshard_; }; public: /*! * \brief Initialize a Transform object. * * \tparam Functor A callable object type. * \return An Evaluator having one method Eval. * * \param func A callable object, accepting a size_t thread index, * followed by a set of Span classes. * \param range Range object specifying parallel threads index range. * \param devices GPUSet specifying GPUs to use, when compiling for CPU, * this should be GPUSet::Empty(). * \param reshard Whether Reshard for HostDeviceVector is needed. */ template <typename Functor> static Evaluator<Functor> Init(Functor func, Range const range, GPUSet const devices, bool const reshard = true) { return Evaluator<Functor> {func, std::move(range), std::move(devices), reshard}; } template <typename Functor> static Evaluator<Functor> Init(Functor func, Range const range, GPUDistribution const dist, bool const reshard = true) { return Evaluator<Functor> {func, std::move(range), std::move(dist), reshard}; } }; } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_TRANSFORM_H_
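The CPU path of Evaluator::Eval is just an OpenMP loop that hands every index in the range to the functor. The plain-C sketch below mirrors that dispatch, with a function pointer standing in for the templated functor (all names are illustrative):

#include <stddef.h>
#include <stdio.h>

typedef void (*kernel_fn)(size_t idx, float *out, const float *in);

static void scale_kernel(size_t i, float *out, const float *in)
{
    out[i] = 2.0f * in[i];          /* the "functor" body */
}

/* counterpart of Evaluator::LaunchCPU: run the kernel over [begin, end) */
static void launch_cpu(kernel_fn f, size_t begin, size_t end,
                       float *out, const float *in)
{
    /* a signed loop index, as the header does for old OpenMP versions */
    #pragma omp parallel for schedule(static)
    for (long idx = (long)begin; idx < (long)end; ++idx)
        f((size_t)idx, out, in);
}

int main(void)
{
    float in[4] = {1, 2, 3, 4}, out[4];
    launch_cpu(scale_kernel, 0, 4, out, in);
    for (int i = 0; i < 4; ++i) printf("%g ", out[i]);  /* 2 4 6 8 */
    printf("\n");
    return 0;
}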
IOLayersRules.h
// Copyright 2016-present, Facebook, Inc. // All rights reserved. // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #ifndef INPUTLAYER_H #define INPUTLAYER_H // Rulebook Format // rules[0][0] == mode // rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2) // rules[0][2] == nInputRows // rules[0][3] == nOutputRows // rules[1] nOutputRows x (1+maxActive) // mode 0==guaranteed unique 1==overwrite, 2=keep, 3=sum, 4=mean template <Int dimension> void inputLayerRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords, Int nInputRows, Int nInputColumns, Int batchSize, Int mode, Int &nActive) { assert(nActive == 0); assert(rules.size() == 0); assert(SGs.size() == 0); SGs.resize(batchSize); // Set a minimum batch size if necessary Point<dimension> p; if (mode == 0) { nActive = nInputRows; rules.resize(1); rules[0].push_back(mode); rules[0].push_back(1); rules[0].push_back(nInputRows); rules[0].push_back(nInputRows); if (nInputColumns == dimension) { SGs.resize(1); auto &sg = SGs[0]; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; coords += dimension; sg.mp[p] = i; } } else { // nInputColumns == dimension + 1 Int idx; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; idx = coords[dimension]; coords += dimension + 1; if (idx + 1 >= (Int)SGs.size()) SGs.resize(idx + 1); SGs[idx].mp[p] = i; } } return; } // Compile list of how input rows correspond to output rows std::vector<std::vector<Int>> outputRows; if (nInputColumns == dimension) { SGs.resize(1); auto &sg = SGs[0]; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; coords += dimension; if (sg.mp.insert(make_pair(p, nActive)).second) { outputRows.resize(++nActive); } outputRows[sg.mp[p]].push_back(i); } } else { // nInputColumns == dimension + 1 Int idx; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; idx = coords[dimension]; coords += dimension + 1; if (idx + 1 >= (Int)SGs.size()) SGs.resize(idx + 1); auto &sg = SGs[idx]; if (sg.mp.insert(make_pair(p, nActive)).second) { outputRows.resize(++nActive); } outputRows[sg.mp[p]].push_back(i); } } rules.resize(2); rules[0].push_back(mode); rules[0].push_back(1); // replace with maxActive if mode==3 or 4 rules[0].push_back(nInputRows); rules[0].push_back(outputRows.size()); auto &rule = rules[1]; if (mode == 1) { for (Int i = 0; i < nActive; ++i) { rule.push_back(1); rule.push_back(outputRows[i].front()); } } if (mode == 2) { for (Int i = 0; i < nActive; ++i) { rule.push_back(1); rule.push_back(outputRows[i].back()); } } if (mode == 3 or mode == 4) { Int maxActive = 0; for (auto &row : outputRows) maxActive = std::max(maxActive, (Int)row.size()); rules[0][1] = maxActive; for (auto &row : outputRows) { rule.push_back(row.size()); for (auto &r : row) rule.push_back(r); rule.resize((rule.size() + maxActive) / (maxActive + 1) * (maxActive + 1)); } } } // Rulebook Format // rules[0][0] == mode // rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2) // rules[0][2] == batchSize // rules[0][3] == length // rules[0][4] == nOutputRows // rules[1] nOutputRows x (1+maxActive) // bl is a batchSize x length x dimension long array of coordinates // mode 0==guaranteed unique and all present; 1==overwrite, 2=keep, 3=sum, // 4=mean template <Int dimension> void blRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords, 
Int batchSize, Int length, Int mode, Int &nActive) { assert(nActive == 0); assert(rules.size() == 0); assert(SGs.size() == 0); SGs.resize(batchSize); Int I; if (mode == 0) { nActive = batchSize * length; rules.resize(1); rules[0].push_back(mode); rules[0].push_back(1); rules[0].push_back(batchSize); rules[0].push_back(length); rules[0].push_back(nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &sg = SGs[I]; sg.ctr = I * length; auto c = coords + I * length * dimension; Point<dimension> p; for (Int l = 0; l < length; ++l) { for (Int j = 0; j < dimension; ++j) p[j] = c[j]; c += dimension; sg.mp[p] = l; } } return; } if (mode <= 2) { // Compile list of how input rows correspond to output rows std::vector<std::vector<Int>> outputRows(batchSize); std::vector<Int> nActives(batchSize); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &sg = SGs[I]; auto &ors = outputRows[I]; auto &nAct = nActives[I]; auto c = coords + I * length * dimension; Int i = I * length; Point<dimension> p; if (mode == 1) { for (Int l = 0; l < length; ++l, ++i) { for (Int j = 0; j < dimension; ++j) p[j] = *c++; if (p[0] >= 0) { if (sg.mp.insert(make_pair(p, nAct)).second) { nAct++; ors.push_back(i); } else { ors[sg.mp[p]] = i; } } } } if (mode == 2) { for (Int l = 0; l < length; ++l, ++i) { for (Int j = 0; j < dimension; ++j) p[j] = *c++; if (p[0] >= 0) { if (sg.mp.insert(make_pair(p, nAct)).second) { nAct++; ors.push_back(i); } } } } } for (I = 0; I < batchSize; I++) { SGs[I].ctr = nActive; nActive += nActives[I]; } Int maxActive = 1; rules.resize(2); rules[0].push_back(mode); rules[0].push_back(maxActive); rules[0].push_back(batchSize); rules[0].push_back(length); rules[0].push_back(nActive); auto &rule = rules[1]; rule.resize(2 * nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &ors = outputRows[I]; auto rr = &rule[SGs[I].ctr * 2]; for (auto &row : ors) { rr[0] = 1; rr[1] = row; rr += 2; } } return; } if (mode == 3 or mode == 4) { // Compile list of how input rows correspond to output rows std::vector<std::vector<std::vector<Int>>> outputRows(batchSize); std::vector<Int> nActives(batchSize); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &sg = SGs[I]; auto &ors = outputRows[I]; auto &nAct = nActives[I]; auto c = coords + I * length * dimension; Int i = I * length; Point<dimension> p; for (Int l = 0; l < length; ++l, ++i) { for (Int j = 0; j < dimension; ++j) p[j] = *c++; if (p[0] >= 0) { if (sg.mp.insert(make_pair(p, nAct)).second) { nAct++; ors.resize(nAct); } ors[sg.mp[p]].push_back(i); } } } for (I = 0; I < batchSize; I++) { SGs[I].ctr = nActive; nActive += nActives[I]; } Int maxActive = 1; if (mode >= 3) for (auto &ors : outputRows) for (auto &row : ors) maxActive = std::max(maxActive, (Int)row.size()); rules.resize(2); rules[0].push_back(mode); rules[0].push_back(maxActive); rules[0].push_back(batchSize); rules[0].push_back(length); rules[0].push_back(nActive); auto &rule = rules[1]; rule.resize((maxActive + 1) * nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &ors = outputRows[I]; auto rr = &rule[SGs[I].ctr * (maxActive + 1)]; for (auto &row : ors) { rr[0] = row.size(); for (Int i = 0; i < (Int)row.size(); ++i) rr[i + 1] = row[i]; rr += 1 + maxActive; } } } } #endif /* INPUTLAYER_H */
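For modes 3 and 4 the rulebook rows have the uniform layout [count, input rows..., padding], each 1+maxActive slots wide. The standalone sketch below builds that layout for a small duplicated input (points reduced to plain ints, zero padding assumed for unused slots; both are illustrative simplifications):

#include <stdio.h>

int main(void)
{
    int pts[6] = {7, 3, 7, 9, 3, 7};     /* input rows 0..5; 3 unique points */
    int uniq[6], rows[6][6], nrow[6], nOut = 0, maxActive = 0;

    for (int i = 0; i < 6; ++i) {        /* group input rows by point */
        int k = 0;
        while (k < nOut && uniq[k] != pts[i]) k++;
        if (k == nOut) { uniq[nOut] = pts[i]; nrow[nOut++] = 0; }
        rows[k][nrow[k]++] = i;
        if (nrow[k] > maxActive) maxActive = nrow[k];
    }
    for (int k = 0; k < nOut; ++k) {     /* emit [count, rows..., padding] */
        printf("rule row %d:", k);
        printf(" %d", nrow[k]);
        for (int j = 0; j < maxActive; ++j)
            printf(" %d", (j < nrow[k]) ? rows[k][j] : 0);
        printf("\n");
    }
    return 0;  /* point 7 -> "3 0 2 5", point 3 -> "2 1 4 0", point 9 -> "1 3 0 0" */
}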
GB_transpose.c
//------------------------------------------------------------------------------ // GB_transpose: C=A' or C=op(A'), with typecasting //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // CALLS: GB_builder // TODO:: this is way too complicated. Reduce it to two cases: // C = A' both C and A are passed in, not Chandle // C = C' C is transposed in-place, (C==A aliased) // In both cases, require the header for C and A to be allocated already // (either static or dynamic). A is never modified, unless C==A. // C and A cannot be NULL on input. If C==A then C contains a valid // matrix on input (the input matrix A), otherwise GB_phbix_free(C) is // immediately done. Either header may be static or dynamic. // Transpose a matrix, C=A', and optionally apply a unary operator and/or // typecast the values. The transpose may be done in-place, in which case C or // A are modified in-place. // The input matrix may have shallow components (even if in-place), and the // output may also have shallow components (even if the input matrix is not // shallow). // This function is CSR/CSC agnostic; it sets the output matrix format from // C_is_csc but otherwise ignores the CSR/CSC type of A and C. // There are 3 ways to use this function: // Method1 (in_place_C): If A_in is NULL, then C = (*Chandle) is transposed // in-place. If out of memory, (*Chandle) is always returned as NULL, which // frees the input matrix C if the transpose is done in-place. // Method2 (in_place_A): If A_in is not NULL and Chandle is NULL or // (*Chandle) == A_in, then A is modified in-place, and the A_in matrix is // not freed when done. // Method3 (C=A'): Otherwise, A_in and Chandle are non-NULL, and C=A' is // computed. If (*Chandle) is NULL on input, then a new header is allocated. // If (*Chandle) is non-NULL on input, it is assumed to be an uninitialized // static header, not obtained from malloc. On output, C->static_header will // be true. // The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is // m-by-n, then at most O(e/n) threads are used. The GB_builder method is more // scalable, but not as fast with a modest number of threads.
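// The "bucket sort" referred to above is, at its core, the classic
// count-then-scatter sparse transpose. A serial, pattern-only sketch for an
// m-by-n CSC matrix (illustrative names; no typecasting, operator, or
// hypersparse case; the real code distributes these buckets across threads):

#include <stdint.h>
#include <stdlib.h>

/* C = A' (pattern only): A is m-by-n in CSC form (Ap, Ai); C is n-by-m (Cp, Ci) */
int transpose_pattern (int64_t m, int64_t n,
    const int64_t *Ap, const int64_t *Ai, int64_t *Cp, int64_t *Ci)
{
    int64_t anz = Ap [n] ;
    int64_t *w = calloc (m + 1, sizeof (int64_t)) ;     /* one bucket per row of A */
    if (w == NULL) return (-1) ;                        /* out of memory */
    for (int64_t p = 0 ; p < anz ; p++) w [Ai [p]]++ ;  /* count entries per row */
    Cp [0] = 0 ;                                        /* cumulative sum -> Cp */
    for (int64_t i = 0 ; i < m ; i++)
    {
        Cp [i+1] = Cp [i] + w [i] ;
        w [i] = Cp [i] ;                                /* w now points into Ci */
    }
    for (int64_t j = 0 ; j < n ; j++)                   /* scatter column j */
    {
        for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
        {
            Ci [w [Ai [p]]++] = j ;                     /* entry A(i,j) becomes C(j,i) */
        }
    }
    free (w) ;
    return (0) ;
}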
#include "GB_transpose.h" #include "GB_build.h" #include "GB_apply.h" #define GB_FREE_ALL ; // free prior content of A, if transpose is done in-place #define GB_FREE_IN_PLACE_A \ { \ if (in_place) \ { \ /* A is being transposed in-place */ \ /* free prior content of A but not &A itself */ \ if (!Ap_shallow) GB_FREE (&Ap, Ap_size) ; \ if (!Ah_shallow) GB_FREE (&Ah, Ah_size) ; \ if (!Ab_shallow) GB_FREE (&Ab, Ab_size) ; \ if (!Ai_shallow) GB_FREE (&Ai, Ai_size) ; \ if (!Ax_shallow) GB_FREE (&Ax, Ax_size) ; \ } \ else \ { \ /* A is not modified; it is purely an input matrix */ \ ; \ } \ } // free the new C matrix, unless C=A' is being done in-place of A #define GB_FREE_C \ { \ if (!in_place_A) \ { \ /* free all of C and all its contents &C */ \ GB_Matrix_free (Chandle) ; \ } \ } //------------------------------------------------------------------------------ // GB_transpose //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A') ( GrB_Matrix *Chandle, // output matrix C, possibly modified in-place GrB_Type ctype, // desired type of C; if NULL use A->type. // ignored if op is present (cast to op->ztype) const bool C_is_csc, // desired CSR/CSC format of C const GrB_Matrix A_in, // input matrix // no operator is applied if both op1 and op2 are NULL const GrB_UnaryOp op1_in, // unary operator to apply const GrB_BinaryOp op2_in, // binary operator to apply const GxB_Scalar scalar, // scalar to bind to binary operator bool binop_bind1st, // if true, binop(x,A) else binop(A,y) GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs and determine if transpose is done in-place //-------------------------------------------------------------------------- GrB_Info info ; GBURBLE ("(transpose) ") ; GrB_Matrix A, C ; bool in_place_C, in_place_A ; bool C_static_header = false ; struct GB_Matrix_opaque T_header ; GrB_Matrix T = GB_clear_static_header (&T_header) ; if (A_in == NULL) { //---------------------------------------------------------------------- // Method1 (in_place_C): C = C' ; &C is transposed in-place //---------------------------------------------------------------------- // GB_transpose (&C, ctype, csc, NULL, op) ; // C=A' is transposed in-place, in the matrix C. // The matrix C is freed if an error occurs and C is set to NULL. ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL ASSERT (*Chandle != NULL) ; A = (*Chandle) ; C = A ; // C must be freed if an error occurs in_place_C = true ; // C is modified in-place in_place_A = false ; ASSERT (A == C && C == (*Chandle)) ; C_static_header = C->static_header ; } else if (Chandle == NULL || (*Chandle) == A_in) { //---------------------------------------------------------------------- // Method2 (in_place_A): A = A' ; A transposed in-place //---------------------------------------------------------------------- // GB_transpose (NULL, ctype, csc, A, op) ; // GB_transpose (&A, ctype, csc, A, op) ; // C=A' is transposed in-place, in the matrix A. // The matrix A_in is not freed if an error occurs. 
A = A_in ; Chandle = &A ; // C must not be freed if an error occurs C = A ; in_place_C = false ; in_place_A = true ; // A is modified in-place ASSERT (A == C && C == (*Chandle)) ; C_static_header = A->static_header ; } else { //---------------------------------------------------------------------- // Method3 (C=A'): C and A are different; C may be a static header //---------------------------------------------------------------------- // GB_transpose (&C, ctype, csc, A, op) ; // &C and A are both non-NULL, and not aliased. // C=A' where C is a new matrix constructed here. // The matrix C is freed if an error occurs, and C is set to NULL // unless C has a static header. // If C is NULL, a new header is allocated and returned as (*Chandle). // Otherwise, C = (*Chandle) is assumed to be an uninitialized static // header, and (*Chandle) is not modified. A = A_in ; C = (*Chandle) ; // NULL, or pre-existing static header in_place_C = false ; // C and A are different matrices in_place_A = false ; ASSERT (A != C && C == (*Chandle)) ; C_static_header = (C != NULL) ; if (C_static_header) { GB_clear_static_header (C) ; } } bool in_place = (in_place_A || in_place_C) ; ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ; ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ; ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ; ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ; ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ; ASSERT (C == (*Chandle)) ; // both may be NULL // get the current sparsity control of A float A_hyper_switch = A->hyper_switch ; float A_bitmap_switch = A->bitmap_switch ; int A_sparsity = A->sparsity ; // wait if A has pending tuples or zombies, but leave it jumbled GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- GrB_Type atype = A->type ; size_t asize = atype->size ; GB_Type_code acode = atype->code ; int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; int64_t anzmax = A->nzmax ; // if in-place, these must be freed when done, whether successful or not int64_t *restrict Ap = A->p ; size_t Ap_size = A->p_size ; int64_t *restrict Ah = A->h ; size_t Ah_size = A->h_size ; int64_t *restrict Ai = A->i ; size_t Ab_size = A->b_size ; int8_t *restrict Ab = A->b ; size_t Ai_size = A->i_size ; GB_void *restrict Ax = (GB_void *) A->x ; size_t Ax_size = A->x_size ; bool A_is_bitmap = GB_IS_BITMAP (A) ; bool A_is_packed = GB_is_packed (A) ; bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; bool Ap_shallow = A->p_shallow ; bool Ah_shallow = A->h_shallow ; bool Ai_shallow = A->i_shallow ; bool Ax_shallow = A->x_shallow ; bool Ab_shallow = A->b_shallow ; int64_t anz = GB_NNZ (A) ; int64_t anvec = A->nvec ; int64_t anvals = A->nvals ; //-------------------------------------------------------------------------- // determine the max number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // determine the type of C and get the unary or binary operator //-------------------------------------------------------------------------- // If a unary or binary operator is present, C is always returned as // the ztype of the operator. 
The input ctype is ignored. GrB_UnaryOp op1 = NULL ; GrB_BinaryOp op2 = NULL ; GB_Opcode opcode = GB_NOP_opcode ; if (op1_in != NULL) { // get the unary operator opcode = op1_in->opcode ; if (atype == op1_in->xtype && opcode == GB_IDENTITY_opcode) { // op1 is a built-in identity operator, with the same type as A, so // do not apply the operator and do not typecast. op1 is NULL. ctype = atype ; } else { // apply the operator, z=op1(x) op1 = op1_in ; ctype = op1->ztype ; } } else if (op2_in != NULL) { // get the binary operator GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ; opcode = op2_in->opcode ; // only GB_apply calls GB_transpose with op2_in, and it ensures this // condition holds: the first(A,y), second(x,A), and any(...) have // been renamed to identity(A), so these cases do not occur here. ASSERT (! ((opcode == GB_ANY_opcode) || (opcode == GB_FIRST_opcode && !binop_bind1st) || (opcode == GB_SECOND_opcode && binop_bind1st))) ; // apply the operator, z=op2(A,y) or op2(x,A) op2 = op2_in ; ctype = op2->ztype ; } else { // no operator. both op1 and op2 are NULL if (ctype == NULL) { // no typecasting if ctype is NULL ctype = atype ; } } GB_Type_code ccode = ctype->code ; size_t csize = ctype->size ; //-------------------------------------------------------------------------- // check for positional operators //-------------------------------------------------------------------------- bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; GrB_UnaryOp save_op1 = op1 ; GrB_BinaryOp save_op2 = op2 ; if (op_is_positional) { // do not apply the op until after the transpose op1 = NULL ; op2 = NULL ; // replace op1 with the ONE operator, as a placeholder ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32) ; op1 = (ctype == GrB_INT64) ? GxB_ONE_INT64 : GxB_ONE_INT32 ; } //-------------------------------------------------------------------------- // C = A' //-------------------------------------------------------------------------- ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ; bool allocate_new_Cx = (ctype != atype) || (op1 != NULL) || (op2 != NULL) ; if (anz == 0) { //====================================================================== // quick return if A is empty //====================================================================== // free prior space of A, if transpose is done in-place GB_FREE_IN_PLACE_A ; // A is empty; create a new empty matrix C, with the new type and // dimensions. C is hypersparse for now but may convert when // returned. info = GB_new_bix (Chandle, C_static_header, // hyper, old or new header ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GxB_HYPERSPARSE, true, A_hyper_switch, 1, 1, true, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_C ; return (info) ; } ASSERT_MATRIX_OK (*Chandle, "C transpose empty", GB0) ; ASSERT (!GB_JUMBLED (*Chandle)) ; } else if (A_is_packed) { //====================================================================== // transpose a packed or bitmap matrix or vector //====================================================================== // A is packed if it is either: (a) bitmap, (b) full, or (c) sparse or // hypersparse with all entries present, no zombies, no pending tuples, // and not jumbled. For (c), the matrix A can be treated as if it was // full, and the pattern (A->p, A->h, and A->i) can be ignored. int sparsity = (A_is_bitmap) ? 
GxB_BITMAP : GxB_FULL ; bool T_cheap = // T can be done quickly if: (avlen == 1 || avdim == 1) // A is a row or column vector, && op1 == NULL && op2 == NULL // no operator to apply, and && atype == ctype ; // no typecasting // allocate T if (T_cheap) { // just initialize the static header of T, not T->b or T->x info = GB_new (&T, true, // bitmap or full, static header ctype, avdim, avlen, GB_Ap_null, C_is_csc, sparsity, A_hyper_switch, 1, Context) ; ASSERT (info == GrB_SUCCESS) ; } else { // allocate all of T, including T->b and T->x info = GB_new_bix (&T, true, // bitmap or full, static header ctype, avdim, avlen, GB_Ap_null, C_is_csc, sparsity, true, A_hyper_switch, 1, anzmax, true, Context) ; } if (info != GrB_SUCCESS) { // out of memory GB_FREE_C ; return (info) ; } T->magic = GB_MAGIC ; if (sparsity == GxB_BITMAP) { T->nvals = anvals ; // for bitmap case only } //---------------------------------------------------------------------- // T = A' //---------------------------------------------------------------------- // Since A is full, # threads to use is nthreads, and the // nworkspaces parameter is not used int64_t anz_held = GB_NNZ_HELD (A) ; ASSERT (anz > 0 && anz_held > 0) ; int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ; if (T_cheap) { // no work to do. Transposing does not change A->b or A->x T->b = Ab ; T->b_size = Ab_size ; T->x = Ax ; T->x_size = Ax_size ; T->nzmax = A->nzmax ; if (in_place) { // transplant A->b and A->x into T T->b_shallow = Ab_shallow ; T->x_shallow = Ax_shallow ; Ab = NULL ; // do not free prior Ab Ax = NULL ; // do not free prior Ax A->b = NULL ; A->x = NULL ; } else { // T is a purely shallow copy of A T->b_shallow = (Ab != NULL) ; T->x_shallow = true ; } } else if (op1 == NULL && op2 == NULL) { // do not apply an operator; optional typecast to C->type GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ; } else { // apply an operator, C has type op->ztype GB_transpose_op (T, op1, op2, scalar, binop_bind1st, A, NULL, NULL, 0, nthreads) ; } ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ; ASSERT (!GB_JUMBLED (T)) ; // free prior space of A, if transpose is done in-place GB_FREE_IN_PLACE_A ; //---------------------------------------------------------------------- // transplant T into C //---------------------------------------------------------------------- // allocate the output matrix C as a full or bitmap matrix // if *Chandle == NULL, allocate a new header; otherwise reuse existing info = GB_new (Chandle, C_static_header, // bitmap/full, old/new header ctype, avdim, avlen, GB_Ap_null, C_is_csc, sparsity, A_hyper_switch, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in-place, ASSERT (!C_static_header) ; // or if C has a static header GB_FREE_C ; GB_phbix_free (T) ; return (info) ; } // Transplant T into the result C, making a copy if T is shallow info = GB_transplant (*Chandle, ctype, &T, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_C ; return (info) ; } ASSERT_MATRIX_OK (*Chandle, "Chandle, GB_transpose, bitmap/full", GB0) ; } else if (avdim == 1) { //====================================================================== // transpose a "column" vector into a "row" //====================================================================== // transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen). // A must be sorted first. 
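        // A small worked example of the layout this branch produces (added
        // here for exposition, not taken from the library docs): suppose
        // avlen = 6 and A is the sparse 6-by-1 vector with entries in rows
        // {1,4,5}, so Ai = [1 4 5] and anz = 3.  Then C = A' is the
        // hypersparse 1-by-6 matrix with
        //
        //      C->h = [1 4 5]      one non-empty vector per entry of A
        //      C->p = [0 1 2 3]    each vector holds exactly one entry
        //      C->i = [0 0 0]      every entry lands in "row" 0
        //
        // which is why the code below can reuse Ai directly as C->h, and
        // only needs to allocate and fill C->p with a ramp and C->i with
        // zeros.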
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ; GB_MATRIX_WAIT (A) ; ASSERT (!GB_JUMBLED (A)) ; //---------------------------------------------------------------------- // allocate space //---------------------------------------------------------------------- // Allocate the header of C, with no C->p, C->h, C->i, C->b, or C->x // content, and initialize the type and dimension of C. The new matrix // is hypersparse. This step does not allocate anything if in-place. // if *Chandle == NULL, allocate a new header; otherwise reuse existing info = GB_new (Chandle, C_static_header, // hyper; old or new header ctype, 1, avlen, GB_Ap_null, C_is_csc, GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in-place, ASSERT (!C_static_header) ; // or if C has a static header GB_FREE_C ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // allocate new space for the values and pattern int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ; int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ; GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ; bool ok = true ; Cp = GB_MALLOC (anz+1, int64_t, &Cp_size) ; Ci = GB_MALLOC (anz , int64_t, &Ci_size) ; ok = (Cp != NULL && Ci != NULL) ; if (allocate_new_Cx) { // allocate new space for the new typecasted numerical values of C ASSERT (anz > 0) ; Cx = GB_MALLOC (anz * ctype->size, GB_void, &Cx_size) ; ok = ok && (Cx != NULL) ; } if (!ok) { // out of memory GB_FREE (&Cp, Cp_size) ; GB_FREE (&Ci, Ci_size) ; GB_FREE (&Cx, Cx_size) ; GB_FREE_IN_PLACE_A ; GB_FREE_C ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // fill the content of C //---------------------------------------------------------------------- // numerical values: apply the operator, typecast, or make shallow copy if (op1 != NULL || op2 != NULL) { // Cx = op (A) info = GB_apply_op ( // op1 != identity of same types (GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ; // GB_apply_op can only fail if op1/op2 are positional ASSERT (!GB_OP_IS_POSITIONAL (op1)) ; ASSERT (!GB_OP_IS_POSITIONAL (op2)) ; ASSERT (info == GrB_SUCCESS) ; C->x = Cx ; C->x_size = Cx_size ; C->x_shallow = false ; // prior Ax will be freed } else if (ctype != atype) { // copy the values from A into C and cast from atype to ctype C->x = Cx ; C->x_size = Cx_size ; C->x_shallow = false ; GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ; // prior Ax will be freed } else // ctype == atype { // no type change; numerical values of C are a shallow copy of A. C->x = Ax ; C->x_size = Ax_size ; C->x_shallow = (in_place) ? Ax_shallow : true ; Ax = NULL ; // do not free prior Ax } // each entry in A becomes a non-empty vector in C // C is a hypersparse 1-by-avlen matrix C->h = Ai ; C->h_size = Ai_size ; C->h_shallow = (in_place) ? 
Ai_shallow : true ; Ai = NULL ; // do not free prior Ai // C->p = 0:anz and C->i = zeros (1,anz), newly allocated C->plen = anz ; C->nvec = anz ; C->nvec_nonempty = anz ; C->i = Ci ; C->i_size = Ci_size ; C->p = Cp ; C->p_size = Cp_size ; // fill the vector pointers C->p int nthreads = GB_nthreads (anz, chunk, nthreads_max) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < anz ; k++) { Ci [k] = 0 ; Cp [k] = k ; } Cp [anz] = anz ; C->nzmax = anz ; C->magic = GB_MAGIC ; // free prior space of A, if transpose is done in-place GB_FREE_IN_PLACE_A ; } else if (avlen == 1) { //====================================================================== // transpose a "row" into a "column" vector //====================================================================== // transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1). // if A->vlen is 1, all vectors of A are implicitly sorted ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ; //---------------------------------------------------------------------- // allocate workspace, if needed //---------------------------------------------------------------------- int ntasks = 0 ; int nth = GB_nthreads (avdim, chunk, nthreads_max) ; GB_WERK_DECLARE (Count, int64_t) ; if (nth > 1 && !A_is_hyper) { // ntasks and Count are not needed if nth == 1 ntasks = 8 * nth ; ntasks = GB_IMIN (ntasks, avdim) ; ntasks = GB_IMAX (ntasks, 1) ; GB_WERK_PUSH (Count, ntasks+1, int64_t) ; if (Count == NULL) { // out of memory GB_FREE_C ; return (GrB_OUT_OF_MEMORY) ; } } // Allocate the header of C, with no C->p, C->h, C->i, or C->x content, // and initialize the type and dimension of C. If in-place, A->p, // A->h, A->i, and A->x are all NULL. The new matrix is sparse, but // can be CSR or CSC. This step does not allocate anything if in // place. 
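        // An analogous worked example for this branch (illustrative only):
        // suppose avdim = 6 and A is 1-by-6 with entries in "columns"
        // {1,4,5}.  Then C = A' is the sparse 6-by-1 vector with
        //
        //      C->p = [0 3]        a single vector holding anz = 3 entries
        //      C->i = [1 4 5]      the former column indices of A
        //
        // If A is hypersparse, Ah = [1 4 5] already lists the non-empty
        // vectors and can become C->i directly; if A is sparse, the same
        // list is found below by scanning Ap for Ap [j] < Ap [j+1].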
// if *Chandle == NULL, allocate a new header; otherwise reuse existing info = GB_new (Chandle, C_static_header, // sparse; old or new header ctype, avdim, 1, GB_Ap_null, C_is_csc, GxB_SPARSE, A_hyper_switch, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in-place, ASSERT (!C_static_header) ; // or if C has a static header GB_FREE_C ; GB_WERK_POP (Count, int64_t) ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // allocate new space for the values and pattern int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ; int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ; GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ; bool ok = true ; Cp = GB_MALLOC (2, int64_t, &Cp_size) ; ok = ok && (Cp != NULL) ; if (!A_is_hyper) { // A is sparse, so new space is needed for Ci Ci = GB_MALLOC (anz, int64_t, &Ci_size) ; ok = ok && (Ci != NULL) ; } if (allocate_new_Cx) { // allocate new space for the new typecasted numerical values of C Cx = GB_MALLOC (anz * ctype->size, GB_void, &Cx_size) ; ok = ok && (Cx != NULL) ; } if (!ok) { // out of memory GB_FREE (&Cp, Cp_size) ; GB_FREE (&Ci, Ci_size) ; GB_FREE (&Cx, Cx_size) ; GB_WERK_POP (Count, int64_t) ; GB_FREE_IN_PLACE_A ; GB_FREE_C ; return (GrB_OUT_OF_MEMORY) ; } Cp [0] = 0 ; Cp [1] = 0 ; //---------------------------------------------------------------------- // numerical values of C: apply the op, typecast, or make shallow copy //---------------------------------------------------------------------- // numerical values: apply the operator, typecast, or make shallow copy if (op1 != NULL || op2 != NULL) { // Cx = op (A) info = GB_apply_op ( // op1 != identity of same types (GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ; // GB_apply_op can only fail if op1/op2 are positional ASSERT (!GB_OP_IS_POSITIONAL (op1)) ; ASSERT (!GB_OP_IS_POSITIONAL (op2)) ; ASSERT (info == GrB_SUCCESS) ; C->x = Cx ; C->x_size = Cx_size ; C->x_shallow = false ; // prior Ax will be freed } else if (ctype != atype) { // copy the values from A into C and cast from atype to ctype C->x = Cx ; C->x_size = Cx_size ; C->x_shallow = false ; GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ; // prior Ax will be freed } else // ctype == atype { // no type change; numerical values of C are a shallow copy of A C->x = Ax ; C->x_size = Ax_size ; C->x_shallow = (in_place) ? Ax_shallow : true ; Ax = NULL ; // do not free prior Ax } //---------------------------------------------------------------------- // pattern of C //---------------------------------------------------------------------- if (A_is_hyper) { //------------------------------------------------------------------ // each non-empty vector in A becomes an entry in C //------------------------------------------------------------------ C->i = Ah ; C->i_size = Ah_size ; C->i_shallow = (in_place) ? 
Ah_shallow : true ; ASSERT (anvec == anz) ; Ah = NULL ; // do not free prior Ah } else { //------------------------------------------------------------------ // find the non-empty vectors of A, which become entries in C //------------------------------------------------------------------ ASSERT (Ah == NULL) ; if (nth == 1) { //-------------------------------------------------------------- // construct Ci with a single thread //-------------------------------------------------------------- int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (Ap [j] < Ap [j+1]) { Ci [k++] = j ; } } ASSERT (k == anz) ; } else { //-------------------------------------------------------------- // construct Ci in parallel //-------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = 0 ; GB_PARTITION (jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (Ap [j] < Ap [j+1]) { k++ ; } } Count [tid] = k ; } GB_cumsum (Count, ntasks, NULL, 1, NULL) ; ASSERT (Count [ntasks] == anz) ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = Count [tid] ; GB_PARTITION (jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (Ap [j] < Ap [j+1]) { Ci [k++] = j ; } } } } #ifdef GB_DEBUG int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (Ap [j] < Ap [j+1]) { ASSERT (Ci [k] == j) ; k++ ; } } ASSERT (k == anz) ; #endif C->i = Ci ; C->i_size = Ci_size ; C->i_shallow = false ; } //--------------------------------------------------------------------- // vector pointers of C //--------------------------------------------------------------------- // C->p = [0 anz] and C->h = NULL ASSERT (C->plen == 1) ; ASSERT (C->nvec == 1) ; ASSERT (C->h == NULL) ; C->p = Cp ; C->p_size = Cp_size ; C->p_shallow = false ; C->nvec_nonempty = (anz == 0) ? 
0 : 1 ; // fill the vector pointers C->p Cp [0] = 0 ; Cp [1] = anz ; C->nzmax = anz ; C->magic = GB_MAGIC ; ASSERT (!GB_JUMBLED (C)) ; // free prior space of A, if transpose done in-place, and free workspace GB_FREE_IN_PLACE_A ; GB_WERK_POP (Count, int64_t) ; } else { //====================================================================== // transpose a general sparse or hypersparse matrix //====================================================================== ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ; // T=A' with optional typecasting, or T=op(A') //---------------------------------------------------------------------- // select the method //---------------------------------------------------------------------- int nworkspaces_bucket, nthreads_bucket ; bool use_builder = GB_transpose_method (A, &nworkspaces_bucket, &nthreads_bucket, Context) ; //---------------------------------------------------------------------- // transpose the matrix with the selected method //---------------------------------------------------------------------- if (use_builder) { //================================================================== // transpose via GB_builder //================================================================== //------------------------------------------------------------------ // allocate and create iwork //------------------------------------------------------------------ // allocate iwork of size anz int64_t *iwork = NULL ; size_t iwork_size = 0 ; iwork = GB_MALLOC (anz, int64_t, &iwork_size) ; if (iwork == NULL) { // out of memory GB_FREE_C ; return (GrB_OUT_OF_MEMORY) ; } // Construct the "row" indices of C, which are "column" indices of // A. This array becomes the permanent T->i on output. This phase // must be done before Chandle is created below, since that step // destroys A. info = GB_extract_vector_list (iwork, A, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE (&iwork, iwork_size) ; GB_FREE_C ; return (info) ; } //------------------------------------------------------------------ // allocate the output matrix and additional space (jwork and S) //------------------------------------------------------------------ // Allocate the header of C, with no C->p, C->h, C->i, or C->x // content, and initialize the type and dimension of C. If in // place, A->p, A->h, A->i, and A->x are all NULL. The new matrix // is hypersparse, but can be CSR or CSC. This step does not // allocate anything if in-place. // if *Chandle == NULL, allocate a new header; otherwise reuse info = GB_new (Chandle, C_static_header, // hyper, old or new header ctype, avdim, avlen, GB_Ap_null, C_is_csc, GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in-place, ASSERT (!C_static_header) ; // or if C has a static header GB_FREE (&iwork, iwork_size) ; GB_FREE_C ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // if in_place, the prior Ap and Ah can now be freed if (in_place) { if (!Ap_shallow) GB_FREE (&Ap, Ap_size) ; if (!Ah_shallow) GB_FREE (&Ah, Ah_size) ; } int64_t *jwork = NULL ; size_t jwork_size = 0 ; GB_Type_code scode ; GB_void *S = NULL ; GB_void *Swork = NULL ; size_t Swork_size = 0 ; // for the GB_builder method, if the transpose is done in-place and // A->i is not shallow, A->i can be used and then freed. // Otherwise, A->i is not modified at all. 
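            // In outline, the builder method recreates A in coordinate form
            // with the roles of the two indices swapped (a sketch of the
            // idea only; the real work happens inside GB_builder):
            //
            //      for each entry A(i,j): append the tuple (j, i, aij)
            //      T = build (tuples)      // sort by (j,i), giving T = A'
            //
            // iwork holds the j's (extracted by GB_extract_vector_list
            // above), jwork holds the i's (a copy or transplant of Ai), and
            // S or Swork holds the values, so the sort does all the work.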
bool ok = true ; bool recycle_Ai = (in_place && !Ai_shallow) ; if (!recycle_Ai) { // allocate jwork of size anz jwork = GB_MALLOC (anz, int64_t, &jwork_size) ; ok = ok && (jwork != NULL) ; } if (op1 != NULL || op2 != NULL) { // allocate Swork of size anz * csize Swork = GB_MALLOC (anz * csize, GB_void, &Swork_size) ; ok = ok && (Swork != NULL) ; } if (!ok) { // out of memory GB_FREE (&iwork, iwork_size) ; GB_FREE (&jwork, jwork_size) ; GB_FREE (&Swork, Swork_size) ; GB_FREE_IN_PLACE_A ; GB_FREE_C ; return (GrB_OUT_OF_MEMORY) ; } //------------------------------------------------------------------ // construct jwork and Swork //------------------------------------------------------------------ // "row" indices of A become "column" indices of C if (recycle_Ai) { // Ai is used as workspace for the "column" indices of C. // jwork is a shallow copy of Ai, and is freed by GB_builder. jwork = Ai ; jwork_size = Ai_size ; ASSERT (in_place) ; // set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A Ai = NULL ; } else { // jwork = Ai, making a deep copy. jwork is freed by // GB_builder. A->i is not modified, even if out of memory. GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads_max) ; } // numerical values: apply the op, typecast, or make shallow copy if (op1 != NULL || op2 != NULL) { // Swork = op (A) info = GB_apply_op ( // op1 != identity of same types (GB_void *) Swork, op1, op2, scalar, binop_bind1st, A, Context) ; // GB_apply_op can only fail if op1/op2 are positional ASSERT (!GB_OP_IS_POSITIONAL (op1)) ; ASSERT (!GB_OP_IS_POSITIONAL (op2)) ; ASSERT (info == GrB_SUCCESS) ; // GB_builder will not need to typecast Swork to T->x, and it // may choose to transplant it into T->x scode = ccode ; #if 0 if (in_place && !Ax_shallow) { // A is being transposed in-place so A->x is no longer // needed. If A->x is shallow this can be skipped. T->x // will not be shallow if the op is present. A->x should // be freed early to free up space for GB_builder. // However, in the current usage, when op is used, A is not // transposed in-place, so this step is not needed. ASSERT (GB_DEAD_CODE) ; GB_FREE (&Ax, A->x_size) ; } #endif } else { // GB_builder will typecast S from atype to ctype if needed. // S is a shallow copy of Ax, and must not be modified. S = Ax ; scode = acode ; } //------------------------------------------------------------------ // build the matrix: T = (ctype) A' or op ((xtype) A') //------------------------------------------------------------------ // internally, jwork is freed and then T->x is allocated, so the // total high-water memory usage is anz * max (csize, // sizeof(int64_t)). T is always hypersparse. // If op is not NULL, then Swork can be transplanted into T in // GB_builder, instead. However, this requires the tuples to be // sorted on input, which is possible but rare for GB_transpose. 
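            // A rough illustration of that accounting (added for exposition;
            // the numbers are hypothetical): with anz = 1e6 tuples and
            // csize = 8, jwork holds 8 MB of int64_t indices and T->x needs
            // 8 MB of values.  Because jwork is freed before T->x is
            // allocated, the two never coexist, so that part of the peak is
            // anz * max (csize, sizeof (int64_t)) = 8 MB, not 16 MB.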
            info = GB_builder
            (
                T,          // create T using a static header
                ctype,      // T is of type ctype
                avdim,      // T->vlen = A->vdim, always > 1
                avlen,      // T->vdim = A->vlen, always > 1
                C_is_csc,   // T has the same CSR/CSC format as C
                &iwork,     // iwork_handle, becomes T->i on output
                &iwork_size,
                &jwork,     // jwork_handle, freed on output
                &jwork_size,
                &Swork,     // Swork_handle, freed on output
                &Swork_size,
                false,      // tuples are not sorted on input
                true,       // tuples have no duplicates
                anz,        // size of iwork, jwork, and Swork
                true,       // is_matrix: unused
                NULL, NULL, // original I,J indices: not used here
                S,          // array of values of type scode, not modified
                anz,        // number of tuples
                NULL,       // no dup operator needed (input has no duplicates)
                scode,      // type of S or Swork
                Context
            ) ;

            // GB_builder always frees jwork, and either frees iwork or
            // transplants it into T->i and sets iwork to NULL.  So iwork and
            // jwork are always NULL on output.  GB_builder does not modify S.
            ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;

            //------------------------------------------------------------------
            // free prior space and transplant T into C
            //------------------------------------------------------------------

            // Free the prior content of the input matrix, if done in-place.
            // Ap, Ah, and Ai have already been freed, but Ax has not.
            GB_FREE_IN_PLACE_A ;

            if (info != GrB_SUCCESS)
            {
                // out of memory in GB_builder
                GB_FREE_C ;
                return (info) ;
            }

            // Transplant T into the result C.  The matrix T is not shallow
            // and no typecasting is done, so this will always succeed.
            ASSERT (!GB_JUMBLED (T)) ;
            info = GB_transplant (*Chandle, ctype, &T, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
        }
        else
        {

            //==================================================================
            // transpose via bucket sort
            //==================================================================

            // This method does not operate on the matrix in-place, so it must
            // create a temporary matrix T.  Then the input matrix is freed and
            // replaced with the new matrix T.

            // T = A' and typecast to ctype
            info = GB_transpose_bucket (T, ctype, C_is_csc, A, op1, op2,
                scalar, binop_bind1st, nworkspaces_bucket, nthreads_bucket,
                Context) ;

            // free prior content, if C=A' is being done in-place
            if (in_place_A)
            {
                ASSERT (A == (*Chandle)) ;
                GB_phbix_free (A) ;
            }
            else if (in_place_C)
            {
                GB_phbix_free (C) ;
            }

            if (info != GrB_SUCCESS)
            {
                // out of memory in GB_transpose_bucket
                GB_FREE_C ;
                return (info) ;
            }

            ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
            ASSERT (GB_JUMBLED_OK (T)) ;

            // allocate the output matrix C (just the header)
            // if *Chandle == NULL, allocate a new header; otherwise reuse
            info = GB_new (Chandle, C_static_header, // old/new header
                ctype, avdim, avlen, GB_Ap_null, C_is_csc,
                GxB_SPARSE, A_hyper_switch, 0, Context) ;
            if (info != GrB_SUCCESS)
            {
                // out of memory
                ASSERT (!in_place) ;        // cannot fail if in-place,
                ASSERT (!C_static_header) ; // or if C has a static header
                GB_FREE_C ;
                GB_phbix_free (T) ;
                return (info) ;
            }

            // Transplant T back into C and free T.  T is not shallow and
            // no typecast is done so this will always succeed.
            info = GB_transplant (*Chandle, ctype, &T, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
        }
    }

    //--------------------------------------------------------------------------
    // get the output matrix
    //--------------------------------------------------------------------------

    C = (*Chandle) ;
    ASSERT (GB_JUMBLED_OK (C)) ;

    //--------------------------------------------------------------------------
    // apply a positional operator, after transposing the matrix
    //--------------------------------------------------------------------------

    if (op_is_positional)
    {
        // the positional operator is applied in-place to the values of C
        op1 = save_op1 ;
        op2 = save_op2 ;
        // Cx = op (C)
        info = GB_apply_op ((GB_void *) C->x, op1,   // positional op only
            op2, scalar, binop_bind1st, C, Context) ;
        if (info != GrB_SUCCESS)
        {
            // out of memory
            GB_FREE_C ;
            return (info) ;
        }
    }

    //--------------------------------------------------------------------------
    // conform the result to the desired sparsity structure of A
    //--------------------------------------------------------------------------

    // transplant the control settings from A to C
    C->hyper_switch = A_hyper_switch ;
    C->bitmap_switch = A_bitmap_switch ;
    C->sparsity = A_sparsity ;

    ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
    info = GB_conform (C, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_C ;
        return (info) ;
    }
    ASSERT_MATRIX_OK (*Chandle, "Chandle conformed in GB_transpose", GB0) ;
    return (GrB_SUCCESS) ;
}
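// For exposition, the three entry patterns dispatched at the top of this
// function can be summarized as follows (hypothetical caller fragments; the
// argument lists are abbreviated, and the real calls also pass op2, scalar,
// binop_bind1st, and Context):
//
//      GB_transpose (&C, ctype, csc, NULL, op1, ...) ; // Method1: C = C',
//                                                      // in-place on C
//      GB_transpose (NULL, ctype, csc, A, op1, ...) ;  // Method2: A = A',
//                                                      // in-place on A
//      GB_transpose (&C, ctype, csc, A, op1, ...) ;    // Method3: C = A',
//                                                      // C built anew,
//                                                      // C and A not aliased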
GB_unop__identity_int16_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int16_fp32) // op(A') function: GB (_unop_tran__identity_int16_fp32) // C type: int16_t // A type: float // cast: int16_t cij = GB_cast_to_int16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int16_t z = GB_cast_to_int16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int16_fp32) ( int16_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int16_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
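// For exposition, the generated apply kernel above reduces to this pattern
// (a hand-written sketch with hypothetical names; the library's
// GB_cast_to_int16_t also handles rounding and saturation, which the plain
// cast below does not):
//
//      static void apply_identity_int16_fp32 (int16_t *Cx, const float *Ax,
//                                             const int8_t *Ab, int64_t anz)
//      {
//          for (int64_t p = 0 ; p < anz ; p++)
//          {
//              if (Ab != NULL && !Ab [p]) continue ;  // skip bitmap holes
//              Cx [p] = (int16_t) (double) Ax [p] ;   // cast, then identity
//          }
//      }
//
// The real kernel differs only in the OpenMP parallel for and in using the
// library's typecast macro.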
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). //===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(0) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(0) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingLocal : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; /// Whether this initializer list initializes a std::initializer_list /// object. unsigned InitializesStdInitializerList : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { // FIXME: this is wasteful on 64-bit platforms. void *Aligner; StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, ASTContext& C, unsigned alignment = 8) throw(); void* operator new(size_t bytes, ASTContext* C, unsigned alignment = 8) throw(); void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, ASTContext&, unsigned) throw() { } void operator delete(void*, ASTContext*, unsigned) throw() { } void operator delete(void*, std::size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). 
  struct EmptyShell { };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) {
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

public:
  Stmt(StmtClass SC) {
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  LLVM_ATTRIBUTE_USED void dump() const;
  LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  LLVM_ATTRIBUTE_USED void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// hasImplicitControlFlow - Some statements (e.g. short circuited operations)
  /// contain implicit control-flow in the order their subexpressions
  /// are evaluated.  This predicate returns true if this statement has
  /// such implicit control-flow.  Such statements are also specially handled
  /// within CFGs.
  bool hasImplicitControlFlow() const;

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  typedef StmtIterator       child_iterator;
  typedef ConstStmtIterator  const_child_iterator;

  typedef StmtRange          child_range;
  typedef ConstStmtRange     const_child_range;

  child_range children();
  const_child_range children() const {
    return const_cast<Stmt*>(this)->children();
  }

  child_iterator child_begin() { return children().first; }
  child_iterator child_end() { return children().second; }

  const_child_iterator child_begin() const { return children().first; }
  const_child_iterator child_end() const { return children().second; }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
/// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. 
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
    HasLeadingEmptyMacro(false) { }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  child_range children() { return child_range(); }

  friend class ASTStmtReader;
  friend class ASTStmtWriter;
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
  Stmt** Body;
  SourceLocation LBracLoc, RBracLoc;
public:
  CompoundStmt(ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(0), LBracLoc(Loc), RBracLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(0) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : 0; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size()-1] : 0;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBracLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBracLoc; }

  SourceLocation getLBracLoc() const { return LBracLoc; }
  void setLBracLoc(SourceLocation L) { LBracLoc = L; }
  SourceLocation getRBracLoc() const { return RBracLoc; }
  void setRBracLoc(SourceLocation L) { RBracLoc = L; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(0), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {} SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(0) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension SourceLocation EllipsisLoc; public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = 0; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { LabelDecl *TheDecl; Stmt *SubStmt; SourceLocation IdentLoc; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) { } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; const Attr *Attrs[1]; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(this->Attrs, Attrs.data(), Attrs.size() * sizeof(Attr*)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(Attrs, 0, NumAttrs * sizeof(Attr*)); } public: static AttributedStmt *Create(ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
  static AttributedStmt *CreateEmpty(ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return ArrayRef<const Attr*>(Attrs, NumAttrs);
  }
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = 0);

  /// \brief Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  // This points to a linked list of case and default statements.
  SwitchCase *FirstCase;
  SourceLocation SwitchLoc;

  /// If the SwitchStmt is a switch on an enum value, this records whether
  /// all the enum values were covered by CaseStmts.  This value is meant to
  /// be a hint for possible clients.
  unsigned AllEnumCasesCovered : 1;

public:
  SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond);

  /// \brief Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase; } /// \brief Set the case list for this switch statement. /// /// The caller is responsible for incrementing the retain counts on /// all of the SwitchCase statements in this list. void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { AllEnumCasesCovered = 1; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return (bool) AllEnumCasesCovered; } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; public: WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation DoLoc; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation ForLoc; SourceLocation LParenLoc, RParenLoc; public: ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {} /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { Stmt *RetExpr; SourceLocation RetLoc; const VarDecl *NRVOCandidate; public: ReturnStmt(SourceLocation RL) : Stmt(ReturnStmtClass), RetExpr(0), RetLoc(RL), NRVOCandidate(0) { } ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; IdentifierInfo **Names; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Names(0), Exprs(0) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. 
unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; public: GCCAsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(0), Clobbers(0) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. 
}; private: Kind MyKind; std::string Str; unsigned OperandNo; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, char Modifier) : MyKind(Operand), Str(), OperandNo(OpNo) { Str += Modifier; } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { assert(isString()); return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const { assert(isOperand()); return Str[0]; } }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } void setOutputsAndInputsAndClobbers(ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. 
/// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; std::string AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; public: MSAsmStmt(ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<IdentifierInfo*> names, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(0), Constraints(0), Clobbers(0) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// const std::string *getAsmString() const { return &AsmStr; } std::string *getAsmString() { return &AsmStr; } void setAsmString(StringRef &E) { AsmStr = E.str(); } /// Assemble final IR asm string. std::string generateAsmString(ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// StringRef getClobber(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[0]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(ASTContext 
&C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this' or by reference, etc. enum VariableCaptureKind { VCK_This, VCK_ByRef }; /// \brief Describes the capture of either a variable or 'this'. class Capture { VarDecl *Var; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = 0) : Var(Var), Loc(Loc) { switch (Kind) { case VCK_This: assert(Var == 0 && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { if (capturesThis()) return VCK_This; return VCK_ByRef; } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return Var == 0; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return Var != 0; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture does not capture 'this'. 
VarDecl *getCapturedVar() const { assert(!capturesThis() && "No variable available for 'this' capture"); return Var; } }; private: /// \brief The number of variable captured, including 'this'. unsigned NumCaptures; /// \brief The implicit outlined function. CapturedDecl *TheCapturedDecl; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; public: static CapturedStmt *Create(ASTContext &Context, Stmt *S, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() const { return TheCapturedDecl; } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef const Capture *capture_iterator; /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); }; } // end namespace clang #endif
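The statement classes above all provide classof(), which LLVM-style RTTI (isa<>/dyn_cast<>) dispatches on, and children()/child_begin()/child_end(), which expose the sub-statements for traversal. A minimal sketch of a recursive walk built on just those two hooks, assuming the Clang headers above are on the include path; countIfStmts is a hypothetical helper, not part of Clang.

#include "clang/AST/Stmt.h"
#include "llvm/Support/Casting.h"

static unsigned countIfStmts(clang::Stmt *S) {
  if (!S)  // children() entries may be null, e.g. a missing else branch
    return 0;
  // classof() above is what makes isa<IfStmt> work via LLVM-style RTTI.
  unsigned N = llvm::isa<clang::IfStmt>(S) ? 1 : 0;
  for (clang::Stmt::child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    N += countIfStmts(*I);
  return N;
}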
vector_add_mp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

#define n 100000

/* Arrays are static: 3 * n doubles (~2.4 MB) would risk overflowing the stack. */
static double a[n], b[n], c[n];

int main()
{
    /* omp_get_wtime() returns double; storing it in float loses precision. */
    double startTime, endTime, execTime;
    int i;

    startTime = omp_get_wtime();
    #pragma omp parallel private(i) shared(a, b, c)
    {
        /* rand() shares global state and is not thread-safe; use rand_r()
           with a per-thread seed instead. */
        unsigned int seed = (unsigned int)time(0) ^ (unsigned int)omp_get_thread_num();
        #pragma omp for
        for (i = 0; i < n; i++) {
            /* Locals are private to each iteration, avoiding the data race
               the shared random_a/random_b/omp_rank variables had. */
            double random_a = rand_r(&seed);
            double random_b = rand_r(&seed);
            int omp_rank = omp_get_thread_num();
            a[i] = i * random_a;
            b[i] = i * random_b;
            c[i] = a[i] + b[i];
            (void)omp_rank;
            //printf("The value of a[%d] = %lf and b[%d] = %lf and result c[%d] = %lf Thread rank = %d\n", i, a[i], i, b[i], i, c[i], omp_rank);
        }
    }
    endTime = omp_get_wtime();
    execTime = endTime - startTime;
    printf("%f \n", execTime);
    return 0;
}
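A self-contained sketch (names and the checksum pass are my own, not part of the benchmark above) of the same omp_get_wtime() timing idiom, followed by a serial reference pass that validates the parallel result.

#include <cstdio>
#include <cmath>
#include <algorithm>
#include <vector>
#include <omp.h>

int main() {
  const int N = 100000;
  std::vector<double> a(N), b(N), c(N);
  for (int i = 0; i < N; i++) { a[i] = i * 0.5; b[i] = i * 0.25; }

  double t0 = omp_get_wtime();      // returns double; keep it in a double
  #pragma omp parallel for
  for (int i = 0; i < N; i++)
    c[i] = a[i] + b[i];
  double t1 = omp_get_wtime();

  // Serial check: each iteration writes only c[i], so there is no race and
  // the maximum absolute error against the serial recomputation must be 0.
  double err = 0.0;
  for (int i = 0; i < N; i++)
    err = std::max(err, std::fabs(c[i] - (a[i] + b[i])));

  std::printf("time = %f s, max error = %g\n", t1 - t0, err);
  return 0;
}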
linAlgAXMY.c
/*
The MIT License (MIT)

Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

extern "C" void FUNC(axmy)(const dlong & N,
                           const dfloat& alpha,
                           const dfloat * __restrict__ cpu_w,
                           dfloat * __restrict__ cpu_a){
#ifdef __NEKRS__OMP__
  #pragma omp parallel for
#endif
  for(int i=0;i<N;++i){
    const dfloat ai = cpu_a[i];
    const dfloat wi = cpu_w[i];
    cpu_a[i] = alpha*ai*wi;
  }
}

extern "C" void FUNC(axmyMany)(const dlong & N,
                               const dlong & Nfields,
                               const dlong & offset,
                               const dlong & mode,
                               const dfloat & alpha,
                               const dfloat * __restrict__ cpu_w,
                               dfloat * __restrict__ cpu_a){
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2)
#endif
  for(int fld=0;fld<Nfields;fld++) {
    for(int i=0;i<N;++i){
      const dlong id = i + fld*offset;
      const dfloat ai = cpu_a[id];
      const dfloat wi = cpu_w[i + mode * fld * offset];
      cpu_a[id] = alpha*ai*wi;
    }
  }
}

extern "C" void FUNC(axmyVector)(const dlong & N,
                                 const dlong & offset,
                                 const dlong & mode,
                                 const dfloat & alpha,
                                 const dfloat * __restrict__ cpu_w,
                                 dfloat * __restrict__ cpu_a){
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2)
#endif
  for(int fld=0;fld<p_NVec;fld++) {
    for(int i=0;i<N;++i){
      const dlong id = i + fld*offset;
      const dfloat ai = cpu_a[id];
      const dfloat wi = cpu_w[i + mode * fld * offset];
      cpu_a[id] = alpha*ai*wi;
    }
  }
}
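The axmyMany/axmyVector kernels above use mode to select between one shared weight vector (mode == 0) and a per-field weight slice at fld*offset (mode == 1). The sketch below re-implements axmyMany in plain C++ with local stand-ins for nekRS's dlong/dfloat typedefs, plus a toy driver to make the indexing visible; it is illustrative only, not nekRS code.

#include <cstdio>
typedef int dlong;
typedef double dfloat;

static void axmyMany(dlong N, dlong Nfields, dlong offset, dlong mode,
                     dfloat alpha, const dfloat *w, dfloat *a) {
  #pragma omp parallel for collapse(2)
  for (dlong fld = 0; fld < Nfields; fld++)
    for (dlong i = 0; i < N; i++) {
      const dlong id = i + fld * offset;
      // mode gates the field stride into w: 0 -> same weights for all fields
      a[id] = alpha * a[id] * w[i + mode * fld * offset];
    }
}

int main() {
  dfloat a[6] = {1, 2, 3, 4, 5, 6};    // two fields of three entries, offset 3
  dfloat w[6] = {2, 2, 2, 10, 10, 10};
  axmyMany(3, 2, 3, /*mode=*/1, 1.0, w, a);  // field 0 scaled by 2, field 1 by 10
  for (int i = 0; i < 6; i++) std::printf("%g ", a[i]);  // 2 4 6 40 50 60
  std::printf("\n");
  return 0;
}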
rt_dlange.c
#include "runtime.h" void RT_CORE_dlange(Quark *quark, Quark_Task_Flags *task_flags, int norm, int M, int N, const double *A, int LDA, int szeA, int szeW, double *result) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlange( quark, task_flags, norm, M, N, A, LDA, szeA, szeW, result); } else if (plasma->runtime == PLASMA_OMPSS) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); #pragma omp register ([szeW]work) #pragma omp target device (smp) no_copy_deps #pragma omp task in([szeA]A) out([1]result) *result = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(norm), M, N, A, LDA, work); } } void RT_CORE_dlange_f1(Quark *quark, Quark_Task_Flags *task_flags, PLASMA_enum norm, int M, int N, const double *A, int LDA, int szeA, int szeW, double *result, double *fake, int szeF) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlange_f1( quark, task_flags, norm, M, N, A, LDA, szeA, szeW, result, fake, szeF); } else if (plasma->runtime == PLASMA_OMPSS) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); if (result == fake) #pragma omp target device (smp) no_copy_deps #pragma omp task in([szeA]A) out([1]result) *result = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(norm), M, N, A, LDA, work); else #pragma omp target device (smp) no_copy_deps #pragma omp task in([szeA]A) out([1]result) out([szeF]fake) *result = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(norm), M, N, A, LDA, work); } } #if 0 void RT_CORE_dlansy(Quark *quark, Quark_Task_Flags *task_flags, int norm, PLASMA_enum uplo, int N, const double *A, int LDA, int szeA, int szeW, double *result) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlansy( quark, task_flags, norm, uplo, N, A, LDA, szeA, szeW, result); } else if ( plasma->runtime == PLASMA_OMPSS ) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); #pragma omp task in([szeA]A) out([1]result) no_copy_deps *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } } void RT_CORE_dlansy_f1(Quark *quark, Quark_Task_Flags *task_flags, PLASMA_enum norm, PLASMA_enum uplo, int N, const double *A, int LDA, int szeA, int szeW, double *result, double *fake, int szeF) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlansy_f1( quark, task_flags, norm, uplo, N, A, LDA, szeA, szeW, result, fake, szeF); } else if ( plasma->runtime == PLASMA_OMPSS ) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); if ( result == fake ) { #pragma omp task in([szeA]A) out([1]result) no_copy_deps *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } else { #pragma omp task in([szeA]A) out([1]result) fake([szeF]fake) no_copy_deps *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } } } #endif
loopct_r2.c
/*
 * Input:  ntabs nchannels padded_size
 * Output: ntabs ntimes nchannels ; ntimes < padded_size
 *
 * We process a finished tab directly, so no need to build up the full ntabs array.
 * Channels are handled two at a time, so nchannels must be even.
 */
void deinterleave(const char *page, char *transposed, const int ntabs,
                  const int nchannels, const int ntimes, const int padded_size)
{
    int tab;

    for (tab = 0; tab < ntabs; tab++) {
        int channel;
        #pragma omp parallel for
        for (channel = 0; channel < nchannels; channel += 2) {
            const char *channelA = &page[(tab*nchannels + channel + 0)*padded_size];
            const char *channelB = &page[(tab*nchannels + channel + 1)*padded_size];
            int time;
            for (time = 0; time < ntimes; time++) {
                // reverse freq order to comply with header
                transposed[time*nchannels + nchannels-(channel+0)-1] = channelA[time];
                transposed[time*nchannels + nchannels-(channel+1)-1] = channelB[time];
            }
        }
    }
}
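A toy driver (sizes and test data are mine) that links against deinterleave() above and prints the transposed buffer, making the index mapping visible: output row t holds the channels of time t in reversed order.

#include <cstdio>

// Prototype for the C function above (extern "C" for C linkage).
extern "C" void deinterleave(const char *page, char *transposed,
                             int ntabs, int nchannels, int ntimes,
                             int padded_size);

int main() {
  const int ntabs = 1, nchannels = 4, ntimes = 3, padded = 4;
  char page[ntabs * nchannels * padded];
  for (int ch = 0; ch < nchannels; ch++)
    for (int t = 0; t < padded; t++)
      page[ch * padded + t] = (char)(10 * ch + t);  // encode (channel, time)

  char out[ntimes * nchannels];
  deinterleave(page, out, ntabs, nchannels, ntimes, padded);

  for (int t = 0; t < ntimes; t++) {        // row t prints channels 3,2,1,0
    for (int ch = 0; ch < nchannels; ch++)
      std::printf("%3d ", out[t * nchannels + ch]);
    std::printf("\n");
  }
  return 0;
}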
lu2_mpi.c
//#define DO_RCOMM_LAST //#define CONCURRENT_UCOMM // lu2_mpi.c // // test program for blocked LU decomposition // // Time-stamp: <2019-05-21 12:16:12 makino> //#define NOBLAS //#define TIMETEST #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <fcntl.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <getopt.h> #include "lu2_mp.h" #include <emmintrin.h> typedef double v2df __attribute__((vector_size(16))); typedef union {v2df v; double s[2];}v2u; #include <lu2tlib.h> #include <lu2tlib.h> #include <lu2lib.h> #define LIBTESTGEMMMAIN #include "libtestgemm.h" void timer_init(); double cpusec(); double wsec(); omp_lock_t my_lock; void printmat_MP(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms); #define RDIM (n+16) void copymats( int n, double a[n][RDIM], double a2[n][RDIM]) { int i, j; for(i=0;i<n;i++){ for(j=0;j<n+2;j++) a2[i][j] = a[i][j]; } } void dumpsubmat(char * s,int n1, double mat[][n1], int nrow, int ncolumn, PPARMS parms, PCONTROLS controls) { int ip, i, j; return; fprintf(stderr, "npr, npc = %d %d\n", parms->nprow, parms->npcol); sleep(MP_myprocid()*2+1); fprintf(stderr,"\n\n\n\n\n"); fprintf(stderr,"%s\n", s); for(i=0;i<nrow;i++){ fprintf(stderr,"%3d ", i); for(j=0;j<ncolumn;j++) fprintf(stderr," %6.2f", mat[i][j]); fprintf(stderr,"\n"); } fprintf(stderr,"\n\n\n\n\n"); fflush(stderr); } void copysubmat0(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i, j; for(i=0;i<nrow;i++){ for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; } } void copysubmat8(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i; if (nrow * ncolumn > 10000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; } }else{ for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; } } } void copysubmat16(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i; if (nrow * ncolumn > 10000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; d[4]=s[4]; d[5]=s[5]; d[6]=s[6]; d[7]=s[7]; } }else{ for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; d[4]=s[4]; d[5]=s[5]; d[6]=s[6]; d[7]=s[7]; } } } void copysubmat(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { // assume that ncolum is multiple of 8 and // address is 16-byte aligined int i; int j; if (ncolumn < 8){ copysubmat0( n1, src, n2, dest, nrow, ncolumn); return; } #if 1 if (ncolumn == 8){ copysubmat8( n1, src, n2, dest, nrow, ncolumn); return; } if (ncolumn == 16){ copysubmat16( n1, src, n2, dest, nrow, ncolumn); return; } #endif BEGIN_TIMER(t); if (nrow * ncolumn > 30000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ int j; // for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); for(j=0;j<ncolumn/2;j+=8){ // __builtin_prefetch(s+j+96,0,0); #if 0 d[j]=s[j]; d[j+1]=s[j+1]; d[j+2]=s[j+2]; d[j+3]=s[j+3]; d[j+4]=s[j+4]; d[j+5]=s[j+5]; d[j+6]=s[j+6]; 
d[j+7]=s[j+7]; #else __builtin_ia32_movntpd((double*)&d[j], s[j]); __builtin_ia32_movntpd((double*)&d[j+1], s[j+1]); __builtin_ia32_movntpd((double*)&d[j+2], s[j+2]); __builtin_ia32_movntpd((double*)&d[j+3], s[j+3]); __builtin_ia32_movntpd((double*)&d[j+4], s[j+4]); __builtin_ia32_movntpd((double*)&d[j+5], s[j+5]); __builtin_ia32_movntpd((double*)&d[j+6], s[j+6]); __builtin_ia32_movntpd((double*)&d[j+7], s[j+7]); #endif } } }else{ for(i=0;i<nrow;i++){ int j; // for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); for(j=0;j<ncolumn/2;j+=8){ #if 0 d[j]=s[j]; d[j+1]=s[j+1]; d[j+2]=s[j+2]; d[j+3]=s[j+3]; d[j+4]=s[j+4]; d[j+5]=s[j+5]; d[j+6]=s[j+6]; d[j+7]=s[j+7]; #else __builtin_ia32_movntpd((double*)&d[j], s[j]); __builtin_ia32_movntpd((double*)&d[j+1], s[j+1]); __builtin_ia32_movntpd((double*)&d[j+2], s[j+2]); __builtin_ia32_movntpd((double*)&d[j+3], s[j+3]); __builtin_ia32_movntpd((double*)&d[j+4], s[j+4]); __builtin_ia32_movntpd((double*)&d[j+5], s[j+5]); __builtin_ia32_movntpd((double*)&d[j+6], s[j+6]); __builtin_ia32_movntpd((double*)&d[j+7], s[j+7]); #endif } } } END_TIMER(t,43,nrow*ncolumn*1.0); #if 0 for(i=0; i< nrow; i++){ for(j=0; j<ncolumn; j++){ printf(" %6.2f", src[i][j]); } printf("\n"); } printf("\n\n"); for(i=0; i< nrow; i++){ for(j=0; j<ncolumn; j++){ printf(" %6.2f", dest[i][j]); } printf("\n"); } printf("\n"); #endif } void copybvect( int n, double a[][RDIM], double b[]) { int i; for(i=0;i<n;i++)b[i] = a[i][n]; } void showresult(int n, double a[n][RDIM], double x[]) { int i, j; double emax = 0; for(i=0;i<n;i++){ int k; double b2=0; // printf("%3d: ", i); // for(j=0;j<n;j++) printf(" %10.3e", a[i][j]); for(j=0;j<n;j++) b2 += a[i][j] * x[j]; double err = b2-a[i][n]; emax = (fabs(err) > emax) ? 
fabs(err):emax; // printf(" %10.3e %10.3e %10.3e %10.3e \n", x[i], a[i][n], b2, err); } printf("Emax= %10.3e\n", emax); } void readmat( int n, double a[n][RDIM]) { int i, j; for(i=0;i<n;i++){ for(j=0;j<n+1;j++) scanf("%le", &(a[i][j])); } } void randomsetmat( int n, int seed, double a[n][RDIM]) { long int i, j; srand48((long) seed); for(i=0;i<n;i++){ // printf("i=%d\n", i); for(j=0;j<n;j++) { double * ap = a[i]; ap[j]=drand48()-0.5; } // printf("n, i=%d\n", i); a[i][n]=drand48()-0.5;; } } void MP_randomsetmat(int nncol, int nnrow,double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int set_b, double b[]) { long int i, j; int seed; MP_message("enter MP_randomsetmat"); srandom(parms->seed*(MP_myprocid()+1)); for(i=0; i< MP_myprocid()+100; i++) seed = random(); srand48((long) seed); dprintf(9,"seed, nncol,nnrow, ncols, nrows = %d %d %d %d %d\n", seed,nncol, nnrow, controls->ncol, controls->nrow); for(i=0;i<controls->nrow;i++){ // printf("i=%d\n", i); for(j=0;j<controls->ncol;j++) { double * ap = a[i]; ap[j]=drand48()-0.5; // dprintf(9,"ap %d %d = %g\n", i,j, ap[j]); } // printf("n, i=%d\n", i); } for(i=0;i<controls->nrow;i++){ // b[i]=1; b[i]=drand48()-0.5; } MPI_Bcast(b, controls->nrow, MPI_DOUBLE, 0, controls->row_comm); if (set_b){ for(i=0;i<controls->nrow;i++){ a[i][controls->ncol]=b[i]; } } MP_message("end MP_randomsetmat"); } void calclocalnorm(int nncol, int nnrow,double a[nnrow][nncol], int nrow, int ncol, double ao[], double a1[]) { int i, j; // fprintf(stderr, "nncol=%d nnrow=%d ncol=%d nrow=%d\n", nncol, nnrow, ncol, nrow); for(j=0;j<nrow;j++) ao[j]=0; for(i=0;i<ncol;i++) a1[i]=0; for(j=0;j<nrow; j++){ for(i=0;i<ncol;i++) { double aa = fabs(a[j][i]); ao[j] += aa; a1[i] += aa; } } } double vnormi(double a[], int n) { double x = 0; int i; for(i=0;i<n;i++){ double y = fabs(a[i]); if(y > x) x = y; } return x; } void MP_calcnorm(int nncol, int nnrow,double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, double *norm1, double *norminf) { int i, j; int nrow = controls->nrow; int ncol = controls->ncol; MP_message("enter MP_calcnorm"); double ao[nnrow]; double aosum[nnrow]; double a1[nncol]; double a1sum[nncol]; print_current_time("call calculocalnorm"); calclocalnorm(nncol, nnrow, a, nrow, ncol, ao, a1); print_current_time("end calculocalnorm"); MPI_Allreduce(ao,aosum, nrow, MPI_DOUBLE, MPI_SUM, controls->row_comm); MPI_Allreduce(a1,a1sum, ncol, MPI_DOUBLE, MPI_SUM, controls->col_comm); double aolocal = vnormi(aosum, nrow); double a1local = vnormi(a1sum, ncol); double aoglobal, a1global; MPI_Allreduce(&aolocal, &aoglobal, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm); MPI_Allreduce(&a1local, &a1global, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm); *norm1 = a1global; *norminf = aoglobal; MP_message("end MP_calcnorm"); } void MP_calcvnorm(double b[], PPARMS parms, PCONTROLS controls, double *norm1, double *norminf) { int i, j; int nrow = controls->nrow; MP_message("enter MP_calcvnorm"); double bo=0; double b1=0; for(i=0;i<nrow; i++)b1 += fabs(b[i]); bo = vnormi(b, nrow); double b1sum, boglobal; MPI_Allreduce(&b1,&b1sum, 1, MPI_DOUBLE, MPI_SUM, controls->col_comm); MPI_Allreduce(&bo, &boglobal, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm); *norm1 = b1sum; *norminf = boglobal; MP_message("end MP_calcvnorm"); } void printmat( int n, double a[n][RDIM]) { int i, j; for(i=0;i<n;i++){ printf("%3d: ", i); for(j=0;j<n+1;j++) printf(" %10.3e", a[i][j]); printf("\n"); } printf("\n"); } void printsqmat( int n, double a[n][n]) { int i, j; for(i=0;i<n;i++){ printf("%3d: ", i); for(j=0;j<n;j++) 
printf(" %10.3e", a[i][j]); printf("\n"); } printf("\n"); } void backward_sub(int n,double a[n][RDIM], double b[]) { int i,j,k; for (i=0;i<n;i++)b[i] = a[i][n]; for(j=n-2;j>=0;j--) for(k=j+1;k<n;k++) b[j] -= b[k]*a[j][k]; } void lu( int n, double a[n][RDIM], double b[]) { int i, j, k; for(i=0;i<n-1;i++){ // select pivot double amax = fabs(a[i][i]); int p=i; for(j=i+1;j<n;j++){ if (fabs(a[j][i]) > amax){ amax = fabs(a[j][i]); p = j; } } // exchange rows if (p != i){ for(j=i;j<n+1;j++){ double tmp = a[p][j]; a[p][j] = a[i][j]; a[i][j]=tmp; } } // normalize row i double ainv = 1.0/a[i][i]; // fprintf(stderr,"%d %e\n", i, ainv); for(k=i+1;k<n+1;k++) a[i][k]*= ainv; // subtract row i from all lower rows for(j=i+1;j<n;j++){ // fprintf(stderr,"j=%d \n",j); for(k=i+1;k<n+1;k++) a[j][k] -= a[j][i] * a[i][k]; } } printmat(n,a); a[n-1][n] /= a[n-1][n-1]; backward_sub(n,a,b); } int MP_find_pivot(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current) { int index; // fprintf(stderr, "%d: hav_current_col = %d\n", MP_myprocid(), // have_current_col(current, parms, controls)); if (have_current_col(current, parms, controls)){ int first=first_row(current, parms, controls); double fmax =0; int imax = -1; int i; int ii = local_colid(current, parms, controls); for(i=first; i< controls->nrow;i++){ if (fabs(a[i][ii]) > fmax){ fmax = fabs(a[i][ii]); imax = i; } } index = global_rowid(imax, parms, controls);// <- wrong!! MP_max_and_maxloc(&fmax, &index, controls->col_comm); } MPI_Bcast(&index,sizeof(int),MPI_BYTE, pcolid(current,parms,controls), controls->row_comm); MPI_Barrier(MPI_COMM_WORLD); // fprintf(stderr,"MP find pivot returns %d\n", index); return index; } int MP_find_pivot_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current) { int index; // fprintf(stderr, "%d: hav_current_col = %d\n", MP_myprocid(), // have_current_col(current, parms, controls)); if (have_current_col(current, parms, controls)){ int first=first_row(current, parms, controls); double fmax =0; int imax = -1; int i; int ii = local_colid(current, parms, controls)%nncol; #if 0 for(i=first; i< controls->nrow;i++){ if (fabs(a[ii][i]) > fmax){ fmax = fabs(a[ii][i]); imax = i; } } #else if (controls->nrow-first > 0){ imax = cblas_idamax(controls->nrow-first, a[ii]+first, 1)+first; fmax = fabs(a[ii][imax]); }else{ imax = 0; fmax = -1; } #endif index = global_rowid(imax, parms, controls);// <- wrong!! 
// fprintf(stderr,"find_pivot local max, index =%10.3e, %d\n", fmax, index); MP_max_and_maxloc(&fmax, &index, controls->col_comm); // fprintf(stderr,"find_pivot g max, index =%10.3e, %d\n", fmax, index); } return index; } void local_row_exchange(int nnrow,int nncol,double a[nnrow][nncol], int row1,int row2) { int i; double *ap1 = a[row1]; double *ap2 = a[row2]; for(i=0;i<nncol;i++){ double tmp=ap1[i]; ap1[i]=ap2[i]; ap2[i]=tmp; } } void local_row_exchange_blocked(int nnrow,int nncol,double a[nnrow][nncol], int row1,int row2, int c1, int c2) { int i; double *ap1 = a[row1]; double *ap2 = a[row2]; for(i=c1;i<c2;i++){ double tmp=ap1[i]; ap1[i]=ap2[i]; ap2[i]=tmp; } } void local_row_exchange_blocked_transposed(int nnrow,int nncol, double a[nncol][nnrow], int row1,int row2, int c1, int c2) { int i; double *ap1 = a[row1]; double *ap2 = a[row2]; for(i=c1;i<c2;i++){ double tmp=a[i][row1]; a[i][row1]=a[i][row2]; a[i][row2]=tmp; } } void MP_swap_row_ptop(int nnrow, int nncol, double a[nnrow][nncol], int myrow, int procswap, PCONTROLS controls) { double atmp[nncol]; int i; MPI_Status mpstatus; for(i=0;i<nncol;i++)atmp[i]=a[myrow][i]; MPI_Sendrecv(atmp, nncol, MPI_DOUBLE, procswap, MPSWAPTAG, a[myrow],nncol, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus); } void MP_swap_row_ptop_blocked(int nnrow, int nncol, double a[nnrow][nncol], int myrow, int procswap, PCONTROLS controls, int c1, int c2) { double atmp[nncol]; int i; MPI_Status mpstatus; if (c2 > c1){ int ndata = c2-c1; for(i=0;i<ndata;i++)atmp[i]=a[myrow][i+c1]; MPI_Sendrecv(atmp, ndata, MPI_DOUBLE, procswap, MPSWAPTAG, a[myrow]+c1,ndata, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus); } } void MP_swap_row_ptop_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], int myrow, int procswap, PCONTROLS controls, int c1, int c2) { double atmp[nncol]; double atmp2[nncol]; int i; MPI_Status mpstatus; if (c2 > c1){ int ndata = c2-c1; for(i=0;i<ndata;i++)atmp[i]=a[i+c1][myrow]; MPI_Sendrecv(atmp, ndata, MPI_DOUBLE, procswap, MPSWAPTAG, atmp2,ndata, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus); for(i=0;i<ndata;i++)a[i+c1][myrow]=atmp2[i]; } } int MP_swap_rows(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int pivot) { int current_proc; int current_lloc; int pivot_proc; int pivot_lloc; convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls); convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls); if (current_proc == pivot_proc){ if (current_proc == controls->rank_in_col) local_row_exchange(nnrow,nncol,a,current_lloc,pivot_lloc); }else if (current_proc == controls->rank_in_col){ MP_swap_row_ptop(nnrow,nncol,a,current_lloc,pivot_proc, controls); }else if (pivot_proc == controls->rank_in_col){ MP_swap_row_ptop(nnrow,nncol,a,pivot_lloc,current_proc, controls); } } int MP_swap_rows_blocked(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int pivot, int c1, int c2) { int current_proc; int current_lloc; int pivot_proc; int pivot_lloc; convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls); convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls); if (current_proc == pivot_proc){ if (current_proc == controls->rank_in_col) local_row_exchange_blocked(nnrow,nncol,a,current_lloc, pivot_lloc,c1,c2); }else if (current_proc == controls->rank_in_col){ 
MP_swap_row_ptop_blocked(nnrow,nncol,a,current_lloc,pivot_proc, controls,c1,c2); }else if (pivot_proc == controls->rank_in_col){ MP_swap_row_ptop_blocked(nnrow,nncol,a,pivot_lloc,current_proc, controls,c1,c2); } } int MP_swap_rows_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int pivot, int c1, int c2) { int current_proc; int current_lloc; int pivot_proc; int pivot_lloc; convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls); convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls); // dprintf(9,"swap_rows_transposed cp,pp,rank_in_col=%d %d %d\n", // current_proc, pivot_proc,controls->rank_in_col); if (current_proc == pivot_proc){ if (current_proc == controls->rank_in_col) local_row_exchange_blocked_transposed(nnrow,nncol,a,current_lloc, pivot_lloc,c1,c2); }else if (current_proc == controls->rank_in_col){ MP_swap_row_ptop_blocked_transposed(nnrow,nncol,a, current_lloc,pivot_proc, controls,c1,c2); }else if (pivot_proc == controls->rank_in_col){ MP_swap_row_ptop_blocked_transposed(nnrow,nncol,a,pivot_lloc, current_proc, controls,c1,c2); } // dprintf(9,"swap_rows_transposed end\n"); } int MP_scale_row(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ double scale = a[current_lrow][current_lcol]; int i; MPI_Bcast(&scale,sizeof(double),MPI_BYTE, current_pcol, controls->row_comm); scale = 1.0/scale; for(i=0;i<nncol;i++)a[current_lrow][i] *= scale; } } int MP_scale_row_blocked(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int singlecol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ double scale = a[current_lrow][current_lcol]; int i; if (! 
singlecol){ MPI_Bcast(&scale,sizeof(double),MPI_BYTE, current_pcol, controls->row_comm); } scale = 1.0/scale; if (controls->rank_in_row != current_pcol){ for(i=c1;i<c2;i++)a[current_lrow][i] *= scale; }else{ for(i=c1;i<c2;i++) if (i != current_lcol) a[current_lrow][i] *= scale; } } } int MP_scale_row_blocked_using_scale(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int singlecol,double scaleval) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ int i; for(i=c1;i<c2;i++)a[current_lrow][i] *= scaleval; } } double scaleval(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); double scale = a[current_lrow][current_lcol]; return 1.0/scale; } int MP_construct_scalevector(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nb, double scale[]) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ int i; for(i=0;i<nb;i++){ scale[i]= 1.0/ a[current_lrow+i][current_lcol+i]; } } } int MP_scale_row_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int c1, int c2) { int current_prow; int current_lrow; int current_pcol; int current_lcol; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); current_lcol %= nncol; if (current_prow == controls->rank_in_col){ double scale = a[current_lcol][current_lrow]; int i; scale = 1.0/scale; if (controls->rank_in_row != current_pcol){ for(i=c1;i<c2;i++)a[i][current_lrow] *= scale; }else{ for(i=c1;i<c2;i++) if (i != current_lcol) a[i][current_lrow] *= scale; } } } int MP_update_single(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j; double arow[nncol]; double acol[nnrow]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startcol; int startrow; int nb = parms->nb; if (current_prow > controls->rank_in_col){ startrow = (current_lrow/nb +1)*nb; }else if (current_prow == controls->rank_in_col){ startrow =current_lrow+1; }else{ startrow = (current_lrow/nb)*nb; } if (current_pcol > controls->rank_in_row){ startcol = (current_lcol/nb +1)*nb; }else if (current_pcol == controls->rank_in_row){ startcol =current_lcol+1; }else{ startcol = (current_lcol/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol]; } if (current_prow 
== controls->rank_in_col){ for (i=0;i<ncol-startcol;i++)arow[i]=a[current_lrow][i+startcol]; } MPI_Bcast(arow,sizeof(double)*(ncol-startcol),MPI_BYTE, current_prow, controls->col_comm); MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm); for (i=startcol;i<ncol;i++){ for (j=startrow;j<nrow;j++){ a[j][i] -= acol[j-startrow]*arow[i-startcol]; } } } int MP_update_single_blocked(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j; double arow[nncol]; double acol[nnrow]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (current_prow > controls->rank_in_col){ startrow = (current_lrow/nb +1)*nb; }else if (current_prow == controls->rank_in_col){ startrow =current_lrow+1; }else{ startrow = (current_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol]; } if (current_prow == controls->rank_in_col){ for (i=0;i<c2-c1;i++)arow[i]=a[current_lrow][i+c1]; } MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm); MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm); for (i=c1;i<c2;i++){ for (j=startrow;j<nrow;j++){ a[j][i] -= acol[j-startrow]*arow[i-c1]; } } } static void vsmulandsub0(int r0, int r1, double al[], double ar[], double s) { int i; for (i=r0;i<r1;i++)al[i] -= ar[i]*s; } static void vsmulandsub(int r0, int r1, double al[], double ar[], double s) { int j; if (r1 - r0 < 16){ vsmulandsub0(r0, r1, al, ar, s); return; } while (r0 & 7){ al[r0] -= ar[r0]*s; r0++; } while (r1 & 7){ al[r1-1] -= ar[r1-1]*s; r1--; } v2df * arv = (v2df*) (ar+r0); v2df * alv = (v2df*) (al+r0); v2df ss = (v2df){s,s}; // for(j=r0;j<r1;j++) // al[j] -= ar[j]*s; for(j=0;j<(r1-r0)/2;j+=4){ alv[j] -= arv[j]*ss; alv[j+1] -= arv[j+1]*ss; alv[j+2] -= arv[j+2]*ss; alv[j+3] -= arv[j+3]*ss; __builtin_prefetch(alv+j+32,1,3); __builtin_prefetch(arv+j+32,0,0); } } int MP_update_single_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, double * arow, double * acol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j; int ncol = controls->ncol+1; int nrow = controls->nrow; // dprintf(9,"enter MP_update_single_blocked_transposed\n"); convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); current_lcol %= nncol; int startrow; int nb = parms->nb; if (current_prow > controls->rank_in_col){ startrow = (current_lrow/nb +1)*nb; }else if (current_prow == controls->rank_in_col){ startrow =current_lrow+1; }else{ startrow = (current_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++)acol[i+startrow]=a[current_lcol][i+startrow]; } // if (current_prow == controls->rank_in_col){ for (i=0;i<c2-c1;i++)arow[i]=a[i+c1][current_lrow]; // } // Since the value of current_prow is broadcasted, there // is no need for other processors to execute the above // loop. However, OpenMPI complains.... 
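  // Note (added; a reading of the code, not the original author's text):
  // after the broadcast below, every process in this column holds
  //   arow[0..c2-c1) : the scaled pivot row (the new U-row fragment),
  // while acol[] already holds this rank's share of the pivot column
  // (the elimination multipliers), so the loop that follows applies the
  // rank-1 update
  //   a[k][i] -= acol[i] * arow[k-c1]    (transposed storage: a[col][row]).
  // E.g. multipliers l = (2,3) against pivot-row entries u = (4,5)
  // subtract the outer product (8 10; 12 15) from the trailing 2x2 block.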
// dprintf(9," MP_update_single_blocked_transposed bcast %d\n",c2-c1); MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm); // dprintf(9," MP_update_single_blocked_transposed end bcast %d\n",c2-c1); // the following way does not look optimal yet... int ii; #pragma omp parallel for private(ii) schedule(static) for(ii=0;ii<4;ii++){ int k; int nr = (nrow-startrow)/4; int i1 = nr*ii+startrow; int i2 = i1+nr; if (ii==3) i2 =nrow; for (k=c1;k<c2;k++){ vsmulandsub(i1, i2, a[k],acol,arow[k-c1]); } } // dprintf(9,"end MP_update_single_blocked_transposed\n"); } int MP_update_single_blocked_global(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int global_startrow) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j; double arow[nncol]; double acol[nnrow]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol]; } if (current_prow == controls->rank_in_col){ for (i=0;i<c2-c1;i++)arow[i]=a[current_lrow][i+c1]; } MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm); MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm); for (i=c1;i<c2;i++){ for (j=startrow;j<nrow;j++){ a[j][i] -= acol[j-startrow]*arow[i-c1]; } } } int MP_process_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int global_startrow, double acol[][nbin]) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ copysubmat(nncol, (double(*)[])(a[startrow]+current_lcol), nbin, acol, nrow-startrow, nbin); } MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm); return nrow-startrow; } int MP_prepare_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int global_startrow, double acol[][nbin]) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, 
&current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++) for(j=0;j<nbin;j++) acol[i][j]=a[i+startrow][current_lcol+j]; } return nrow-startrow; } int MP_update_multiple_blocked_global_using_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, double acol[][nbin]) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; double arow[nbin][c2-c1]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_prow == controls->rank_in_col){ for (i=0;i<c2-c1;i++) for(j=0;j<nbin;j++) arow[j][i]=a[current_lrow+j][i+c1]; } MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm); mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol ); } int MP_bcast_umat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, double arow[nbin][c2-c1]) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_prow == controls->rank_in_col){ copysubmat(nncol, (double(*)[])(a[current_lrow]+c1), c2-c1, arow, nbin, c2-c1); } MP_mybcast(arow,sizeof(double)*(c2-c1)*nbin, current_prow, controls->col_comm); } int startrow_for_update(PPARMS parms, PCONTROLS controls, int current, int global_startrow) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } return startrow; } int MP_update_using_lu(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int 
c2, int global_startrow, double acol[][nbin], double arow[nbin][c2-c1]) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } BEGIN_TIMER(timer); mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol ); END_TIMER(timer,19,((double)(nrow-startrow))*(c2-c1)*nbin*2); } int MP_update_multiple_blocked_global(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, int singlecol) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; double arow[nbin][c2-c1]; double acol[nnrow][nbin]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ for (i=0;i<nrow-startrow;i++) for(j=0;j<nbin;j++) acol[i][j]=a[i+startrow][current_lcol+j]; } if (! singlecol){ MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm); } if (current_prow == controls->rank_in_col){ for (i=0;i<c2-c1;i++) for(j=0;j<nbin;j++) arow[j][i]=a[current_lrow+j][i+c1]; } MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm); mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol ); } int MP_update_multiple_blocked_global_withacol(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, double acol[nnrow][nbin], int c1, int c2, int global_startrow, int singlecol) { int current_prow; int current_lrow; int gs_prow; int gs_lrow; int current_pcol; int current_lcol; int i,j,k; double arow[nbin][c2-c1]; int ncol = controls->ncol+1; int nrow = controls->nrow; BEGIN_TIMER(timer0); convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls); convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls); int startrow; int nb = parms->nb; if (gs_prow > controls->rank_in_col){ startrow = (gs_lrow/nb+1)*nb; }else if (gs_prow == controls->rank_in_col){ startrow =gs_lrow; }else{ startrow = (gs_lrow/nb)*nb; } if (current_pcol == controls->rank_in_row){ copysubmat(nncol, (double(*)[])(a[startrow]+current_lcol), nbin, acol, nrow-startrow, nbin); } if (! 
singlecol){ MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm); } if (current_prow == controls->rank_in_col){ copysubmat(nncol, (double(*)[])(a[current_lrow]+c1), c2-c1, arow, nbin,c2-c1); } MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm); END_TIMER(timer0,27,((double)(nrow-startrow))*(c2-c1)); BEGIN_TIMER(timer); mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol ); if (nbin == 8){ END_TIMER(timer,13,((double)(nrow-startrow))*(c2-c1)*nbin*2); }else if (nbin == 16){ END_TIMER(timer,20,((double)(nrow-startrow))*(c2-c1)*nbin*2); }else if (nbin == 32){ END_TIMER(timer,21,((double)(nrow-startrow))*(c2-c1)*nbin*2); }else if (nbin == 64){ END_TIMER(timer,22,((double)(nrow-startrow))*(c2-c1)*nbin*2); }else if (nbin == 128){ END_TIMER(timer,23,((double)(nrow-startrow))*(c2-c1)*nbin*2); } END_TIMER(timer,26,((double)(nrow-startrow))*(c2-c1)*nbin*2); } static void MP_solve_triangle_for_unit_mat_internal(int nncol, double a[][nncol], int nb, double b[][nb], int m) { int i,ii,j,k; for(ii=0;ii<m;ii++){ for(j=ii+1;j<m;j++){ v2df acopy = (v2df){-a[j][ii],-a[j][ii]}; v2df* src = (v2df*)b[ii]; v2df* dest = (v2df*)b[j]; for(k=0;k<j/2;k++)dest[k] += acopy*src[k]; if(j&1) b[j][j-1] -= a[j][ii]*b[ii][j-1]; } } } static void MP_solve_triangle_for_unit_mat_internal_omp(int nncol, double a[][nncol], int nb, double b[][nb], int m) { int i,ii,j; for(ii=0;ii<m;ii++){ #pragma omp parallel for private(j) for(j=ii+1;j<m;j++){ int k; v2df acopy = (v2df){-a[j][ii],-a[j][ii]}; v2df* src = (v2df*)b[ii]; v2df* dest = (v2df*)b[j]; for(k=0;k<j/2;k++)dest[k] += acopy*src[k]; if(j&1) b[j][j-1] -= a[j][ii]*b[ii][j-1]; } } } void MP_solve_triangle_for_unit_mat(int nncol, double a[][nncol], int nb, double b[][nb], int m); static void MP_solve_triangle_for_unit_mat_recursive(int nncol, double a[][nncol], int nb, double b[][nb], int m) { int i,ii,j,k; if (m < 128){ MP_solve_triangle_for_unit_mat_internal(nncol, a, nb, b,m); return; } const int mhalf = m/2; MP_solve_triangle_for_unit_mat_recursive(nncol, a, nb, b,mhalf); mydgemm( mhalf, mhalf, mhalf, -1.0, &(a[mhalf][0]), nncol, &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb ); double bwork[mhalf][mhalf]; double bwork2[mhalf][mhalf]; for (j=0;j<mhalf;j++) for (k=0;k<mhalf;k++)bwork[j][k]=0.0; for (j=0;j<mhalf;j++)bwork[j][j]=1.0; MP_solve_triangle_for_unit_mat_recursive(nncol, (double(*)[])(&a[mhalf][mhalf]), mhalf, bwork,mhalf); for(i=0;i<mhalf;i++) for(j=0;j<mhalf;j++) bwork2[i][j]=b[i+mhalf][j]; mydgemm(mhalf, mhalf, mhalf, 1.0, (double*)bwork,mhalf, (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb ); for (j=0;j<mhalf;j++) for (k=0;k<j+1;k++)b[mhalf+j][mhalf+k]=bwork[j][k]; } void MP_solve_triangle_for_unit_mat(int nncol, double a[][nncol], int nb, double b[nb][nb], int m) { int ii,j,k; BEGIN_TIMER(timer); for (j=0;j<nb;j++) for (k=0;k<nb;k++)b[j][k]=0.0; for (j=0;j<nb;j++)b[j][j]=1.0; MP_solve_triangle_for_unit_mat_recursive(nncol, (double(*)[]) (&a[0][0]), nb, b, m); END_TIMER(timer,35,((double)(nb))*nb*nb); } int MP_solve_triangle(int nncol, double a[][nncol], int ncols, int m, double acol[m][m]) { int i,ii,j,k; // current =ii // c0=i+m // c1=iend // r0=ii+1 // r1 = i+m double b[m][m]; double awork[m][ncols]; MP_solve_triangle_for_unit_mat(m,acol,m,b,m); for(j=0;j<m;j++){ for (k=0;k<ncols;k++){ awork[j][k]=a[j][k]; a[j][k]=0; } } #if 1 mydgemm(m, ncols, m, 1.0, &(b[0][0]), m, &(awork[0][0]), ncols, 0.0, &(a[0][0]), nncol ); #else for 
(k=0;k<ncols;k++){ for(j=0;j<m;j++) for(ii=0;ii<m;ii++) a[j][k]+= b[j][ii]*awork[ii][k]; } #endif } int MP_solve_triangle_using_inverse(int nncol, double a[][nncol], int ncols, int m, double b[m][m]) { int i,ii,j,k; double awork[m][ncols]; v2df zerov=(v2df){0.0,0.0}; if (ncols<32){ for(j=0;j<m;j++){ for (k=0;k<ncols;k++){ awork[j][k]=a[j][k]; a[j][k]=0; } } }else{ copysubmat(nncol,a,ncols,awork,m,ncols); } mydgemm(m, ncols, m, 1.0, &(b[0][0]), m, &(awork[0][0]), ncols, 0.0, &(a[0][0]), nncol ); } int MP_calculate_ld_old(int m, double a[][m], int ncols, double b[m][m], int current, PCONTROLS controls, PPARMS parms) { int i,ii,j,k; int current_prow, current_lrow; double awork[ncols][m]; for(j=0;j<ncols;j++){ for (k=0;k<m;k++){ awork[j][k]=a[j][k]; a[j][k]=0; } } convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); MPI_Bcast(b,sizeof(double)*m*m,MPI_BYTE, current_prow, controls->col_comm); #if 1 mydgemm(ncols, m, m, 1.0, &(awork[0][0]), m, &(b[0][0]), m, 0.0, &(a[0][0]), m ); #else for (k=0;k<m;k++){ for(j=0;j<ncols;j++) for(ii=0;ii<m;ii++) a[j][k]+= awork[j][ii]*b[ii][k]; } #endif } int MP_calculate_ld_phase1(int m, double a[][m], int ncols, double b[m][m], int current, PCONTROLS controls, PPARMS parms) // broadcast diagnal panel (already triangle) // so that it can then be multiplied with L panel { int i,ii,j,k; int current_prow, current_lrow; BEGIN_TIMER(timer); convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls); MPI_Bcast(b,sizeof(double)*m*m,MPI_BYTE, current_prow, controls->col_comm); END_TIMER(timer,38,(m+0.0)*m); } int MP_calculate_ld_phase2(int m, double a[][m], int ncols, double awork[][m], double b[m][m], int current, PCONTROLS controls, PPARMS parms) { int i,ii,j,k; int current_prow, current_lrow; BEGIN_TIMER(timer); for(j=0;j<ncols;j++){ for (k=0;k<m;k++){ awork[j][k]=a[j][k]; a[j][k]=0; } } mydgemm(ncols, m, m, 1.0, &(awork[0][0]), m, &(b[0][0]), m, 0.0, &(a[0][0]), m ); END_TIMER(timer,6,((double)(ncols))*m*m*2); } int MP_calculate_ld(int m, double a[][m], int ncols, double awork[][m], double b[m][m], int current, PCONTROLS controls, PPARMS parms) { MP_calculate_ld_phase1(m, a, ncols, b, current, controls, parms); MP_calculate_ld_phase2(m, a, ncols, awork, b, current, controls, parms); } int MP_update_multiple_using_diagonal(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int c1, int c2,int nrows, double acolinv[nrows][nrows]) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ MP_solve_triangle_using_inverse(nncol, (double(*)[])(&a[current_lrow][c1]), c2-c1, nrows,acolinv); } } int MP_store_diagonal_inverse(int nnrow, int nb, double dinv[nnrow][nb], PPARMS parms, PCONTROLS controls, int firstrow, double acolinv[nb][nb]) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ for (i=0;i<nb;i++){ for(j=0;j<nb;j++){ 
dinv[current_lrow+i][j]=acolinv[i][j]; } } } } int MP_process_diagonal(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int nrows, double acolinv[nrows][nrows], int singlecol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; double acol[nrows][nrows]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ if (current_pcol == controls->rank_in_row){ int endrow; int nrhalf = nrows/2; endrow = current_lrow+nrows; for (i=0;i<endrow-current_lrow;i++){ v2df * src = (v2df*)&(a[i+current_lrow][current_lcol]); v2df *dest = (v2df*)&(acol[i][0]); for (ii=0;ii<nrhalf;ii++){ // acol[i][ii]=a[i+current_lrow][current_lcol+ii]; dest[ii]=src[ii]; // need to be fixed } } } if (current_pcol == controls->rank_in_row){ #if 0 dprintf("update_local firstrow=%d\n", firstrow); for(i=0;i<nrows;i++){ fprintf(stderr, "i= %2d:", i); for(j=0;j<nrows;j++){ fprintf(stderr, " %10.3e",acol[i][j]); } fprintf(stderr,"\n"); } #endif MP_solve_triangle_for_unit_mat(nrows,acol,nrows,acolinv,nrows); } if (! singlecol){ MPI_Bcast(acolinv,sizeof(double)*nrows*nrows,MPI_BYTE, current_pcol, controls->row_comm); } } } int MP_process_diagonal_phase1(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int nrows, double acolinv[nrows][nrows], int singlecol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; double acol[nrows][nrows]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ if (current_pcol == controls->rank_in_row){ copysubmat(nncol, (double(*)[])(a[current_lrow]+current_lcol), nrows, acol, nrows, nrows); MP_solve_triangle_for_unit_mat(nrows,acol,nrows,acolinv,nrows); } } } int MP_process_diagonal_phase2(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int nrows, double acolinv[nrows][nrows], int singlecol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; double acol[nrows][nrows]; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ if (! 
singlecol){ MPI_Bcast(acolinv,sizeof(double)*nrows*nrows,MPI_BYTE, current_pcol, controls->row_comm); } } } void backward_sub_mpi(int nnrow, int nncol, double a[nnrow][nncol],double b[], PCONTROLS controls, PPARMS parms) { int i,j,k; int previous_pcol = -1; int n = controls->nrow*parms->nprow; int current_prow; int current_lrow; int current_pcol; int current_lcol; MPI_Status status; for (i=0;i<controls->nrow;i++)b[i] = a[i][controls->ncol]; for(i=n-1;i>=1;i--){ convert_global_index_to_local_rows(i, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(i, &current_pcol, &current_lcol, parms, controls); if (previous_pcol == -1) previous_pcol = current_pcol; if (current_pcol != previous_pcol){ // process column changed, current column should // receive b from previous column if(controls->rank_in_row == previous_pcol){ MPI_Send(b, controls->nrow, MPI_DOUBLE, current_pcol, MPBSTAG,controls->row_comm); }else if (controls->rank_in_row == current_pcol){ MPI_Recv(b, controls->nrow, MPI_DOUBLE, previous_pcol, MPBSTAG,controls->row_comm, &status); } } if(controls->rank_in_row == current_pcol){ // have the current column of a double btmp = b[current_lrow]; MPI_Bcast(&btmp,1,MPI_DOUBLE,current_prow, controls->col_comm); int jmax; int nb = parms->nb; if (controls->rank_in_col == current_prow){ jmax = current_lrow; }else if (controls->rank_in_col < current_prow){ jmax = ((current_lrow/nb)+1)*nb; } else{ jmax = (current_lrow/nb)*nb; } for(j=0;j<jmax;j++){ b[j] -= btmp*a[j][current_lcol]; } } previous_pcol=current_pcol; } MPI_Bcast(b,controls->nrow,MPI_DOUBLE, current_pcol, controls->row_comm); for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i]; } void backward_sub_blocked_mpi(int nnrow, int nncol, double a[nnrow][nncol], int nb, double dinv[nnrow][nb], double b[], PCONTROLS controls, PPARMS parms) { int i,ii,j,k; int previous_pcol = -1; int n = controls->nrow*parms->nprow; int current_prow; int current_lrow; int current_pcol; int current_lcol; MPI_Status status; // dprintf(9,"enter backward_sub_blocked_mpi\n"); for (i=0;i<controls->nrow;i++)b[i] = a[i][controls->ncol]; for(ii=n-1;ii>=1;ii-=nb){ // dprintf(9,"ii=%d\n", ii); convert_global_index_to_local_cols(ii, &current_pcol, &current_lcol, parms, controls); convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); if (previous_pcol == -1) previous_pcol = current_pcol; if (parms->npcol > 1){ if (ii != n-1){ if(controls->rank_in_row == previous_pcol){ MPI_Send(b, controls->nrow, MPI_DOUBLE, current_pcol, MPBSTAG,controls->row_comm); }else if (controls->rank_in_row == current_pcol){ MPI_Recv(b, controls->nrow, MPI_DOUBLE, previous_pcol, MPBSTAG,controls->row_comm, &status); } } } // dprintf(9,"send/receive end ii=%d\n", ii); if(controls->rank_in_row == current_pcol){ double bwork[nb]; int jend; if (controls->rank_in_col == current_prow){ int jmin = current_lrow - nb+1; #if 0 for(k=0;k<nb;k++){ int jmax; jmax = current_lrow-k; bwork[k]=b[jmax]; double btmp=bwork[k]; for(j=jmin;j<jmax;j++){ b[j] -= btmp*a[j][current_lcol-k]; } } #endif #if 0 for(j=current_lrow-1;j>=current_lrow-nb+1;j--){ int r0 = current_lrow-j+1; int c0 = current_lcol-j+1; for(k=0;k<current_lrow-j;k++){ b[j] -= b[current_lrow-k]*a[j][current_lcol-k]; } } #endif #if 1 for(j=current_lrow-1;j>=current_lrow-nb+1;j--){ int r0 = j+1; int c0 = current_lcol-current_lrow+r0; int kmax = current_lrow-j; double btmp = b[j]; double * bp = b+r0; double * ap = &(a[j][c0]); // double * apn = &(a[j-1][c0]); // for (k=0;k<kmax; k+= 
16)__builtin_prefetch(apn+k,0,0); for(k=0;k<kmax;k++){ btmp -= bp[k]*ap[k]; } b[j]=btmp; } #endif for(k=0;k<nb;k++){ int jmax; jmax = current_lrow-k; bwork[k]=b[jmax]; } // for(j=jmin;j<=current_lrow;j++){ // dprintf(9,"ad[%d] = %10.4g %10.4g\n", j,a[j][current_lcol-1], // a[j][current_lcol]); // } jend = jmin; if (parms->nprow > 1) MPI_Bcast(bwork,nb,MPI_DOUBLE,current_prow,controls->col_comm); // dprintf(9,"source bcast end ii=%d\n", ii); }else{ if (parms->nprow > 1) MPI_Bcast(bwork,nb,MPI_DOUBLE,current_prow,controls->col_comm); // dprintf(9,"dest bcast end ii=%d\n", ii); if (controls->rank_in_col < current_prow){ jend = ((current_lrow/nb)+1)*nb; } else{ jend = (current_lrow/nb)*nb; } } double bwork2[jend]; double bwork3[jend]; #if 0 for(j=0;j<jend;j++){ bwork2[j]=0; for(k=0;k<nb;k++){ bwork2[j] += bwork[k]*a[j][current_lcol-k]; } } #endif double bwork4[nb]; for(k=0;k<nb;k++) bwork4[k]=bwork[nb-1-k]; #pragma omp parallel for private(j,k) for(j=0;j<jend;j++){ double btmp=0; double *ap=&(a[j][current_lcol-nb+1]); for(k=0;k<nb;k++){ btmp += bwork4[k]*ap[k]; } bwork2[j]=btmp; } #define DUPOSTMUL #ifndef DUPOSTMUL for(j=0;j<jend;j++) b[j] -= bwork2[j]; #else #pragma omp parallel for private(j,k) schedule(static) for(j=0;j<jend;j++){ bwork3[j]=0; int jmin = (j /nb)*nb; for(k=0;k<nb;k++){ bwork3[j] += bwork2[k+jmin]*dinv[j][k]; } } for(j=0;j<jend;j++) b[j] -= bwork3[j]; #endif // dprintf(9,"calc end ii=%d\n", ii); } previous_pcol=current_pcol; } if (parms->npcol > 1) MPI_Bcast(b,controls->nrow,MPI_DOUBLE, current_pcol, controls->row_comm); for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i]; // dprintf(9,"backward_sub all t end \n"); } void check_solution_mpi(int nnrow, int nncol, double a[nnrow][nncol], double b[], double bcopy[], PCONTROLS controls, PPARMS parms) { int i,j,k; int nrow = controls->nrow; int ncol=controls->ncol; MPI_Allgather(b,nrow,MPI_DOUBLE,bcopy,nrow,MPI_DOUBLE, controls->col_comm); double bcopy2[ncol]; for(i=0;i<ncol;i++){ int iglobal=global_colid(i, parms,controls); int rowpid, rowlid; convert_global_index_to_local_rows(iglobal, &rowpid, &rowlid, parms, controls); int index= nrow*rowpid+rowlid; bcopy2[i]=bcopy[index]; // dprintf(9,"b[%d]=%g %g\n", i, bcopy2[i], bcopy[i]); } for(i=0;i<nrow;i++){ double btmp=0; for(j=0;j<ncol;j++) { btmp+= a[i][j]*bcopy2[j]; // dprintf(9,"a[%d][%d]=%g, bcopy2=%g %g\n", i, j, // a[i][j], bcopy2[j],btmp); } bcopy[i]=btmp; } MPI_Allreduce(bcopy,b,nrow,MPI_DOUBLE, MPI_SUM, controls->row_comm); for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i]; } double print_errors(double b[], double b0[], PCONTROLS controls, PPARMS parms) { int i,j,k; int nrow=controls->nrow; double esum = 0; double emax = 0; for (i=0;i<nrow;i++){ double err = b[i]-b0[i]; esum+= err*err; if (emax < fabs(err)) emax = fabs(err); } dprintf(9," esum = %e\n", esum); double totalerror=0; double totalemax=0; MPI_Reduce(&esum,&totalerror, 1, MPI_DOUBLE, MPI_SUM, 0, controls->col_comm); MPI_Reduce(&emax,&totalemax, 1, MPI_DOUBLE, MPI_MAX, 0, controls->col_comm); if (controls->rank_in_col == 0){ dprintf(0,"Errors = %e %e %e %e\n\n", sqrt(totalerror), totalemax, esum, emax); printf("\n Error = %e %e\n", sqrt(totalerror), totalemax); } return totalemax; } double Mmax(double a, double b) { if (a>b){ return a; }else{ return b; } } void HPLlogprint(FILE * f, int N, int NB, int nprow, int npcol, double elapsed) { fprintf(f, "%s%s\n", "======================================", "======================================" ); fprintf(f,"%s%s\n", "T/V N NB P Q", " Time Gflops" ); 
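  // Note (added; standard HPL accounting, not part of the original text):
  // the Gflops figure computed just below uses the canonical LU operation
  // count
  //   flops(N) = (2/3) N^3 + (3/2) N^2
  // so, e.g., N = 1000 and elapsed = 1 s gives
  //   (6.667e8 + 1.5e6) / 1e9 / 1 s ~= 0.668 Gflops.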
fprintf(f, "%s%s\n", "--------------------------------------", "--------------------------------------" ); double Gflops = ( ( (double)(N) / 1.0e+9 ) * ( (double)(N) / elapsed ) ) * ( ( 2.0 / 3.0 ) * (double)(N) + ( 3.0 / 2.0 ) ); fprintf(f,"W%c%1d%c%c%1d%c%1d%12d %5d %5d %5d %18.2f %18.3e\n", 'R',0,'1', 'R', 2, 'C', 32, N, NB, nprow, npcol, elapsed, Gflops ); } void HPLerrorprint(FILE * f, double emax, double a1, double ao, double b1, double bo, double n) { double epsil =2e-16; double thrsh = 16; double resid1 = emax / (epsil * a1 * n ); double resid2 = emax / (epsil * a1 * b1 ); double resid3 = emax / (epsil * ao * bo * n); // fprintf(f,"err= %e Anorms= %e %e Xnorms= %e %e\n",resid0, a1, ao, b1, bo); int kpass =0; int kfail= 0; if( ( Mmax( resid1, resid2 ) < thrsh ) && ( resid3 < thrsh ) ) (kpass)++; else (kfail)++; fprintf(f,"%s%s\n", "--------------------------------------", "--------------------------------------" ); fprintf(f,"%s%16.7f%s%s\n", "||Ax-b||_oo / ( eps * ||A||_1 * N ) = ", resid1, " ...... ", ( resid1 < thrsh ? "PASSED" : "FAILED" ) ); fprintf(f,"%s%16.7f%s%s\n", "||Ax-b||_oo / ( eps * ||A||_1 * ||x||_1 ) = ", resid2, " ...... ", ( resid2 < thrsh ? "PASSED" : "FAILED" ) ); fprintf(f,"%s%16.7f%s%s\n", "||Ax-b||_oo / ( eps * ||A||_oo * ||x||_oo ) = ", resid3, " ...... ", ( resid3 < thrsh ? "PASSED" : "FAILED" ) ); if( ( resid1 >= thrsh ) || ( resid2 >= thrsh ) || ( resid3 >= thrsh ) ) { fprintf(f,"%s%18.6f\n", "||Ax-b||_oo . . . . . . . . . . . . . . . . . = ", emax ); fprintf(f,"%s%18.6f\n", "||A||_oo . . . . . . . . . . . . . . . . . . . = ", ao ); fprintf(f, "%s%18.6f\n", "||A||_1 . . . . . . . . . . . . . . . . . . . = ", a1 ); fprintf(f,"%s%18.6f\n", "||x||_oo . . . . . . . . . . . . . . . . . . . = ", bo ); fprintf(f,"%s%18.6f\n", "||x||_1 . . . . . . . . . . . . . . . . . . . = ", b1 ); } fprintf(f, "%s%s\n", "======================================", "======================================" ); fprintf(f, "\n%s %6d %s\n", "Finished", 1, "tests with the following results:" ); fprintf(f," %6d %s\n", kpass, "tests completed and passed residual checks," ); fprintf(f," %6d %s\n", kfail, "tests completed and failed residual checks," ); fprintf(f," %6d %s\n", 0, "tests skipped because of illegal input values." 
); fprintf(f,"%s%s\n", "--------------------------------------", "--------------------------------------" ); fprintf(f,"\nEnd of Tests.\n" ); fprintf(f,"%s%s\n", "======================================", "======================================" ); } void printmat_MP(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms) { int procid =MP_proccount(); int ii, i, j; MPI_Barrier(MPI_COMM_WORLD); for (ii=0;ii<procid; ii++){ // fprintf(stderr,"printmatmp, ids = %d %d\n", ii, MP_myprocid()); if (MP_myprocid() == ii){ fprintf(stderr,"Printmat_MP: Proc %d, loc = %d %d\n",MP_myprocid(), controls->rank_in_row, controls->rank_in_col); for(i=0;i<controls->nrow;i++){ fprintf(stderr,"%3d: ", i); for(j=0;j<controls->ncol+1;j++) fprintf(stderr," %10.3e", a[i][j]); fprintf(stderr,"\n"); } fprintf(stderr,"\n"); } MPI_Barrier(MPI_COMM_WORLD); } } void lu_forward_onestep_mpi(int i,int nnrow, int nncol, double a[nnrow][nncol], double b[], PCONTROLS controls, PPARMS parms) { // dprintf(9,"lu_mpi enter i=%d\n", i); int imax= MP_find_pivot(nnrow, nncol, a, parms, controls, i); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,imax,0,nncol); // printmat_MP(nnrow, nncol, a, controls, parms); // dprintf(9,"lu_mpi after swap_rows i=%d\n", i); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,0,nncol,0); // printmat_MP(nnrow, nncol, a, controls, parms); // dprintf(9,"lu_mpi after scale_rows i=%d\n", i); MP_update_single(nnrow, nncol, a, parms, controls,i); // printmat_MP(nnrow, nncol, a, controls, parms); // dprintf(9,"lu_mpi after update_single i=%d\n", i); } void column_decomposition_mpi(int ifirst,int nb,int nnrow, int nncol, double a[nnrow][nncol], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; int c1, c2; // dprintf(9,"column_decomposition_mpi ifirst=%d nb=%d\n",ifirst,nb); convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2, parms, controls); c2 ++; for (ii=0;ii<nb;ii++){ i = ii+ifirst; int imax= MP_find_pivot(nnrow, nncol, a, parms, controls, i); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,imax,c1,c2); pv[ii]=imax; MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1); int cu1, cu2; convert_global_col_range_to_local_range(i+1, ifirst+nb-1,&cu1, &cu2, parms, controls); cu2 ++; MP_update_single_blocked(nnrow, nncol, a, parms, controls,i,cu1,cu2); } } void print2dmat_MP(int nnrow, int nncol, double a[nnrow][nncol], char* s) { int ii, i, j; fprintf(stderr,"Print2dmat_MP: Proc %d, name=%s \n",MP_myprocid(), s); for(i=0;i<nnrow;i++){ fprintf(stderr,"%3d: ", i); for(j=0;j<nncol;j++) fprintf(stderr," %10.3e", a[i][j]); fprintf(stderr,"\n"); } fprintf(stderr,"\n"); } void column_decomposition_mpi_transposed(int ifirst,int nb,int nnrow, int nncol, double a[nncol][nnrow], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; int c1, c2; double arow[nncol]; double acol[nnrow]; // dprintf(9,"column_decomposition_mpi ifirst=%d nb=%d\n",ifirst,nb); convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2, parms, controls); c1 %= nb; c2 %= nb; c2 ++; int havec=have_current_col(ifirst, parms, controls); if (havec){ // print2dmat_MP(nb, nnrow, a, "transposed a"); for (ii=0;ii<nb;ii++){ i = ii+ifirst; // dprintf(9,"coldec trans i=%d\n", i); int imax= MP_find_pivot_transposed(nnrow, nb, a, parms, controls, i); // dprintf(9,"coldec trans i=%d imax=%d find pivot end\n", i,imax); MP_swap_rows_blocked_transposed(nnrow, nb, a, parms, controls,i,imax,c1,c2); // dprintf(9,"coldec trans i=%d swap rows end\n", i); 
      pv[ii]=imax;
      // print2dmat_MP(nb, nnrow, a, "after swap");
      MP_scale_row_blocked_transposed(nnrow, nb, a, parms, controls,i,c1,c2);
      // print2dmat_MP(nb, nnrow, a, "after scale");
      // dprintf(9,"coldec trans i=%d scale rows end\n", i);
      int cu1, cu2;
      convert_global_col_range_to_local_range(i+1, ifirst+nb-1,&cu1, &cu2,
                                              parms, controls);
      cu1 %= nb;
      cu2 %= nb;
      cu2 ++;
      if(ii<nb-1){
        // dprintf(9,"coldec trans i=%d call update\n", i);
        MP_update_single_blocked_transposed(nnrow, nb, a, parms, controls,
                                            i,cu1,cu2,arow,acol);
      }
      // dprintf(9,"coldec trans i=%d update end\n", i);
    }
  }
}

void transpose_rowtocol8(int nnrow, int nncol, double a[nnrow][nncol],
                         double at[][nnrow], int istart)
{
  int i,j,k;
  const int m=8;
  int mend;
#pragma omp parallel for private(i,j,k) schedule(static)
  for(i=istart;i<nnrow;i+=m){
    double atmp[m][m];
    // BEGIN_TSC;
    for(k=0;k<m;k++){
      v2df * ak = (v2df*) a[i+k];
      v2df * akk = (v2df*) atmp[k];
      asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
      // asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
      // __builtin_prefetch(a[i+k+m*2],0,0);
      // __builtin_prefetch(a[i+k+m*2]+8,0,0);
      akk[0] =ak[0];
      akk[1] =ak[1];
      akk[2] =ak[2];
      akk[3] =ak[3];
    }
    // END_TSC(t,17);
    // {
    // BEGIN_TSC;
    for(j=0;j<m;j++){
      v2df * atk= (v2df*)(at[j]+i);
      atk[0]=(v2df){atmp[0][j],atmp[1][j]};
      atk[1]=(v2df){atmp[2][j],atmp[3][j]};
      atk[2]=(v2df){atmp[4][j],atmp[5][j]};
      atk[3]=(v2df){atmp[6][j],atmp[7][j]};
    }
    // END_TSC(t2,18);
    // }
  }
}

void transpose_rowtocol8_0(int nnrow, int nncol, double a[nnrow][nncol],
                           double at[][nnrow], int istart)
{
  int i,j,k;
  const int m=8;
  double atmp[m][m];
  for(i=istart;i<nnrow;i+=m){
    for(k=0;k<m;k++){
      double *ak = a[i+k];
      atmp[0][k] =ak[0];
      atmp[1][k] =ak[1];
      atmp[2][k] =ak[2];
      atmp[3][k] =ak[3];
      atmp[4][k] =ak[4];
      atmp[5][k] =ak[5];
      atmp[6][k] =ak[6];
      atmp[7][k] =ak[7];
    }
    for(j=0;j<m;j++){
      v2df * atp = (v2df*) atmp[j];
      v2df * ap = (v2df*) (at[j]+i);
      *(ap)=*(atp);
      *(ap+1)=*(atp+1);
      *(ap+2)=*(atp+2);
      *(ap+3)=*(atp+3);
    }
  }
}

void MP_transpose_rowtocol(int nnrow, int nncol, double a[nnrow][nncol],
                           int m, double at[][nnrow], int istart)
{
  if (m == 8){
    transpose_rowtocol8(nnrow, nncol, a, at, istart);
    return;
  }
  int i,j,k;
  double atmp[m][m];
  for(i=istart;i<nnrow;i+=m){
    for(k=0;k<m;k++){
      for(j=0;j<m;j++){
        atmp[j][k] =a[i+k][j];
      }
    }
    for(j=0;j<m;j++){
      for(k=0;k<m;k++){
        at[j][i+k]=atmp[j][k];
      }
    }
  }
}

void transpose_coltorow8(int nnrow,int nncol, double a[nnrow][nncol],
                         double at[][nnrow], int istart)
{
  int i,j,k;
  const int m=8;
  double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp) schedule(static)
  for(i=istart;i<nnrow;i+=m){
    for(j=0;j<m;j++){
      double * atj = at[j]+i;
      // __builtin_prefetch(at[j]+i+m+m,0,0);
      // inserting prefetch here causes speed down...
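      // Note (added; a reading of the code, not the original author's text):
      // the gather below stages an 8x8 tile of at[][] (the transposed panel)
      // into the stack buffer atmp, so each of the 8 source cache lines is
      // read once per tile; the v2df stores that follow then write the tile
      // back into a[][] two doubles per instruction.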
atmp[0][j] =atj[0]; atmp[1][j] =atj[1]; atmp[2][j] =atj[2]; atmp[3][j] =atj[3]; atmp[4][j] =atj[4]; atmp[5][j] =atj[5]; atmp[6][j] =atj[6]; atmp[7][j] =atj[7]; } for(k=0;k<m;k++){ v2df * atp = (v2df*) atmp[k]; v2df * ap = (v2df*) a[i+k]; *(ap)=*(atp); *(ap+1)=*(atp+1); *(ap+2)=*(atp+2); *(ap+3)=*(atp+3); } } } void MP_transpose_coltorow(int nnrow,int nncol, double a[nnrow][nncol],int m, double at[][nnrow], int istart) { if (m == 8){ transpose_coltorow8(nnrow, nncol, a, at, istart); return; } int i,j,k; double atmp[m][m]; for(i=istart;i<nnrow;i+=m){ for(j=0;j<m;j++){ double * atj = at[j]+i; for(k=0;k<m;k+=2){ atmp[k][j] =atj[k]; atmp[k+1][j] =atj[k+1]; // atmp[k+2][j] =atj[k+2]; // atmp[k+3][j] =atj[k+3]; } } for(k=0;k<m;k++){ double * aik = a[i+k]; for(j=0;j<m;j+=2){ aik[j] = atmp[k][j]; aik[j+1] = atmp[k][j+1]; // aik[j+2] = atmp[k][j+2]; // aik[j+3] = atmp[k][j+3]; } } } } void column_decomposition_mpi_with_transpose(int ifirst,int nb, int nnrow, int nncol, double a[nnrow][nncol], double b[], int pv[], PCONTROLS controls, PPARMS parms) { double awork[nb][nnrow]; int c1, c2; // dprintf(9,"column_decomposition_mpi_wt ifirst=%d nb=%d\n",ifirst,nb); convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2, parms, controls); c2 ++; int localfirst=first_row(ifirst, parms, controls); // dprintf(9,"column_decomposition_mpi_wt c1=%d c2=%d lfirst=%d\n", // c1,c2,localfirst); BEGIN_TIMER(timer0); MP_transpose_rowtocol(nnrow,nncol, (double(*)[]) (&a[0][c1]), nb, awork,localfirst); END_TIMER(timer0,10,((double)nb)*(nnrow-localfirst)); BEGIN_TIMER(timer1); column_decomposition_mpi_transposed(ifirst,nb,nnrow,nncol, awork, b, pv,controls,parms); END_TIMER(timer1,11,((double)nb)*nb*(nnrow-localfirst)); BEGIN_TIMER(timer2); MP_transpose_coltorow(nnrow,nncol, (double(*)[]) (&a[0][c1]), nb, awork,localfirst); END_TIMER(timer2,12,((double)nb)*(nnrow-localfirst)); } void process_right_part_mpi(int ifirst,int nb,int ncols, int nnrow, int nncol, double a[nnrow][nncol], double b[], int pv[], PCONTROLS controls, PPARMS parms, int singlecol) { int i,ii; int c1, c2; // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb); double acolinv[nb][nb]; MP_process_diagonal(nnrow, nncol, a, parms, controls, ifirst,nb,acolinv,singlecol); convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1, &c1, &c2, parms, controls); c2++; if (ncols <= 0) c2 = nncol; for (ii=0;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,singlecol); } // dprintf(9,"process_rp, ifirst=%d\n", ifirst); // printmat_MP(nnrow, nncol, a, controls, parms); // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls, // ifirst,c1,c2,nb); MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, ifirst,c1,c2,nb,acolinv); MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls, ifirst,nb,c1,c2,ifirst+nb,1); } void process_right_part_mpi_withacol(int ifirst,int nb,int ncols, int nnrow, int nncol, double a[nnrow][nncol], double acol[nnrow][nb], double b[], int pv[], PCONTROLS controls, PPARMS parms, int singlecol) { int i,ii; int c1, c2; // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb); double acolinv[nb][nb]; BEGIN_TIMER(timer2); MP_process_diagonal(nnrow, nncol, a, parms, controls, ifirst,nb,acolinv,singlecol); END_TIMER(timer2,24,((double)nb)*nb*nb/6.0); BEGIN_TIMER(timer3); convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1, &c1, &c2, parms, controls); c2++; if (ncols 
<= 0) c2 = nncol; for (ii=0;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,singlecol); } // dprintf(9,"process_rp, ifirst=%d\n", ifirst); // printmat_MP(nnrow, nncol, a, controls, parms); // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls, // ifirst,c1,c2,nb); END_TIMER(timer3,36,((double)nb)*(c2-c1)*2); BEGIN_TIMER(timer0); MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, ifirst,c1,c2,nb,acolinv); END_TIMER(timer0,24,((double)nb)*nb*(c2-c1)*2); BEGIN_TIMER(timer1); MP_update_multiple_blocked_global_withacol(nnrow, nncol, a, parms, controls, ifirst,nb,acol,c1,c2,ifirst+nb,1); END_TIMER(timer1,25,((double)nb)*nb*(c2-c1)*2); } void process_right_part_mpi_using_dl_old(int ifirst,int nb,int ncols, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; int c1, c2; // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb); convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1, &c1, &c2, parms, controls); c2++; if (ncols <= 0) c2 = nncol; for (ii=0;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,0); } // dprintf(9,"process_rp, ifirst=%d\n", ifirst); // printmat_MP(nnrow, nncol, a, controls, parms); // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls, // ifirst,c1,c2,nb); MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, ifirst,c1,c2,nb,acolinv); MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,acol); // MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls, // ifirst,nb,c1,c2,ifirst+nb,0); } void process_right_part_mpi_using_dl(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; // dprintf(9,"process_rp_using_dl, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); for (ii=0;ii<nb;ii++){ i = ii+ifirst; // dprintf(9,"process_rp, i=%d\n", i); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); // dprintf(9,"process_rp swap rows end, i=%d\n", i); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,0); } // dprintf(9,"process_rp, ifirst=%d\n", ifirst); // printmat_MP(nnrow, nncol, a, controls, parms); // dprintf(9,"process_rp, enter DTRSM part c1, c2=%d %d\n",c1, c2); // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls, // ifirst,c1,c2,nb); MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, ifirst,c1,c2,nb,acolinv); // dprintf(9,"process_rp, enter update part c1, c2=%d %d\n",c1, c2); MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,acol); // dprintf(9,"process_rp, update part end\n"); // MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls, // ifirst,nb,c1,c2,ifirst+nb,0); // dprintf(9,"process_rp_using_dl end, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); } void process_right_part_mpi_using_dls(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], int pv[], double scale[], PCONTROLS controls, PPARMS parms) { int i,ii; // dprintf(9,"process_rp_using_dls, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); for 
  for (ii=0;ii<nb;ii++){
    i = ii+ifirst;
    MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2);
    MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms,
                                     controls,i,c1,c2,0,scale[ii]);
  }
  MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                    ifirst,c1,c2,nb,acolinv);
  MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms,
                                               controls,ifirst,nb,c1,c2,
                                               ifirst+nb,acol);
}

void dump_vector(char * s, int * x, int n)
{
  return;
  fprintf(stderr,"%s:", s);
  int i;
  for(i=0;i<n;i++) fprintf(stderr," %2d", x[i]);
  fprintf(stderr,"\n");
}

int generate_src_and_dst_lists(int ifirst, int pv[], int src[], int dst[],
                               int * length, PCONTROLS controls, PPARMS parms)
// creates src + dest list from pivot list
{
  int n =parms->n;
  int nb=parms->nb;
  int work[n];
  int i;
  for(i=ifirst; i<n;i++)work[i]=i;
  for(i=ifirst; i<ifirst+nb;i++){
    int p = pv[i-ifirst];
    int tmp = work[i];
    work[i]=work[p];
    work[p]=tmp;
  }
  // dump_vector("work", work, n-ifirst);
  int ii=0;
  for(i=ifirst; i<n;i++){
    if ((work[i] != i) || (i<ifirst+nb)){
      dst[ii]=i;
      src[ii]=work[i];
      ii++;
    }
  }
  *length=ii;
  return ii;
}

void copysubvect(double *src, double * dst, int length)
// assume that the length is a multiple of 8 and
// the first location is 16-byte-aligned
{
  int j;
  v2df * s = (v2df*) src;
  v2df * d = (v2df*) dst;
  for(j=0;j<length/2;j+=4){
    d[j]=s[j];
    d[j+1]=s[j+1];
    d[j+2]=s[j+2];
    d[j+3]=s[j+3];
  }
}

void scalesubvect(double scale, double *src, double * dst, int length)
// assume that the length is a multiple of 8 and
// the first location is 16-byte-aligned
{
  int j;
  for(j=0;j<length;j++){
    // fprintf(stderr,"j=%d src=%e dest=%e\n", j, src[j], dst[j]);
    dst[j]=src[j]*scale;
  }
}

void scalesubvect_old(double scale, double *src, double * dst, int length)
// assume that the length is a multiple of 8 and
// the first location is 16-byte-aligned
{
  int j;
  v2df * s = (v2df*) src;
  v2df * d = (v2df*) dst;
  v2df ss = {scale,scale};
  for(j=0;j<length/2;j+=4){
    d[j]=ss*s[j];
    d[j+1]=ss*s[j+1];
    d[j+2]=ss*s[j+2];
    d[j+3]=ss*s[j+3];
  }
}

void local_swap_using_src_and_dest(int ifirst,int nb,int c1, int c2,
                                   int nnrow, int nncol,
                                   double a[nnrow][nncol],
                                   double arow[nb][c2-c1], double scale[],
                                   int src[], int dst[], int length,
                                   PCONTROLS controls, PPARMS parms)
{
  int current_proc;
  int current_lloc;
  int pivot_proc;
  int pivot_lloc;
  int i;
  int j;
  convert_global_index_to_local_rows(ifirst, &current_proc, &current_lloc,
                                     parms, controls);
  if(current_proc == controls->rank_in_col){
    printf("current_proc=%d, rank=%d\n",current_proc, controls->rank_in_col);
    copysubmat(nncol, (double(*)[])(a[current_lloc]+c1), c2-c1, arow,
               nb, c2-c1);
  }
  // the following is only for p= (q=?) 1
#pragma omp parallel for private(i)
  for(i=0;i<length;i++){
    if(dst[i]<ifirst+nb){
      double * s = a[src[i]]+c1;
      double sc = scale[dst[i]-ifirst];
      if (src[i]<ifirst+nb)s=arow[src[i]-ifirst];
      scalesubvect(sc,s,&(a[dst[i]][c1]),c2-c1);
    }
  }
#pragma omp parallel for private(i)
  for(i=0;i<length;i++){
    if(dst[i]>=ifirst+nb){
      copysubvect(arow[src[i]-ifirst],&(a[dst[i]][c1]),c2-c1);
    }
  }
}

void global_swap_using_src_and_dest(int ifirst,int nb,int c1, int c2,
                                    int nnrow, int nncol,
                                    double a[nnrow][nncol],
                                    double arow[][c2-c1],
                                    double ar2[][c2-c1], double scale[],
                                    int src[], int dst[], int length,
                                    PCONTROLS controls, PPARMS parms)
{
  int current_proc;
  int current_lloc;
  int pivot_proc;
  int pivot_lloc;
  int i;
  int j;
  int myp = controls->rank_in_col;
  // fprintf(stderr,"global_swap ifirst=%d nb=%d c1=%d c2=%d\n",ifirst,nb,c1,c2);
  convert_global_index_to_local_rows(ifirst, &current_proc, &current_lloc,
                                     parms, controls);
  // Bcast scale in vertical direction
  // need not be done each time...
  MPI_Bcast(scale, nb, MPI_DOUBLE, current_proc, controls->col_comm);
  // fprintf(stderr,"global_swap first Bcast end\n");
  //
  // create the data for bcast
  //
  // first, create the list of lines to send on each processor in a column
  //
  // dprintf(0,"myp, ncol, nrow, npcol, nprow = %d %d %d %d %d\n",
  //         myp, nncol, nnrow, parms->npcol, parms->nprow);
  int nlines[parms->nprow];
  int iloc[parms->nprow];
  int startloc[parms->nprow];
  int bdst[length];
  dump_vector("src", src, length);
  dump_vector("dst", dst, length);
  for(i=0;i<parms->nprow;i++)nlines[i]=0;
  for(i=0;i<length;i++){
    if(dst[i]<ifirst+nb){
      int cp, cl;
      convert_global_index_to_local_rows(src[i], &cp, &cl, parms, controls);
      nlines[cp]++;
    }
  }
  startloc[0]=0;
  for(i=0;i<parms->nprow-1;i++)startloc[i+1] = startloc[i]+nlines[i];
  // dump_vector("nlines", nlines, parms->nprow);
  // dump_vector("startloc", startloc, parms->nprow);
  //
  // now each processor knows where to store its data
  //
  int ii =0;
  for(i=0;i<parms->nprow; i++)iloc[i]=0;
  for(i=0;i<length;i++){
    // fprintf(stderr,"convert_index i=%d length=%d\n", i, length);
    if(dst[i]<ifirst+nb){
      int cp, cl;
      convert_global_index_to_local_rows(src[i], &cp, &cl, parms, controls);
      // fprintf(stderr, "i=%d,ii=%d, dst=%d, src=%d, cp=%d, cl=%d\n",
      //         i, ii, src[i], dst[i], cp, cl);
      bdst[startloc[cp]+iloc[cp]]=dst[i]-ifirst;
      iloc[cp]++;
      if (cp == myp){
        // I have this data
        double * s = a[cl]+c1;
        double sc = scale[dst[i]-ifirst];
        // fprintf(stderr, "places to copy: %d %d scale=%e c1=%d,c2=%d\n",
        //         startloc[myp]+ii, dst[i]-ifirst, sc, c1,c2);
        // fprintf(stderr,"s[0]=%e\n", s[0]);
        // fprintf(stderr,"dst[0]=%e\n", *ar2[startloc[myp]+ii]);
        scalesubvect(sc,s,ar2[startloc[myp]+ii],c2-c1);
        // fprintf(stderr, "end scalesubvect\n");
        ii++;
      }
    }
  }
  // fprintf(stderr,"loop end\n");
  // dump_vector("bdst", bdst, nb);
  //
  // now ar2 has (the local share of) the scaled umat
  // So do the Bcast
  dumpsubmat("ar2 before bcast", c2-c1, ar2, nb, c2-c1, parms, controls);
  for(i=0;i<parms->nprow;i++){
    // fprintf(stderr,"bcast proc %d lines %d\n", i, nlines[i]);
    if (nlines[i]){
      MPI_Bcast(ar2[startloc[i]],(c2-c1)*nlines[i],MPI_DOUBLE,
                i, controls->col_comm);
    }
  }
  dumpsubmat("ar2 after bcast", c2-c1, ar2, nb, c2-c1, parms, controls);
  // dprintf(0,"global_swap, bcast part end\n");
  int nlinesd[parms->nprow];
  int startlocd[parms->nprow];
  int sdst[length];
  for(i=0;i<parms->nprow;i++)nlinesd[i]=0;
  for(i=0;i<parms->nprow;i++)iloc[i]=0;
  ii=0;
  for(i=0;i<length;i++){
    if(dst[i]>=ifirst+nb){
      int cpd, cld;
      convert_global_index_to_local_rows(dst[i], &cpd, &cld, parms, controls);
      if(cpd == myp){
        sdst[ii]= cld;
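        // Note (added; a reading of the code, not the original author's text):
        // two index lists drive this exchange: bdst[] (filled above) records
        // where each broadcast, rescaled U-row held in ar2 finally lands
        // inside the pivot block, and sdst[] records the local rows that
        // receive the lines swapped out of it. E.g. with ifirst=0, nb=1 and
        // pv[0]=2, generate_src_and_dst_lists yields (src,dst) = {(2,0),(0,2)}:
        // row 2 is scaled into the panel and old row 0 moves out to row 2.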
ii++; } nlinesd[cpd]++; } } // dump_vector("nlinesd", nlinesd, parms->nprow); // dump_vector("sdst", sdst, nlinesd[myp]); startlocd[0]=0; for(i=0;i<parms->nprow-1;i++)startlocd[i+1] = startlocd[i]+nlinesd[i]; // // now each processor knows where to store its data // if(current_proc == myp){ // // this is swap data. All data comes from current_proc for(i=0;i<length;i++){ if(dst[i]>=ifirst+nb){ int cpd, cld; int cps, cls; convert_global_index_to_local_rows(dst[i], &cpd, &cld, parms, controls); convert_global_index_to_local_rows(src[i], &cps, &cls, parms, controls); copysubvect(&(a[cls][c1]),arow[startlocd[cpd]+iloc[cpd]],c2-c1); iloc[cpd]++; } } // // now arow (in current_proc) has all data to swap out // So send them out for(i=0;i<parms->nprow;i++){ if ((i != current_proc) && (nlinesd[i])){ MPI_Send(arow[startlocd[i]], nlinesd[i]*(c2-c1), MPI_DOUBLE, i, MPSENDBASETAG+i,controls->col_comm); } } }else{ // // other processors: receive data // MPI_Status status; if (nlinesd[myp]){ MPI_Recv(arow[startlocd[myp]], nlinesd[myp]*(c2-c1), MPI_DOUBLE, current_proc, MPSENDBASETAG+myp,controls->col_comm, &status); } } // // now all data to be stored are in arow and ar2 // first store arow (swapdata) to real locations // dprintf(0,"p2p swap end\n"); for(i=0;i<nlinesd[myp]; i++){ copysubvect(arow[startlocd[myp]+i],&(a[sdst[i]][c1]), c2-c1); } // dprintf(0,"p2p local swap end\n"); // then store arow2 (UMAT) to arow for(i=0;i<nb; i++){ // dprintf(0,"bdst[%d]=%d\n", i, bdst[i]); copysubvect(ar2[i],arow[bdst[i]], c2-c1); } // dprintf(0,"umat local copy end\n"); // and copy arow back to a in the case of current_proc if(current_proc == myp){ copysubmat(c2-c1, arow, nncol, (double(*)[])(a[current_lloc]+c1), nb, c2-c1); } // dprintf(0,"umat local copyback on current row end\n"); } void process_right_part_mpi_using_dls_phase1(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], int src[], int dst[], int length, double scale[], PCONTROLS controls, PPARMS parms) { dump_vector("pv", pv, nb); // fprintf(stderr,"Enter dls_phase1\n"); if (parms->vcommscheme == 0){ // fprintf(stderr,"Enter global_swap_using...\n"); global_swap_using_src_and_dest(ifirst, nb, c1, c2, nnrow, nncol, a, arow, (double(*)[])(arow[nb]), scale, src, dst, length, controls,parms); return; } int i,ii, j; // arow can be used as work area before bcast_umat // // dprintf(9,"process_rp_using_dls_p1, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); #if 1 for (ii=0;ii<nb;ii++){ i = ii+ifirst; // fprintf(stderr,"dls_phase 1 ii=%d\n",ii); // dprintf(0,"pv[%2d]=%2d\n", ii, pv[ii]); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms, controls,i,c1,c2,0,scale[ii]); } #else local_swap_using_src_and_dest(ifirst, nb, c1, c2, nnrow, nncol, a, arow, scale, src, dst, length, controls, parms); #endif // print_current_time("end swap and scale"); MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,arow); // print_current_time("end bcast_umat"); } void process_right_part_mpi_using_dls_phase1_old(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], double scale[], PCONTROLS controls, PPARMS parms) { int i,ii; // arow can be used as work area before bcast_umat // // dprintf(9,"process_rp_using_dls_p1, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); for (ii=0;ii<nb;ii++){ i 
= ii+ifirst; // dprintf(9,"process_rp, i=%d\n", i); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); // dprintf(9,"process_rp swap rows end, i=%d\n", i); MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms, controls,i,c1,c2,0,scale[ii]); } // print_current_time("end swap and scale"); MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,arow); // dprintf(9,"end bcast_umat\n"); } void process_right_part_mpi_using_dls_phase2(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], double scale[], PCONTROLS controls, PPARMS parms) { int i,ii; // dprintf(9,"process_rp_using_dls_p2, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); // MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms, // controls,ifirst,nb,c1,c2, // ifirst+nb,acol); #if 1 // MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, // ifirst+nb,acol,arow); gdrsetforceswapab(); MP_update_using_lu(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,acol,arow); gdrresetforceswapab(); #endif } void column_decomposition_recursive_mpi_old(int ifirst,int nb,int nnrow, int nncol, double a[nnrow][nncol], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; dprintf(9,"column recursive %d %d\n", ifirst, nb); int havec=have_current_col(ifirst, parms, controls); if (!havec) return; if (nb <= 8){ dprintf(9,"column recursive calling transpose %d %d\n", ifirst, nb); column_decomposition_mpi_with_transpose(ifirst, nb, nnrow, nncol, a, b, pv,controls,parms); dprintf(9,"column recursive return from transpose %d %d\n", ifirst, nb); }else{ dprintf(9,"column recursive left part %d %d\n", ifirst, nb/2); column_decomposition_recursive_mpi_old(ifirst, nb/2, nnrow, nncol, a, b, pv,controls,parms); dprintf(9,"column process right part %d %d\n", ifirst, nb/2); process_right_part_mpi(ifirst, nb/2, nb/2, nnrow, nncol, a, b, pv,controls, parms,1); dprintf(9,"column recursive right part %d %d\n", ifirst+nb/2, nb/2); column_decomposition_recursive_mpi_old(ifirst+nb/2, nb/2, nnrow, nncol, a, b, pv+nb/2,controls,parms); // process the swap of rows for the left half int c1, c2; convert_global_col_range_to_local_range(ifirst, ifirst+nb/2, &c1, &c2, parms, controls); dprintf(9,"column recursive left part swap %d %d\n", ifirst, nb/2); for (ii=nb/2;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1); } dprintf(9,"column recursive left part swap end %d %d\n", ifirst, nb/2); } } void column_decomposition_recursive_mpi(int ifirst,int nb,int nnrow, int nncol, double a[nnrow][nncol], double (*acol)[], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; int havec=have_current_col(ifirst, parms, controls); if (!havec) return; // print_current_time("enter column_deconp"); // dprintf(9,"enter column_decomp, i, nb=%d %d\n", ifirst,nb); if (nb <= 8){ BEGIN_TIMER(timer); column_decomposition_mpi_with_transpose(ifirst, nb, nnrow, nncol, a, b, pv,controls,parms); END_TIMER(timer,7,((double)(nb))*(nnrow-ifirst)); }else{ column_decomposition_recursive_mpi(ifirst, nb/2, nnrow, nncol, a, acol, b, pv,controls,parms); BEGIN_TIMER(timer0); process_right_part_mpi_withacol(ifirst, nb/2, nb/2, nnrow, nncol, a, acol, b, pv,controls, parms,1); END_TIMER(timer0,8,((double)(nb/2))*(nb/2)*(nnrow-ifirst)); column_decomposition_recursive_mpi(ifirst+nb/2, nb/2, nnrow, nncol, a, acol, 
b, pv+nb/2,controls,parms); // process the swap of rows for the left half int c1, c2; convert_global_col_range_to_local_range(ifirst, ifirst+nb/2, &c1, &c2, parms, controls); BEGIN_TIMER(timer1); for (ii=nb/2;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1); } END_TIMER(timer1,9,((double)(nb/2))*(nb/2)); } // print_current_time("end column_deconp"); // dprintf(9,"end column_decomp, i, nb=%d %d\n", ifirst,nb); } void lu_mpi(int nnrow, int nncol, double a[nnrow][nncol], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n; n = controls->nrow*parms->nprow; for(i=0;i<n;i++){ lu_forward_onestep_mpi(i, nnrow, nncol, a, b, controls, parms); } backward_sub_mpi(nnrow, nncol, a, b,controls, parms); // printmat_MP(nnrow, nncol, a, controls, parms); } void lu_mpi_blocked(int nnrow, int nncol, double a[nnrow][nncol], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n, nb; n = controls->nrow*parms->nprow; nb = parms->nb; int pv[nb]; double acolinv[nb][nb]; double acol[nnrow][nb]; for(i=0;i<n;i+=nb){ // dprintf(9,"Enter lu_mpi_blocked, i = %d\n",i); column_decomposition_recursive_mpi_old(i, nb, nnrow, nncol, a, b, pv,controls, parms); MP_process_diagonal(nnrow, nncol, a, parms, controls, i,nb,acolinv,0); MP_process_lmat(nnrow, nncol, a, parms, controls,i,nb,i+nb,acol); int c1, c2; convert_global_col_range_to_local_range(i+nb, i+nb-1,&c1, &c2, parms, controls); process_right_part_mpi_using_dl(i, nb, c1, nncol, nnrow, nncol, a, acolinv, acol, pv,controls, parms); // process_right_part_mpi_using_dl_old(i, nb, 0, nnrow, nncol, a, acolinv, // acol, pv,controls, parms); } backward_sub_mpi(nnrow, nncol, a, b,controls, parms); // printmat_MP(nnrow, nncol, a, controls, parms); } int local_check_and_continue_rcomm_transfer(); void process_right_part_mpi_using_dls_concurrent(int i,int nb,int cfirst, int nnrow, int nncol, double a[nnrow][nncol], double aip[nb][nb], double acp[nnrow][nb], double * arow, double * arow2, int * pvp, double * scalep, PCONTROLS controls, PPARMS parms) { int cs,ce,cenext; double (*arp)[]; double (*arp2)[]; arp= (double(*)[]) arow; arp2= (double(*)[]) arow2; int ninc; // ninc= nb*4; // if (ninc > 4096) ninc = nb*2; ninc= nb; // should not make bigger than 8 (arow memory limit might be exceeded) // ninc= nb/2; #ifndef CONCURRENT_UCOMM // non-overlapping comm // ninc=nncol; #endif BEGIN_TIMER(timerx); int n=parms->n; int src[n]; int dst[n]; int length; dumpsubmat("a before phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); generate_src_and_dst_lists(i, pvp, src, dst, &length,controls, parms); for(cs=cfirst; cs<nncol;cs+= ninc){ ce = cs + ninc; if (ce >= controls->ncol)ce = nncol; if (ce >nncol) ce = nncol; if (ce != nncol){ cenext = ce+ninc; if (cenext >= controls->ncol)cenext = nncol; } if (cs == cfirst){ // dprintf(9,"using_dls call first phase1\n"); BEGIN_TIMER(timer); print_current_time("enter first dls_phase_1"); process_right_part_mpi_using_dls_phase1(i, nb, cs, ce, nnrow, nncol, a, aip, acp,arp, pvp,src, dst, length, scalep, controls, parms); print_current_time("end first dls_phase_1"); dumpsubmat("a after phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); dumpsubmat("arow", ce-cs, arp, nb, ce-cs, parms, controls); END_TIMER(timer,3,((double)(ce-cs))*nb); // print_current_time("end first dls_phase_1"); // dprintf(9,"using_dls end first phase1\n"); } #ifndef DO_RCOMM_LAST if (controls->check_rcomm_transfer){ // 
dprintf(9,"using_dls call local_check_and_continue_rcomm\n"); int rcomm_state = local_check_and_continue_rcomm_transfer(); // dprintf(9,"using_dls, cs=%d, rcomm=%d\n", cs, rcomm_state); } #endif #ifdef CONCURRENT_UCOMM omp_set_nested(1); #pragma omp parallel #pragma omp sections #endif { #ifdef CONCURRENT_UCOMM #pragma omp section #endif { print_current_time("enter dls_phase_2"); BEGIN_TIMER(timer); process_right_part_mpi_using_dls_phase2(i, nb, cs, ce, nnrow, nncol, a, aip, acp,arp, pvp,scalep, controls, parms); gdrsetskipsendjmat(); double x=parms->n - i; END_TIMER(timer,2,x*nb*(ce-cs)); print_current_time("end dls_phase_2"); } #ifdef CONCURRENT_UCOMM #pragma omp section #endif if (ce < nncol){ #ifdef CONCURRENT_UCOMM usleep(10000); #endif print_current_time("enter dls_phase_1"); BEGIN_TIMER(timer); process_right_part_mpi_using_dls_phase1(i, nb, ce, cenext, nnrow, nncol, a, aip, acp,arp2, pvp, src, dst, length, scalep, controls, parms); END_TIMER(timer,3,((double)(ce-cs)*nb)); print_current_time("end dls_phase_1"); } } swapvoidptr((void**)&arp2, (void**)&arp); if (ce == nncol) cs = nncol; } gdrresetskipsendjmat(); double x=parms->n - i; END_TIMER(timerx,1,x*(x-nb)*nb*2); } void MP_process_row_comm_using_Bcast(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { // print_current_time("enter process_row_comm"); if (i+nb < n){ int current_prow; int current_lrow; int ii = i + nb; convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); int pcol = pcolid(ii,parms,controls); if (current_prow == controls->rank_in_col){ MPI_Bcast(aip2,sizeof(double)*nb*nb,MPI_BYTE, pcol, controls->row_comm); } int nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); MPI_Bcast(pvp2,sizeof(int)*nb,MPI_BYTE, pcol, controls->row_comm); MPI_Bcast(scalep2,sizeof(double)*nb,MPI_BYTE, pcol, controls->row_comm); MPI_Bcast(acp2,sizeof(double)*nrows*nb,MPI_BYTE, pcol, controls->row_comm); } print_current_time("end process_row_comm"); } void MP_process_row_comm(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { print_current_time("enter process_row_comm"); if (i+nb < n){ MPI_Status status; int current_prow; int current_lrow; int ii = i + nb; BEGIN_TIMER(timer); convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); int nextp= (controls->rank_in_row+1)%parms->npcol; int prevp= (controls->rank_in_row-1+parms->npcol)%parms->npcol; int pcol = pcolid(ii,parms,controls); if (current_prow == controls->rank_in_col){ #if 0 if (controls->rank_in_row != pcol){ MPI_Recv(aip2, sizeof(double)*nb*nb,MPI_BYTE, prevp,MPRCOMMTAG, controls->row_comm, &status); } if (nextp != pcol){ MPI_Send(aip2, sizeof(double)*nb*nb,MPI_BYTE, nextp,MPRCOMMTAG, controls->row_comm); } #endif // dprintf(9,"call bcast aip2\n"); MPI_Bcast(aip2,sizeof(double)*nb*nb,MPI_BYTE, pcol, controls->row_comm); } // dprintf(9,"call prepare lmat\n"); int nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); // dprintf(9,"call bcast pvp2\n"); MPI_Bcast(pvp2,sizeof(int)*nb,MPI_BYTE, pcol, controls->row_comm); // dprintf(9,"call bcast scalp2\n"); MPI_Bcast(scalep2,sizeof(double)*nb,MPI_BYTE, pcol, controls->row_comm); // dprintf(9,"call bcast acp2\n"); // MPI_Bcast(acp2,sizeof(double)*nrows*nb,MPI_BYTE, // pcol, controls->row_comm); 
MP_mybcast(acp2,sizeof(double)*nrows*nb, pcol, controls->row_comm); END_TIMER(timer,0,(double)((n-i-nb)*nb)); } print_current_time("end process_row_comm"); } static RCOMMT rcomm; #define MAXPENDINGMESSAGE 100 void register_singlemessage_to_rcomm(PRCOMMT rcomm, void * p, int length) { int i = rcomm->nmessages; (rcomm->message+i)->mptr=p; (rcomm->message+i)->length=length; (rcomm->message+i)->message_state = INITIALIZED; if (rcomm->first){ (rcomm->message+i)->message_state = RECEIVED; if (rcomm->last){ (rcomm->message+i)->message_state = SENT; } } rcomm->nmessages++; } int check_and_continue_rcomm_transfer(PRCOMMT rcomm, int blockmode) { int i; int nend = 0; MPI_Status status; int received_count = 0; int sent_count = 0; for(i=0;i<rcomm->nmessages; i++){ PMESSAGE mp = rcomm->message+i; if ((mp->message_state == INITIALIZED) && (i<MAXPENDINGMESSAGE+received_count)){ // fprintf(stderr,"new Irecv at %d\n",i); int retval=MPI_Irecv(mp->mptr, mp->length, MPI_BYTE,rcomm->prevp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Irecv error in start_rcomm"); mp->message_state = RECEIVING; } if (mp->message_state == RECEIVING){ int flag; if (blockmode){ MPI_Wait(&(mp->request), &status); flag = 1; }else{ MPI_Test(&(mp->request),&flag, &status); } if (flag)mp->message_state = RECEIVED; } if (mp->message_state == RECEIVED){ if(received_count < i)received_count = i; if (rcomm->last){ mp->message_state = SENT; }else{ if (i < sent_count + MAXPENDINGMESSAGE){ // fprintf(stderr,"new Isend at %d\n",i); int retval=MPI_Isend(mp->mptr, mp->length, MPI_BYTE,rcomm->nextp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Isend error in check_rcomm"); mp->message_state = SENDING; } } } if (mp->message_state == SENDING){ int flag; if (blockmode){ MPI_Wait(&(mp->request), &status); flag = 1; }else{ MPI_Test(&(mp->request),&flag, &status); } if (flag)mp->message_state = SENT; } if (mp->message_state == SENDING) nend ++; if (mp->message_state == SENT){ if (sent_count < i)sent_count = i; } } return rcomm->nmessages - nend; } int local_check_and_continue_rcomm_transfer() { return check_and_continue_rcomm_transfer(&rcomm, 0); } void start_rcomm_transfer(PRCOMMT rcomm) { int i; // dprintf(9,"Enter start_rcomm\n"); for(i=0;(i<rcomm->nmessages) &&(i<MAXPENDINGMESSAGE); i++){ PMESSAGE mp = rcomm->message+i; if (mp->message_state == INITIALIZED){ // dprintf(9,"Irecv register i=%d length=%d\n",i, mp->length); int retval=MPI_Irecv(mp->mptr, mp->length, MPI_BYTE,rcomm->prevp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Irecv error in start_rcomm"); mp->message_state = RECEIVING; } // dprintf(9,"start_rcom, i=%d status=%d\n", i, mp->message_state); } check_and_continue_rcomm_transfer(rcomm,0); } void register_to_rcomm(PRCOMMT rcomm, void * p, int length) { int i; for (i=0;i<length; i+= MAXRCOMMMESSAGE){ int len = MAXRCOMMMESSAGE; if (i+len > length) len = length-i; register_singlemessage_to_rcomm(rcomm, ((char*)p)+i, len); } } int MP_process_row_comm_init(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { print_current_time("enter process_row_comm_init"); controls->check_rcomm_transfer = 1; int nrows=0; if (i+nb < n){ int current_prow; int current_lrow; int ii = i + nb; BEGIN_TIMER(timer); convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); rcomm.nextp= 
(controls->rank_in_row+1)%parms->npcol; rcomm.prevp= (controls->rank_in_row-1+parms->npcol)%parms->npcol; rcomm.nmessages=0; rcomm.first = (controls->rank_in_row == pcolid(ii,parms,controls)); rcomm.last = (rcomm.nextp == pcolid(ii,parms,controls)); rcomm.comm = controls->row_comm; rcomm.preceive=0; rcomm.psend=0; if (current_prow == controls->rank_in_col){ register_to_rcomm(&rcomm, aip2, nb*nb*sizeof(double)); } nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); register_to_rcomm(&rcomm, pvp2, sizeof(int)*nb); register_to_rcomm(&rcomm, scalep2,sizeof(double)*nb); register_to_rcomm(&rcomm, acp2,sizeof(double)*nrows*nb); start_rcomm_transfer(&rcomm); } print_current_time("end process_row_comm_init"); return nrows; } void MP_process_row_comm_test(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2, aip2, acp2); check_and_continue_rcomm_transfer(&rcomm,1); // MPI_Barrier(controls->row_comm); } void lu_mpi_blocked_lookahead(int nnrow, int nncol, double a[nnrow][nncol], double (*acol)[],double (*acol2)[], double (*dinv)[], double arow[],double arow2[], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n, nb; n = controls->nrow*parms->nprow; nb = parms->nb; int pv[nb]; int pv2[nb]; double scale[nb]; double scale2[nb]; double acolinv[nb][nb]; double acolinv2[nb][nb]; double (*aip)[]; double (*aip2)[]; double (*acp)[]; double (*acp2)[]; double (*arp)[]; double (*arp2)[]; double (*aptmp)[]; int * pvp; int * pvp2; int * pvptmp; double * scalep; double * scalep2; double * scaleptmp; i=0; print_current_time("enter first rfact"); BEGIN_TIMER(timertotal); BEGIN_TIMER(timerx); BEGIN_TIMER(timercd); // void gdrsetusemultithread(); column_decomposition_recursive_mpi(i, nb, nnrow, nncol, a, acol, b, pv,controls, parms); END_TIMER(timercd,30,(double)((n-i-nb)*nb)); BEGIN_TIMER(timer00); print_current_time("end first rfact"); // dprintf(9,"call bcast pv\n"); MPI_Bcast(pv,sizeof(int)*nb,MPI_BYTE, pcolid(i,parms,controls), controls->row_comm); MP_construct_scalevector(nnrow, nncol, a, parms, controls, i, nb, scale); MPI_Bcast(scale,sizeof(double)*nb,MPI_BYTE, pcolid(i,parms,controls), controls->row_comm); print_current_time("enter process_diagonal"); MP_process_diagonal(nnrow, nncol, a, parms, controls, i,nb,acolinv,0); print_current_time("end MP_process_diagonal"); int nrows=MP_process_lmat(nnrow, nncol, a, parms, controls,i,nb,i+nb,acol); print_current_time("end MP_process_lmat"); MP_calculate_ld(nb, acol, nrows, acol2, acolinv,i,controls, parms); END_TIMER(timerx,5,(double)((n-i-nb)*nb)); END_TIMER(timer00,37,0.0); END_TIMER(timer00,39,0.0); print_current_time("end first lookahead"); aip=acolinv; acp = acol; aip2=acolinv2; acp2 = acol2; pvp=pv; pvp2=pv2; scalep=scale; scalep2=scale2; for(i=0;i<n;i+=nb){ // void gdrsetusemultithread(); // fprintf(stderr,"lu2_mpi i=%d\n",i); int c1, c2, cfirst; int havec = have_current_col(i+nb, parms, controls); convert_global_col_range_to_local_range(i+nb, i+nb*2-1,&c1, &c2, parms, controls); arp= (double(*)[]) arow; c2 ++; BEGIN_TIMER(timerx); print_current_time("enter rfact"); int src[n]; int dst[n]; int length; dump_vector("pvp", pvp, nb); generate_src_and_dst_lists(i, pvp, src, dst, &length,controls, parms); if ((i+nb < n) && havec){ // if (0){ int ii = i + nb; BEGIN_TIMER(timer00); // dprintf(9,"rfact call dls_phase1, ii, nb=%d %d\n", ii, 
nb); dumpsubmat("a before first phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); process_right_part_mpi_using_dls_phase1(i, nb, c1, c2, nnrow, nncol, a, aip, acp,arp, pvp,src, dst, length, scalep, controls, parms); dumpsubmat("a after first phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); // dprintf(9,"rfact call dls_phase2, ii, nb=%d %d\n", ii, nb); process_right_part_mpi_using_dls_phase2(i, nb, c1, c2, nnrow, nncol, a, aip, acp,arp, pvp,scalep, controls, parms); // dprintf(9,"rfact call column_dec ii, nb=%d %d\n", ii, nb); END_TIMER(timer00,37,0.0); END_TIMER(timer00,40,0.0); BEGIN_TIMER(timercd); column_decomposition_recursive_mpi(ii, nb, nnrow, nncol, a, acp2, b, pvp2,controls, parms); END_TIMER(timercd,30,(double)((n-i-nb)*nb)); // dprintf(9,"rfact call MP_construct_scale ii, nb=%d %d\n", ii, nb); MP_construct_scalevector(nnrow, nncol, a, parms, controls, ii, nb, scalep2); // dprintf(9,"rfact end, ii, nb=%d %d\n", ii, nb); cfirst=c2; }else{ cfirst=c1; } BEGIN_TIMER(timer01); if (i+nb < n){ int ii = i + nb; // dprintf(9,"call phase1 %d\n",i); MP_process_diagonal_phase1(nnrow, nncol, a, parms, controls, ii,nb,aip2,0); // dprintf(9,"call phase1 end\n"); } END_TIMER(timer01,37,0.0); END_TIMER(timer01,41,0.0); END_TIMER(timerx,5,(double)((n-i-nb)*nb)); print_current_time("end rfact"); // dprintf(9,"call process_row_comm\n"); BEGIN_TIMER(timer03); #ifndef DO_RCOMM_LAST nrows=MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2,aip2, acp2); #endif // dprintf(9,"call process_right_part_... %d\n",i); // check_and_continue_rcomm_transfer(&rcomm,1); // test to finish rcomm here -- 2011/6/19 process_right_part_mpi_using_dls_concurrent(i, nb, cfirst, nnrow, nncol, a, aip, acp, arow, arow2, pvp, scalep, controls, parms); BEGIN_TIMER(timery); BEGIN_TIMER(timer02); // dprintf(9,"end process_right_part_... %d\n",i); #ifdef DO_RCOMM_LAST nrows=MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2,aip2, acp2); #endif check_and_continue_rcomm_transfer(&rcomm,1); // dprintf(9,"end check_and_continue... 
%d\n",i); print_current_time("end lmat/dls"); #ifndef DUPOSTMUL MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, i,c1,nncol,nb,aip); #else MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, i,controls->ncol,controls->ncol+1,nb,aip); #endif MP_store_diagonal_inverse(nnrow, nb, dinv, parms,controls, i, aip); print_current_time("end mult_diag"); if (i+nb < n){ int ii = i + nb; MP_calculate_ld_phase1(nb, acp2, nrows, aip2, ii, controls, parms); print_current_time("end ld_phase1"); MP_calculate_ld_phase2(nb, acp2, nrows, acp, aip2, ii,controls, parms); print_current_time("end ld_phase2"); } // fprintf(stderr, "before swap\n"); dump_vector("pvp2", pvp2, nb); dump_vector("pvp ", pvp, nb); swapvoidptr((void**)&acp2, (void**)&acp); swapvoidptr((void**)&aip2, (void**)&aip); swapvoidptr((void**)&pvp2, (void**)&pvp); swapvoidptr((void**)&scalep2, (void**)&scalep); // fprintf(stderr, "after swap\n"); dump_vector("pvp2", pvp2, nb); dump_vector("pvp ", pvp, nb); if (MP_myprocid()==0) fprintf(stderr,"lu_mpi look i=%d end\n", i); END_TIMER(timer02,37,0.0); END_TIMER(timer02,42,0.0); END_TIMER(timery,5,0.0); } END_TIMER(timertotal,4,((double)(n))*((double)n)*(n*2.0/3.0)); print_current_time("enter backward_sub"); BEGIN_TIMER(timerback); backward_sub_blocked_mpi(nnrow, nncol, a,nb, dinv, b,controls, parms); END_TIMER(timerback,28,(double)(n)*n); print_current_time("end backward_sub"); // printmat_MP(nnrow, nncol, a, controls, parms); } void usage() { fprintf(stderr,"lu2_mpi options:\n"); fprintf(stderr," -h: This help\n"); fprintf(stderr," -s: seed (default=1)\n"); fprintf(stderr," -n: size of matrix (default=8192)\n"); fprintf(stderr," -r: allocate processors in row-major order \n"); fprintf(stderr," -b: block size (default=2048)\n"); fprintf(stderr," -p: processors in row (default=1)\n"); fprintf(stderr," -q: processors in column (default=1)\n"); fprintf(stderr," -g: usehugetlbfs (default=no)\n"); fprintf(stderr," -w: two process per node mode (use two cards)\n"); fprintf(stderr," -B: first card ID (default=0)\n"); fprintf(stderr," -N: Number of cards per MPI process(default=1)\n"); fprintf(stderr," -T: Max process ID for timing info(default=0: all)\n"); fprintf(stderr," -v: scheme for vertical communication (default=0: advanced, else: simple)\n"); fprintf(stderr," -S: stress factor for thermal test(default=0: no additional stress)\n"); } extern char *optarg; extern int optind; void print_parms(FILE* stream, PPARMS parms) { fprintf(stream,"N=%d Seed=%d NB=%d\n", parms->n,parms->seed,parms->nb); fprintf(stream,"P=%d Q=%d Procs row major=%d usehuge=%d\n", parms->nprow,parms->npcol, parms->procs_row_major, parms->usehugepage); fprintf(stream,"usetwocards=%d ncards=%d cardid=%d maxpt=%d vcommscheme=%d stress=%d\n", parms->twocards, parms->ncards, parms->firstcard, parms->maxpidfortiming, parms->vcommscheme, parms->stress_factor); fprintf(stream,"bfsize=%d bflimit=%d bfscale=%d\n", parms->bfsize, parms->bflimit, parms->bfscale); } void read_parms(int argc, char * argv[], PPARMS parms) { int ch; static struct option longopts[] = { { "help", no_argument, 0, 'h' }, { "block_size", optional_argument, NULL, 'b' }, { "seed", optional_argument, NULL, 's' }, { "ndim_matrix", required_argument, NULL, 'n' }, { "processors_in_row", optional_argument, NULL, 'p' }, { "processors_in_column", optional_argument, NULL, 'q' }, { "processors_row_major", no_argument, 0, 'r' }, { "usehugepage", no_argument, 0, 'g' }, { "usetwocards", no_argument, 0, 'w' }, { "ncards", optional_argument, NULL, 'N' 
}, { "max_pid_for_timing", optional_argument, NULL, 'T' }, { "vcomm_scheme", optional_argument, NULL, 'v' }, { "stress_factor", optional_argument, NULL, 'S' }, { NULL, 0, NULL, 0 } }; MP_message("enter read_parms"); parms->seed=1; parms->n=8192; parms->nb = 2048; parms->nprow=1; parms->npcol=1; parms->procs_row_major = 0; parms->usehugepage = 0; parms->twocards = 0; parms->ncards = 1; parms->firstcard = 0; parms->maxpidfortiming = 0; parms->vcommscheme = 0; parms->stress_factor=0; parms->bflimit = 0; parms->bfsize = 0; parms->bfscale = 1; for(argc=0;argv[argc]!=NULL;argc++); // the above is necessary to fixup the argc value messed up by MPICH... // JM 2009/9/21 while((ch=getopt_long(argc,argv,"B:N:S:T:b:ghn:p:q:rs:v:w",longopts, NULL))!= -1){ fprintf(stderr,"optchar = %c optarg=%s\n", ch,optarg); switch (ch) { case 'B': parms->firstcard = atoi(optarg); break; case 'N': parms->ncards = atoi(optarg); break; case 'S': parms->stress_factor = atoi(optarg); break; case 'T': parms->maxpidfortiming = atoi(optarg); break; case 'b': parms->nb = atoi(optarg); break; case 'g': parms->usehugepage = 1; break; case 's': parms->seed = atoi(optarg); break; case 'n': parms->n = atoi(optarg); break; case 'p': parms->npcol = atoi(optarg); break; case 'q': parms->nprow = atoi(optarg); break; case 'v': parms->vcommscheme = atoi(optarg); break; case 'r': parms->procs_row_major = 1; break; case 'w': parms->twocards = 1; break; case 'h': usage(); exit(1); case '?':usage(); exit(1); break; default:break; } } argc -= optind; argv += optind; print_parms(stderr, parms); print_parms(stdout, parms); fprintf(stderr,"P, Q, MP_proccount = %d %d %d\n", parms->npcol, parms->nprow,MP_proccount()); if (parms->nprow*parms->npcol != MP_proccount()){ MP_error("P*Q != np"); } } void MP_broadcast_parms(PPARMS parms) { fprintf(stderr,"broadcast_parms size= %d\n", sizeof(PARMS)); MP_bcast((void*)parms, sizeof(PARMS)); MP_message("broadcast_parms normal end"); print_parms(stderr, parms); } void set_parms(int argc, char * argv[], PPARMS parms) { MP_message("Enter set_parms"); if (MP_myprocid()==0)read_parms(argc, argv, parms); MP_message("read_parms end"); MP_broadcast_parms(parms); MP_message("broadcast end"); libtestg_parms.bfsize = parms->bfsize; libtestg_parms.bflimit = parms->bflimit; libtestg_parms.bfscale = 1.0/parms->bfscale; if (parms->bfscale<0) libtestg_parms.bfscale = -parms->bfscale; fprintf(stderr,"Proc=%d matparms= %d %d %e\n", MP_myprocid(), libtestg_parms.bfsize, libtestg_parms.bflimit, libtestg_parms.bfscale); } void setup_controls(PPARMS parms, PCONTROLS controls) { controls->nrow = parms->n/parms->nprow; controls->ncol = parms->n/parms->npcol; controls->nnrow = controls->nrow; controls->nncol = controls->ncol+NNCOLINC; controls->check_rcomm_transfer = 0; } int main(int argc, char * argv[]) { int ch; PARMS parms; CONTROLS controls; int i; MP_initialize(&argc,&argv); MP_message("Return from MP_initialize"); set_parms(argc, argv, &parms); setup_controls(&parms, &controls); MP_message("Return from setup_controls"); int nb, n, seed; nb = parms.nb; n = parms.n; seed = parms.seed; double (*a)[]; double (*acol)[]; double (*acol2)[]; double (*dinv)[]; double *arow, *arow2; int * pv; double *b, *b0, *bcopy; long int nl=n; set_max_pid_for_print_time(parms.maxpidfortiming); gdrsetboardid(parms.firstcard); if (parms.twocards){ gdrsetboardid(MP_myprocid() %2); } gdrdgemm_set_stress_factor(parms.stress_factor); if (parms.maxpidfortiming){ if (MP_myprocid() >= parms.maxpidfortiming)set_matmul_msg_level(0); } if(parms.ncards > 1) 
gdrsetnboards(parms.ncards); if (parms.usehugepage){ dprintf(9,"hugetlb: usehuge = %d\n", parms.usehugepage); MP_allocate_hugetlbfs("/mnt/huge/aaa", (void**)&a, (void**)&acol, (void**)&acol2,(void**)&dinv,(void**)&arow, (void**)&arow2, controls.nnrow, nb,controls.nncol); }else{ int nbb = nb+32; dprintf(9,"malloc: usehuge = %d\n", parms.usehugepage); a = (double(*)[]) malloc(sizeof(double)*controls.nnrow*controls.nncol); acol = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb); acol2 = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb); dinv = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb); arow = (double*) malloc(sizeof(double)*nb*nbb*2); arow2 = (double*) malloc(sizeof(double)*nb*nbb*2); } fprintf(stderr,"Return from MP_allocate\n"); // init_matoutfid(); if(MP_myprocid())set_debug_level(8); { // omp_set_nested(1); // fprintf(stderr,"Omp_get_nested=%d\n", omp_get_nested()); } b = (double*)malloc(sizeof(double)*controls.nnrow); b0 = (double*)malloc(sizeof(double)*controls.nnrow); bcopy = (double*)malloc(sizeof(double)*parms.n); pv = (int*)malloc(sizeof(int)*controls.nnrow); MP_setup_communicators(&parms, &controls); fprintf(stderr,"Return from MP_setup_comm"); sleep(MP_myprocid()%10); if (controls.nnrow > nb*4){ reset_gdr(nb, acol2, nb, acol, controls.nnrow); }else{ reset_gdr(nb, arow2, nb, arow, nb*4); } fprintf(stderr,"Proc %d Return from reset_gdr\n", MP_myprocid()); if (seed == 0){ readmat(n,a); }else{ MP_randomsetmat(controls.nncol, controls.nnrow, a,&parms, &controls,1,b0); } #if 1 MP_sync(); if (controls.nnrow > nb*4){ reset_gdr(nb, acol2, nb, acol, controls.nnrow); }else{ reset_gdr(nb, arow2, nb, arow, nb*4); } #endif MP_sync(); fprintf(stderr,"Proc %d Return from reset_gdr\n", MP_myprocid()); timer_init(); init_timer(); if (MP_myprocid()==0){ print_current_datetime("Start calculation "); } init_current_time(); MP_message("enter lu_mpi"); lu_mpi_blocked_lookahead(controls.nnrow, controls.nncol, a, acol,acol2,dinv,arow,arow2, b,&controls, &parms); // dprintf(9,"end lu_mpi_lookahead\n"); // printmat_MP(controls.nnrow, controls.nncol, a, &controls, &parms); double ctime=cpusec(); double wtime=wsec(); if (MP_myprocid()==0){ print_current_datetime("End calculation "); } if (seed == 0){ readmat(n,a); }else{ MP_randomsetmat(controls.nncol, controls.nnrow, a,&parms, &controls,0,b0); } dprintf(9,"end randomsetmat\n"); double a1, ao, b1, bo; MP_calcnorm(controls.nncol, controls.nnrow, a, &parms, &controls, &a1, &ao); MP_calcvnorm(b, &parms, &controls, &b1, &bo); check_solution_mpi(controls.nnrow, controls.nncol, a, b, bcopy, &controls, &parms); // dprintf(9,"end check_solution\n"); // printmat_MP(controls.nnrow, controls.nncol, a, &controls, &parms); double einf = print_errors( b,b0,&controls, &parms); print_timers_mpi(0); if (MP_myprocid()==0){ double nd = parms.n; HPLlogprint(stderr, parms.n, parms.nb, parms.nprow, parms.npcol, wtime); HPLerrorprint(stderr, einf, a1, ao, b1, bo, nd); fflush(stderr); fflush(stdout); fprintf(stderr,"\n"); fprintf(stderr,"\n"); fprintf(stderr,"\n"); fprintf(stderr,"\n"); HPLlogprint(stdout, parms.n, parms.nb, parms.nprow, parms.npcol, wtime); HPLerrorprint(stdout, einf, a1, ao, b1, bo, nd); // printf("\n\n Norms = %e %e %e %e\n", a1, ao, b1, bo); double speed = nd*nd*nd*2.0/3.0/wtime/1e9; fprintf(stderr,"\n\n cpusec = %g wsec=%g %g Gflops\n\n\n", ctime, wtime, speed); printf("\n\n cpusec = %g wsec=%g %g Gflops\n\n\n", ctime, wtime, speed); fflush(stdout); } // MP_message("ALL end --- perform MPI_Barrier\n"); MPI_Barrier(MPI_COMM_WORLD); // 
MP_message("MPI_Barrier end\n"); MPI_Barrier(MPI_COMM_WORLD); // MP_message("Second MPI_Barrier end\n"); MPI_Finalize(); // MP_message("MPI_Finalize end\n"); return 0; }
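/*
 * Reference sketch (illustrative only; this helper is not called anywhere in
 * this program, and the name ref_lu_with_pivoting is invented here):
 * lu_mpi_blocked_lookahead above distributes an LU factorization with row
 * pivoting across a process grid.  A minimal single-process version of the
 * same recurrence, written in the plain unblocked form that the blocked and
 * lookahead code batches into DGEMMs, assuming a nonsingular matrix, is:
 */
static void ref_lu_with_pivoting(int n, double a[n][n], int pv[])
{
    int i, j, k;
    for (i = 0; i < n; i++) {
        /* pivot search in column i (role of column_decomposition_*) */
        int p = i;
        for (k = i + 1; k < n; k++) {
            double v = a[k][i] < 0 ? -a[k][i] : a[k][i];
            double w = a[p][i] < 0 ? -a[p][i] : a[p][i];
            if (v > w) p = k;
        }
        pv[i] = p;
        /* row swap (role of MP_swap_rows_blocked) */
        for (j = 0; j < n; j++) {
            double t = a[i][j]; a[i][j] = a[p][j]; a[p][j] = t;
        }
        /* scale the pivot column (role of MP_scale_row_blocked / scale[]) */
        for (k = i + 1; k < n; k++)
            a[k][i] /= a[i][i];
        /* rank-1 trailing update; the blocked code batches nb of these
         * columns and applies them as one matrix-matrix multiply
         * (MP_update_using_lu / process_right_part_mpi_*). */
        for (k = i + 1; k < n; k++)
            for (j = i + 1; j < n; j++)
                a[k][j] -= a[k][i] * a[i][j];
    }
}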
hello.c
#include <stdio.h>
#include <omp.h>

int main()
{
#pragma omp parallel
    printf("Hello world!\n");
    return 0;
}
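/*
 * Usage sketch (the compiler and flags are an assumption; none are given in
 * this document).  With GCC the program is built and run as
 *
 *     gcc -fopenmp hello.c -o hello
 *     OMP_NUM_THREADS=4 ./hello
 *
 * Each thread of the parallel region executes the printf once, so with four
 * threads four "Hello world!" lines appear, in nondeterministic order.
 */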
residualbased_central_differences_strategy.h
/* ==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on March 05, 2007).

Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
  Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:

Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
============================================================================== */

/* *********************************************************
 *
 *   Last Modified by:    $Author: Nelson $
 *   Date:                $Date: 2009-09-18 $
 *   Revision:            $Revision: 1.0 $
 *
 * ***********************************************************/

/// WARNING: Displacements are obtained at step n+1; velocities and
/// accelerations are obtained at step n.
#if !defined(KRATOS_RESIDUALBASED_CENTRAL_DIFERENCES_STRATEGY)
#define KRATOS_RESIDUALBASED_CENTRAL_DIFERENCES_STRATEGY

/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>

/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
#include "boost/smart_ptr.hpp"

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/variables.h"
#include "containers/array_1d.h"

/* For contact analysis */
#include "custom_strategies/schemes/forward_increment_lagrange_multiplier_scheme.h"
#include "custom_utilities/boundary_conditions_and_contact_utilities.h"
#include "custom_utilities/joint.h"
#include "custom_utilities/disconnect_utility.h"

namespace Kratos
{

enum Constraint_Enforcement {Penalty_Methods, Lagrange_Multiplier_Methods};

template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
class ResidualBasedCentralDiferencesStrategy : public SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver>
{
public:

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedCentralDiferencesStrategy);

    typedef SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> BaseType;
    typedef typename BaseType::TDataType TDataType;
    typedef TSparseSpace SparseSpaceType;
    typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename Element::DofsVectorType DofsVectorType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef ModelPart::NodesContainerType NodesArrayType;
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
    typedef ConditionsContainerType::iterator ConditionsContainerIterator;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    typedef BoundaryConditionsAndContactUtilities BoundaryAndContactType;
    typedef ForwardIncrementLagrangeMultiplierScheme LagrangeMultiplierType;
    typedef Joint<4> Joint2D;

    /// Notes:
    /*
    a) Penalty methods correspond to an elastic collision, so the kinetic
       energy is preserved; some damping is needed to dissipate it.
    b) With the Lagrange multiplier approach, the kinetic energy is
       dissipated during the contact interaction, so no extra contact
       damping is required.
    */
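    /// Illustrative 1-D sketch of the two enforcement options described in
    /// the notes above.  These helpers are NOT used by the strategy; the
    /// names and the linear penalty law are assumptions for illustration.
    /// Penalty: the normal force grows with the penetration g, so a small
    /// interpenetration (and hence an elastic rebound) is always allowed:
    static inline double Illustrative1DPenaltyForce(const double g, const double k_penalty)
    {
        return (g > 0.00) ? k_penalty * g : 0.00;  // f = k_p * max(0, g)
    }

    /// Lagrange multiplier: the multiplier is the reaction that cancels the
    /// relative normal velocity v_rel of two point masses over one step dt,
    /// which is why the kinetic energy of approach is removed:
    static inline double Illustrative1DContactReaction(const double m1, const double m2,
                                                       const double v_rel, const double dt)
    {
        return (m1 * m2 / (m1 + m2)) * v_rel / dt;  // impulse m_eff*v_rel over dt
    }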
    ///@param CE                  constraint enforcement scheme (Constraint_Enforcement&)
    ///@param damping_ratio       used to compute the mass-proportional damping matrix
    ///@param fraction_delta_time fraction of the critical time step (0-1)
    ///@param max_delta_time      maximum time increment
    ResidualBasedCentralDiferencesStrategy(
        ModelPart& model_part,
        const Constraint_Enforcement& CE,
        const int dimension,
        const double damping_ratio,      /// used to compute the mass-proportional damping matrix
        const double fraction_delta_time,
        const double max_delta_time,
        const double penalty_factor,
        const bool CalculateReactions,
        const bool ComputeContactConditions,
        const bool MoveMeshFlag,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TSchemeType::Pointer pScheme,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver
    )
        : SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver>(model_part, MoveMeshFlag)
    {
        std::cout <<"DYNAMIC SOLVER ANALYSIS FOR FINITE ELEMENT METHODS"<< std::endl;
        std::cout <<"TIME INTEGRATION METHOD = Central Differences "<< std::endl;
        std::cout <<"IMPLEMENTED BY = Ing. Nelson Lafontaine "<< std::endl;

        mdimension                 = dimension;
        mfraction_delta_time       = fraction_delta_time;
        mmax_delta_time            = max_delta_time;
        mCalculateReactionsFlag    = CalculateReactions;
        mComputeContactConditions  = ComputeContactConditions;
        mElementsAreInitialized    = false;
        mConditionsAreInitialized  = false;
        mInitialConditions         = false;
        mCalculateOldTime          = false;
        mSolutionStepIsInitialized = false;
        mInitializeWasPerformed    = false;
        mClearContactConditions    = false;
        mLocalSearchInitialize     = false;
        mComputeTime               = false;
        minitial_conditions_size   = 0;
        mcontact_conditions_size   = 0;
        mtimestep                  = 0.00;
        mCE                        = CE;

        mpBCCU_Pointer = typename BoundaryAndContactType::Pointer(
            new BoundaryAndContactType(model_part, mdimension, penalty_factor));
        if(mCE==Lagrange_Multiplier_Methods)
        {
            mpLagrangianMultiplier = typename ForwardIncrementLagrangeMultiplierScheme::Pointer(
                new ForwardIncrementLagrangeMultiplierScheme(model_part, mdimension));
        }
        mpLinearSolver     = pNewLinearSolver;
        mpBuilderAndSolver = pNewBuilderAndSolver;
        mpScheme           = pScheme;
        mpBuilderAndSolver->SetReshapeMatrixFlag(false);
        mdamping_coeficients = false;
        mdamping_ratio       = damping_ratio;
        malpha_damp          = 0.00;
        mbeta_damp           = 0.00;
        mpenalty_factor      = penalty_factor;
    }

    virtual ~ResidualBasedCentralDiferencesStrategy () {}
    double Solve()
    {
        KRATOS_TRY

        std::cout<<std::fixed<<std::setw(15)<<std::scientific<<std::setprecision(5);
        ModelPart& r_model_part          = BaseType::GetModelPart();
        ConditionsArrayType& pConditions = r_model_part.Conditions();
        //NodesArrayType& pNodes         = r_model_part.Nodes();
        ProcessInfo& CurrentProcessInfo  = r_model_part.GetProcessInfo();
        #ifdef _OPENMP
        double start_prod = omp_get_wtime();
        #endif

        std::cout<<"------------------------------------------------------------------------"<<std::endl;
        std::cout<<"  KRATOS STRUCTURAL APPLICATION. TIME STEPS = " << CurrentProcessInfo[TIME_STEPS] <<std::endl;
        std::cout<<"------------------------------------------------------------------------"<<std::endl;
        std::cout<<"INITIALIZE SOLVE"<<std::endl;

        /// Initialize the elements and conditions. Create the boundary
        /// elements and the Joints for Heuristic or DG.
        if(mInitializeWasPerformed == false)
        {
            //DofsArrayType& rDofSet = mpBuilderAndSolver->GetDofSet();
            minitial_conditions_size = pConditions.size();
            mDTU.CreateJoints(r_model_part,mdimension);
            Initialize();
            mpBCCU_Pointer->CreateBoundaries(minitial_conditions_size);
            /*
            /// initializing the matrix A
            int size = pNodes.size() * mdimension;
            mpBuilderAndSolver->SetUpDofSet(mpScheme, BaseType::GetModelPart());
            mpBuilderAndSolver->SetUpSystem(BaseType::GetModelPart());
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            mpA.swap(pNewA);
            TSystemMatrixType& mA = *mpA;
            mA.resize(size, size, false);  /// created only once
            mpBuilderAndSolver->BuildLHS_Complete(mpScheme, r_model_part, mA);
            */
        }

        /// Initialize solution step
        InitializeSolutionStep();

        /// Compute the critical time step
        /// TODO: make t const
        if(mComputeTime==false)
        {
            ComputeCriticalTime();
            //mComputeTime = true;
        }

        /// General information of the model
        //Infomation();

        if(mInitialConditions==false)
        {
            ComputeInitialConditions();
            GetForce();
        }

        /// Predict displacements:
        /// compute the new displacement at n+1 from the previous steps
        ComputeIntermedialVelocityAndNewDisplacement();

        /// Compute the step values of the velocities and accelerations
        ComputeOldVelocitiesAndAccelerations();

        if(mCalculateReactionsFlag)
            CalculateReaction();

        /// Compute the model boundaries before the displacement update
        /*
        if(mComputeContactConditions==true )
        {
            //mpBCCU_Pointer->CreateBoundaries(minitial_conditions_size);  /// create the contact surfaces
            // if(mCe==Lagrange_Multiplier_Methods)
            //     mpBCCU_Pointer->LocalSearch();
        }
        */

        /// Update the displacements
        if(BaseType::MoveMeshFlag() == true)
            BaseType::MoveMesh();

        /// Computing the new internal and external forces for step n+1
        GetForce();

        /// Discontinuum mechanics
        //ComputeInterfaceForces();

        /// Fragmentation and fracture for DEM
        //Heuristic_Formula(mDTU.Begin(), mDTU.End());

        /// Damping forces
        ComputeDampingForces();

        /// Compute contact forces and displacement corrections:
        /// perform the bounding-box search, check whether contact
        /// conditions exist, and correct the displacement
        if(mComputeContactConditions==true)
        {
            if(mCE==Penalty_Methods)
            {
                //const bool rflag = false;
                //ResetFlagComputeBoundaryContour(rflag)
                //mpBCCU_Pointer->Clear(minitial_conditions_size);
                //CreateBoundaries(minitial_conditions_size);
                //mpBCCU_Pointer->CreateBoundaries(minitial_conditions_size);  /// list of the boundary elements
                mpBCCU_Pointer->ComputeContactForce();
            }
            if(mCE==Lagrange_Multiplier_Methods)
            {
                ConditionsContainerIterator end_previos;
                ConditionsContainerIterator end_actual;
                mpBCCU_Pointer->CreateBoundaries(minitial_conditions_size);  /// create the contact surfaces
                mcontact_conditions_size = pConditions.size();               /// master condition size + initial conditions
                mpBCCU_Pointer->CreateLinkingConditionsBasedOnLocalSearch(minitial_conditions_size);  /// create the new contact conditions
                int dist = CheckObjectContact(mcontact_conditions_size, end_previos, end_actual);     /// create the contact iterators
                if(dist!=0)
                    Update(end_previos,end_actual);
            }
        }

        if(mComputeContactConditions==true)
        {
            if(mCE==Lagrange_Multiplier_Methods)
            {
                ComputeNormalContactReactions(mcontact_conditions_size);
                CheckConditionsStatus(mcontact_conditions_size);
            }
        }

        /// Finalize solution step
        FinalizeSolutionStep();

        /// Computing energies
        CalculateEnergies();
        #ifdef _OPENMP
        double stop_prod = omp_get_wtime();
        std::cout << "  Time solving = "<< stop_prod - start_prod << "  seconds" << std::endl;
        #endif
        std::cout <<"FINISHED SOLVE"<<std::endl;
        return 0.00;

        KRATOS_CATCH("")
    }

    void Clear()
    {
        KRATOS_TRY
        TSystemVectorType& mDx = *mpDx;
        SparseSpaceType::Clear(mpDx);
        SparseSpaceType::Resize(mDx, 0);
        TSystemVectorType& mFint = *mpFint;
        SparseSpaceType::Clear(mpFint);
        SparseSpaceType::Resize(mFint, 0);
        // setting to zero the internal flag to ensure that the dof sets are recalculated
        mpBuilderAndSolver->SetDofSetIsInitializedFlag(false);
        KRATOS_CATCH("");
    }

    void ComputeDampingForces()
    {
        ComputeViscousDampingForces();
        ComputeNonViscousDampingForces();
    }

    void ComputeViscousDampingForces()
    {
        ComputeDampingForcesWithStiffness();
    }

    void ComputeNonViscousDampingForces()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        NodesArrayType& pNodes = r_model_part.Nodes();

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> nodes_partition;
        CreatePartition(number_of_threads, pNodes.size(), nodes_partition);

        const double damp = mdamping_ratio;
        array_1d<double, 3> DampingForces;
        array_1d<double, 3> rhs_aux;
        #pragma omp parallel for private(rhs_aux,DampingForces)
        for(int k=0; k<number_of_threads; k++)
        {
            typename NodesArrayType::iterator it_begin = pNodes.ptr_begin() + nodes_partition[k];
            typename NodesArrayType::iterator it_end   = pNodes.ptr_begin() + nodes_partition[k+1];
            for (NodesArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                rhs_aux = it->FastGetSolutionStepValue(RHS);
                // NOTE: this must be a reference; the original code took a
                // copy here, so the damping force was never written back.
                array_1d<double,3>& rhs = it->FastGetSolutionStepValue(RHS);
                const array_1d<double,3>& Vel = it->FastGetSolutionStepValue(VELOCITY);
                // Guard against a zero velocity norm (added here; the
                // original divided by norm_2(Vel) unconditionally).
                const double vnorm = norm_2(Vel);
                if (vnorm > 0.00)
                {
                    noalias(DampingForces) = (damp * norm_2(rhs_aux) / vnorm) * Vel;
                    noalias(rhs) = rhs_aux + DampingForces;
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// for discontinuum Galerkin methods
    void ComputeInterfaceForces()
    {
        KRATOS_TRY
        /*
        ModelPart& r_model_part = BaseType::GetModelPart();
        // ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ElementsArrayType& pElements = r_model_part.Elements();
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);
        array_1d<double, 3> rRightHandSideVector;
        #pragma omp parallel for private(rRightHandSideVector)
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
            for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                if(it->GetProperties()[IS_DISCRETE]>=1.00){
                    it->Calculate(INTERFACE_FORCES, rRightHandSideVector, CurrentProcessInfo);
                }
            }
        }
        */
        KRATOS_CATCH("")
    }
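    /// Minimal per-node sketch of the non-viscous damping term applied in
    /// ComputeNonViscousDampingForces() above (an illustrative helper, not
    /// called by the strategy): the damping force has the magnitude of the
    /// residual scaled by xi and acts along the velocity direction,
    /// F_d = xi * |RHS| * v / |v|, and is skipped when |v| = 0.
    static inline array_1d<double,3> IllustrativeNonViscousDamping(
        const array_1d<double,3>& rhs,
        const array_1d<double,3>& vel,
        const double xi)
    {
        array_1d<double,3> fd;
        fd[0] = 0.00; fd[1] = 0.00; fd[2] = 0.00;
        const double vnorm = norm_2(vel);
        if (vnorm > 0.00)
            noalias(fd) = (xi * norm_2(rhs) / vnorm) * vel;
        return fd;
    }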
    /// Compute the viscous damping forces: beta * K_elem * velocity
    /// (stiffness-proportional damping)
    void ComputeDampingForcesWithStiffness()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ElementsArrayType& pElements = r_model_part.Elements();

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);

        unsigned int index = 0;
        unsigned int dim_2 = 0;
        Matrix rLeftHandSideMatrix;
        Vector rRightHandSideVector;
        Vector Velocities;
        #pragma omp parallel for private(index, dim_2, rLeftHandSideMatrix, Velocities, rRightHandSideVector)
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
            for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                /// apply the contact damping forces to the elements that are in contact
                if((it)->GetValue(IS_TARGET)==true)
                {
                    it->CalculateLocalSystem(rLeftHandSideMatrix, rRightHandSideVector, CurrentProcessInfo);
                    Element::GeometryType& geom = it->GetGeometry();
                    const unsigned int& dim             = geom.WorkingSpaceDimension();
                    const unsigned int& number_of_nodes = geom.size();
                    index = 0;
                    dim_2 = dim * number_of_nodes;
                    Velocities.resize(dim_2);
                    rRightHandSideVector.resize(dim_2);
                    for (unsigned int i = 0; i<geom.size(); i++)
                    {
                        array_1d<double,3>& Vel = geom[i].FastGetSolutionStepValue(VELOCITY);
                        Velocities[index]   = Vel[0];
                        Velocities[index+1] = Vel[1];
                        if(dim==2)
                        {
                            index = index + 2;
                        }
                        else
                        {
                            Velocities[index+2] = Vel[2];
                            index = index + 3;
                        }
                    }
                    noalias(rRightHandSideVector) = -mbeta_damp * prod(rLeftHandSideMatrix, Velocities);
                    for (unsigned int i = 0; i<geom.size(); i++)
                    {
                        array_1d<double,3>& rhs = geom[i].FastGetSolutionStepValue(RHS);
                        geom[i].SetLock();
                        index = i*dim;
                        rhs[0] = rhs[0] + rRightHandSideVector[index];
                        rhs[1] = rhs[1] + rRightHandSideVector[index+1];
                        // NOTE: the original code set rhs[2] = 0.00 before the
                        // dim==3 branch, which wiped any z-force already
                        // accumulated on the node; in 3D the contribution is
                        // now added without zeroing first.
                        if(dim==3)
                            rhs[2] = rhs[2] + rRightHandSideVector[index+2];
                        geom[i].UnSetLock();
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    // Compute the normal contact reactions
    void ComputeNormalContactReactions(const unsigned int& initial_conditions_size)
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ConditionsContainerType& pConditions = r_model_part.ConditionsArray();
        ConditionsContainerIterator end_previos = pConditions.begin() + initial_conditions_size;
        ConditionsContainerIterator end_actual  = pConditions.end();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        array_1d<double,3> Output;
        for(ConditionsContainerIterator rcond = end_previos; rcond!=end_actual; rcond++)
            (*rcond)->Calculate(NORMAL, Output, CurrentProcessInfo);
        KRATOS_CATCH("")
    }

    // Erase the previous contact conditions without erasing the other
    // conditions of the structure
    void CheckConditionsStatus(const unsigned int& initial_conditions_size)
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ConditionsContainerType& pConditions = r_model_part.ConditionsArray();
        ConditionsContainerIterator end_previos = pConditions.begin() + initial_conditions_size;
        ConditionsContainerIterator end_actual  = pConditions.end();
        pConditions.erase(end_previos, end_actual);
        KRATOS_CATCH("")
    }

    /// provides the iterators over the linking contact conditions
    int CheckObjectContact(const unsigned int& initial_conditions_size,
                           ConditionsContainerIterator& end_previos,
                           ConditionsContainerIterator& end_actual)
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ConditionsContainerType& pConditions = r_model_part.ConditionsArray();
        end_previos = pConditions.begin() + initial_conditions_size;
        end_actual  = pConditions.end();
        return (std::distance(end_previos, end_actual));
        KRATOS_CATCH("")
    }
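    /// Scalar sketch of the update performed by
    /// ComputeIntermedialVelocityAndNewDisplacement() below (an illustrative
    /// helper, not called by the strategy).  With a variable step the scheme
    /// advances one degree of freedom as
    ///   v(n-1/2) = (u(n) - u(n-1)) / dt_old
    ///   v(n+1/2) = [(2m - c*dt_old) v(n-1/2) + 2*dt_mid * r(n)] / (2m + c*dt_new)
    ///   u(n+1)   = u(n) + dt_new * v(n+1/2)
    /// where c = alpha * m is the mass-proportional damping and
    /// dt_mid = (dt_old + dt_new)/2, matching factor_a/b/c in the loop below.
    static inline double IllustrativeCentralDifferenceUpdate(
        const double u_n, const double u_nm1, const double r_n,
        const double mass, const double alpha,
        const double dt_old, const double dt_new)
    {
        const double c        = alpha * mass;
        const double dt_mid   = 0.50 * (dt_old + dt_new);
        const double v_neg    = (u_n - u_nm1) / dt_old;               // V(n-1/2)
        const double factor_a = 2.00 * mass + c * dt_new;
        const double factor_b = 2.00 * mass - c * dt_old;
        const double v_pos    = (factor_b * v_neg + 2.00 * dt_mid * r_n) / factor_a;  // V(n+1/2)
        return u_n + dt_new * v_pos;                                  // U(n+1)
    }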
    /// compute D(n+1) from V(n+1/2)
    void ComputeIntermedialVelocityAndNewDisplacement()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        NodesArrayType& pNodes = r_model_part.Nodes();

        const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
        const double mid_delta_time     = 0.50*(molddelta_time + current_delta_time);

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> node_partition;
        CreatePartition(number_of_threads, pNodes.size(), node_partition);

        array_1d<double,3> mid_neg_velocity;  /// V(n-1/2)
        array_1d<double,3> mid_pos_velocity;  /// V(n+1/2)
        //array_1d<double,3> new_displacement;
        #pragma omp parallel for private(mid_pos_velocity, mid_neg_velocity)
        for(int k=0; k<number_of_threads; k++)
        {
            typename NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
            typename NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
            for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
            {
                /// Note: the velocity has been added to the equation; before
                /// there was no actual_velocity.
                //array_1d<double,3>& actual_velocity = i->FastGetSolutionStepValue(VELOCITY);  /// we are at step T(n+1)
                array_1d<double,3>& actual_displacement  = i->FastGetSolutionStepValue(DISPLACEMENT);    /// we are at step T(n+1)
                array_1d<double,3>& current_displacement = i->FastGetSolutionStepValue(DISPLACEMENT,1);  /// U(n)
                array_1d<double,3>& old_displacement     = i->FastGetSolutionStepValue(DISPLACEMENT,2);  /// U(n-1)
                array_1d<double,3>& current_Rhs          = i->FastGetSolutionStepValue(RHS);             /// Fext(n) - Fint(n)
                const double& nodal_mass   = i->FastGetSolutionStepValue(NODAL_MASS);
                const double nodal_damping = malpha_damp * nodal_mass;

                const double factor_a = 2.00 * nodal_mass + nodal_damping * current_delta_time;
                const double factor_b = 2.00 * nodal_mass - nodal_damping * molddelta_time;
                const double factor_c = 2.00 * mid_delta_time;

                noalias(mid_neg_velocity) = (1.00/molddelta_time) * (current_displacement - old_displacement);
                noalias(mid_pos_velocity) = (factor_b/factor_a) * mid_neg_velocity + (factor_c/factor_a) * current_Rhs;  //+ actual_velocity;

                /// Update to the new displacement
                ///current_displacement = current_displacement + mid_pos_velocity * current_delta_time;  /// an approximation of the velocity
                if( (i->pGetDof(DISPLACEMENT_X))->IsFixed() == false )
                    actual_displacement[0] = current_displacement[0] + mid_pos_velocity[0] * current_delta_time;
                if( i->pGetDof(DISPLACEMENT_Y)->IsFixed() == false )
                    actual_displacement[1] = current_displacement[1] + mid_pos_velocity[1] * current_delta_time;
                if( i->HasDofFor(DISPLACEMENT_Z))
                {
                    if( i->pGetDof(DISPLACEMENT_Z)->IsFixed() == false )
                        actual_displacement[2] = current_displacement[2] + mid_pos_velocity[2] * current_delta_time;
                }
            }
        }
        //std::cout<< "  OLD TIMESTEP     = "<< molddelta_time     << "  SECONDS" << std::endl;
        //std::cout<< "  CURRENT TIMESTEP = "<< current_delta_time << "  SECONDS" << std::endl;
        //std::cout<< "  AVERAGE TIMESTEP = "<< mid_delta_time     << "  SECONDS" << std::endl;
        KRATOS_CATCH("")
    }

    void Initialize()
    {
        KRATOS_TRY
        /// Initializing the elements
        if(mElementsAreInitialized == false)
            InitializeElements();
        /// Initializing the conditions
        if(mConditionsAreInitialized == false)
            InitializeConditions();
        mInitializeWasPerformed = true;
        KRATOS_CATCH("")
    }

    //***************************************************************************
    //***************************************************************************

    void CalculateNodalMass()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ElementsArrayType& pElements = r_model_part.Elements();
        Matrix MassMatrix;

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);

        unsigned int index = 0;
        #pragma omp parallel for private(index, MassMatrix)
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
            for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                Element::GeometryType& geom = it->GetGeometry();  // the element's nodes
                (it)->CalculateMassMatrix(MassMatrix, CurrentProcessInfo);
                const unsigned int& dim = geom.WorkingSpaceDimension();
                index = 0;
                for (unsigned int i = 0; i<geom.size(); i++)
                {
                    double& mass = geom(i)->FastGetSolutionStepValue(NODAL_MASS);
                    geom(i)->SetLock();
                    index = i*dim;
                    mass = mass + MassMatrix(index,index);
                    geom(i)->UnSetLock();
                }
            }
        }
        KRATOS_CATCH("")
    }

    //***************************************************************************
    //***************************************************************************

    void InitializeElements()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ElementsArrayType& pElements = r_model_part.Elements();
        Matrix MassMatrix;

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);

        unsigned int index = 0;
        #pragma omp parallel for private(index, MassMatrix)
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
            for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                Element::GeometryType& geom = it->GetGeometry();  // the element's nodes
                (it)->Initialize();
                (it)->Set(ACTIVE, true);
                (it)->CalculateMassMatrix(MassMatrix, CurrentProcessInfo);
                const unsigned int& dim = geom.WorkingSpaceDimension();
                index = 0;
                for (unsigned int i = 0; i<geom.size(); i++)
                {
                    double& mass = geom(i)->FastGetSolutionStepValue(NODAL_MASS);
                    geom(i)->SetLock();
                    index = i*dim;
                    mass = mass + MassMatrix(index,index);
                    geom(i)->UnSetLock();
                }
            }
        }
        //r_model_part.GetCommunicator().AssembleCurrentData(NODAL_MASS);
        mElementsAreInitialized = true;
        KRATOS_CATCH("")
    }

    //***************************************************************************
    //***************************************************************************

    /// WARNING: the contact still needs to be added here
    void InitializeConditions()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConditionsArrayType& pConditions = r_model_part.Conditions();

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> condition_partition;
        CreatePartition(number_of_threads, pConditions.size(), condition_partition);

        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ConditionsArrayType::iterator it_begin=pConditions.ptr_begin()+condition_partition[k];
            typename ConditionsArrayType::iterator it_end=pConditions.ptr_begin()+condition_partition[k+1];
            for (ConditionsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                (it)->Initialize();
            }
        }
        mConditionsAreInitialized = true;
        KRATOS_CATCH("")
    }
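    /// Sketch of the mass-lumping rule used by CalculateNodalMass() and
    /// InitializeElements() above (an illustrative helper, not called by the
    /// strategy): each node accumulates the diagonal entry of the element
    /// mass matrix associated with its first displacement DOF, i.e.
    /// mass(node i) += M(i*dim, i*dim).  For a 2-node 1-D bar of total mass
    /// m with a lumped matrix diag(m/2, m/2), each node receives m/2.
    static inline double IllustrativeLumpedNodalMass(
        const Matrix& element_mass_matrix,
        const unsigned int local_node_index,
        const unsigned int dim)
    {
        const unsigned int k = local_node_index * dim;
        return element_mass_matrix(k, k);
    }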
    //***************************************************************************
    //***************************************************************************

    void InitializeSolutionStep()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ElementsArrayType& pElements = r_model_part.Elements();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConditionsArrayType& pConditions = r_model_part.Conditions();

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> element_partition;
        vector<unsigned int> condition_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);
        CreatePartition(number_of_threads, pConditions.size(), condition_partition);

        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
            for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                (it)->InitializeSolutionStep(CurrentProcessInfo);
            }
        }

        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ConditionsArrayType::iterator it_begin=pConditions.ptr_begin()+condition_partition[k];
            typename ConditionsArrayType::iterator it_end=pConditions.ptr_begin()+condition_partition[k+1];
            for (ConditionsArrayType::iterator it= it_begin; it!=it_end; ++it)
            {
                (it)->InitializeSolutionStep(CurrentProcessInfo);
            }
        }
        KRATOS_CATCH("")
    }
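    /// Sketch of the stability and damping relations used in
    /// ComputeCriticalTime() below (an illustrative helper, not called by
    /// the strategy): the largest resolved frequency of the explicit scheme
    /// is taken as w_max = 2 / dt_crit, and the two damping factors are
    ///   beta  = xi / w_max   (stiffness-proportional)
    ///   alpha = xi * w_max   (mass-proportional)
    /// The penalty bound dt <= 0.5 * sqrt(2 * m_min / k_penalty) mirrors
    /// ComputeTimePenalty().
    static inline void IllustrativeDampingFactors(
        const double dt_crit, const double xi,
        double& alpha, double& beta)
    {
        const double w_max = 2.00 / dt_crit;
        beta  = xi / w_max;
        alpha = xi * w_max;
    }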
    //***************************************************************************
    //***************************************************************************

    void ComputeCriticalTime()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        //double delta_time_penalty = 1.00;
        double delta_time_computed = 1.00;
        double delta_time_used     = 1.00;
        double time                = 0.00;
        const int step = CurrentProcessInfo[TIME_STEPS];

        delta_time_computed = ComputeTime();

        // Compute the alpha and beta factors used to damp the structure;
        // see "3D static and dynamic analysis", damping and energy dissipation, 19-7
        if(mdamping_coeficients==false)
        {
            double wmax = 2.00 / delta_time_computed;
            mbeta_damp  = mdamping_ratio / wmax;
            malpha_damp = mdamping_ratio * wmax;
            mdamping_coeficients = true;
        }

        if(mCE==Penalty_Methods || mComputeContactConditions==true)
        {
            double time_penalty = ComputeTimePenalty();
            delta_time_computed = (delta_time_computed<time_penalty) ? delta_time_computed : time_penalty;
        }

        delta_time_computed = Truncar_Delta_Time(delta_time_computed);
        if(delta_time_computed>mmax_delta_time)
        {
            delta_time_computed = mmax_delta_time;
        }

        delta_time_used = mfraction_delta_time * delta_time_computed;
        CurrentProcessInfo[DELTA_TIME] = delta_time_used;
        mtimestep = mtimestep + delta_time_used;
        r_model_part.CloneTimeStep(mtimestep);
        time = CurrentProcessInfo[TIME];
        if(mCalculateOldTime==false)
        {
            molddelta_time    = delta_time_used;
            mCalculateOldTime = true;
        }

        std::cout<< "  Time step                                   = "<< step << std::endl;
        std::cout<< "  Factor damping for stiffness matrix (Beta)  = "<< mbeta_damp << std::endl;
        std::cout<< "  Factor damping for mass matrix (Alpha)      = "<< malpha_damp << std::endl;
        std::cout<< "  Safe factor for incremental critical time   = "<< mfraction_delta_time << std::endl;
        std::cout<< "  Incremental critical time computed          = "<< delta_time_computed << " seconds" << std::endl;
        std::cout<< "  Incremental time used in analysis           = "<< delta_time_used << " seconds" << std::endl;
        std::cout<< "  Current time                                = "<< time << " seconds" << std::endl;
        KRATOS_CATCH("")
    }

    //***************************************************************************
    //***************************************************************************

    void Infomation()
    {
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        const int step               = CurrentProcessInfo[TIME_STEPS];
        const double time            = CurrentProcessInfo[TIME];
        const double delta_time_used = CurrentProcessInfo[DELTA_TIME];
        std::cout<< "  Time step                                   = "<< step << std::endl;
        std::cout<< "  Factor damping for stiffness matrix (Beta)  = "<< mbeta_damp << std::endl;
        std::cout<< "  Factor damping for mass matrix (Alpha)      = "<< malpha_damp << std::endl;
        std::cout<< "  Safe factor for incremental critical time   = "<< mfraction_delta_time << std::endl;
        std::cout<< "  Incremental time used in analysis           = "<< delta_time_used << " seconds" << std::endl;
        std::cout<< "  Current time                                = "<< time << " seconds" << std::endl;
    }

    //***************************************************************************
    //***************************************************************************

    double ComputeTime()
    {
        KRATOS_TRY
        ModelPart& r_model_part = BaseType::GetModelPart();
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ElementsArrayType& pElements = r_model_part.Elements();
        //double step = CurrentProcessInfo[TIME_STEPS];

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        //thread_id = omp_get_thread_num();
        #else
        int number_of_threads = 1;
        //thread_id = 0;
        #endif

        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);

        Vector dts(number_of_threads);
        double delta_time_a = 0.00;
        for(int i = 0; i < number_of_threads; i++)
            dts[i] = mmax_delta_time;
        // WARNING: thread IDs always start at zero.
    #pragma omp parallel for private(delta_time_a) //shared(CurrentProcessInfo)
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::iterator it_end   = pElements.ptr_begin() + element_partition[k+1];
        for(ElementsArrayType::iterator it = it_begin; it != it_end; it++)
        {
            it->Calculate(DELTA_TIME, delta_time_a, CurrentProcessInfo);
            if(delta_time_a>0.00)
                if(delta_time_a < dts[k])
                    dts[k] = delta_time_a;
        }
    }

    return (*std::min_element(dts.begin(), dts.end()));
    KRATOS_CATCH("")
}

/// WARNING: beware of loose (dangling) nodes, i.e. nodes that carry no mass.
double ComputeTimePenalty()
{
    ModelPart& r_model_part = BaseType::GetModelPart();
    //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    NodesArrayType& pNodes = r_model_part.Nodes();
    //ElementsArrayType& pElements = r_model_part.Elements();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    Vector Min_Mass_Nodal(number_of_threads);
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    Properties& prop = r_model_part.GetProperties(1);
    double aux = 0.00;
    double Penalty = (prop)[YOUNG_MODULUS];
    double Mass = (r_model_part.Nodes()(1))->FastGetSolutionStepValue(NODAL_MASS);

    std::size_t nprop = r_model_part.NumberOfProperties();
    for(std::size_t i = 1; i <= nprop; i++)
    {
        Properties& prop_aux = r_model_part.GetProperties(i);
        aux = (prop_aux)[YOUNG_MODULUS];
        Penalty = (Penalty > aux) ? aux : Penalty;
    }
    Penalty = mpenalty_factor * Penalty;

    for(int k=0; k<number_of_threads; k++)
        Min_Mass_Nodal[k] = Mass;

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            double& mass = ((i)->FastGetSolutionStepValue(NODAL_MASS));
            if(mass==0.00)
            {
                KRATOS_WATCH(i->Id())
                KRATOS_THROW_ERROR(std::logic_error, "Detected Nodal Mass with zero value. Please check if the model is created correctly for the current model part", "");
            }
            Min_Mass_Nodal[k] = (Min_Mass_Nodal[k] < mass) ? Min_Mass_Nodal[k] : mass;
        }
    }

    Mass = (*std::min_element(Min_Mass_Nodal.begin(), Min_Mass_Nodal.end()));
    double result = 0.50 * std::sqrt(2.00 * Mass / Penalty);
    return result;
}

//***************************************************************************
//***************************************************************************

void ComputeInitialConditions()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    NodesArrayType& pNodes = r_model_part.Nodes();

    const double DeltaTime = CurrentProcessInfo[DELTA_TIME];
    if(DeltaTime == 0)
        KRATOS_THROW_ERROR(std::logic_error, "Detected delta_time = 0. Please check if the time step is created correctly for the current model part", "");

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    //array_1d<double,3> OldDisplacement;
    #pragma omp parallel for //private(OldDisplacement)
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];

        /// The initial acceleration should be computed as in Chopra's book, chapter 5, table 5.3.1.
        /// NOTE: before the update, (DISPLACEMENT) == (DISPLACEMENT,1);
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            const array_1d<double,3>& CurrentDisplacement = (i->FastGetSolutionStepValue(DISPLACEMENT));
            const array_1d<double,3>& CurrentVelocity     = (i->FastGetSolutionStepValue(VELOCITY));
            const array_1d<double,3>& CurrentAcceleration = (i->FastGetSolutionStepValue(ACCELERATION)) + (i->FastGetSolutionStepValue(GRAVITY));
            array_1d<double,3>& OldDisplacement           = (i->FastGetSolutionStepValue(DISPLACEMENT,2));
            /// D_1, eq. 7.145 of the book "Estructuras Sometidas a Acciones Sismicas".
            noalias(OldDisplacement) = 0.5*DeltaTime*DeltaTime*CurrentAcceleration - DeltaTime*CurrentVelocity + CurrentDisplacement;
        }
    }

    mInitialConditions = true;
    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void GetForce()
{
    KRATOS_TRY

    /// Set the RHS to zero.
    SetToZeroRHS();

    /// Compute the global external nodal forces.
    Calculate_Conditions_RHS_and_Add();

    /// Compute the stress and body force of the element (nonlinear analysis).
    Calculate_Elements_RHS_and_Add();

    /// Compute the internal forces by doing K*u for each element.
    //CalculateLHSElement();

    /// Compute the internal force using the global matrix.
    //CalculateRHSusingGlobalLHS();

    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void SetToZeroRHS()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes = r_model_part.Nodes();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            array_1d<double,3>& normal   = (i->FastGetSolutionStepValue(NORMAL));
            array_1d<double,3>& node_rhs = (i->FastGetSolutionStepValue(RHS));
            noalias(normal)   = ZeroVector(3);
            noalias(node_rhs) = ZeroVector(3);
        }
    }

    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void CalculateRHSusingGlobalLHS()
{
    KRATOS_TRY
    typedef Dof<double> TDofType;
    typedef PointerVectorSet<TDofType, SetIdentityFunction<TDofType> > DofsArrayType;
    typedef std::vector<std::size_t> EquationIdVectorType;

    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes  = r_model_part.Nodes();
    DofsArrayType& rDofSet  = mpBuilderAndSolver->GetDofSet();

    TSystemMatrixType& mA = *mpA;
    const int& size = mA.size1();
    TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
    TSystemVectorPointerType pFnew  = TSystemVectorPointerType(new TSystemVectorType(0));
    mpFint.swap(pFnew);
    mpDx.swap(pNewDx);
    TSystemVectorType& mDx   = *mpDx;
    TSystemVectorType& mFint = *mpFint;
    mFint.resize(size, false);
    mDx.resize(size, false);

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> dof_partition;
    CreatePartition(number_of_threads, rDofSet.size(), dof_partition);

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        DofsArrayType::iterator i_begin = rDofSet.begin() + dof_partition[k];
        DofsArrayType::iterator i_end   = rDofSet.begin() + dof_partition[k+1];
        for(DofsArrayType::iterator i_dof = i_begin; i_dof != i_end; ++i_dof)
        {
            mDx[i_dof->EquationId()] = i_dof->GetSolutionStepValue();
        }
    }

    TSparseSpace::Mult(mA, mDx, mFint);

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    EquationIdVectorType rResult(mdimension);
    if(mdimension==2)
    {
        #pragma omp parallel for firstprivate(rResult) shared(mFint)
        for(int k=0; k<number_of_threads; k++)
        {
            typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
            typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
            for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
            {
                array_1d<double,3>& node_rhs = (i)->FastGetSolutionStepValue(RHS);
                rResult[0] = (i->pGetDof(DISPLACEMENT_X))->EquationId();
                rResult[1] = (i->pGetDof(DISPLACEMENT_Y))->EquationId();
                node_rhs[0] -= mFint[rResult[0]];
                node_rhs[1] -= mFint[rResult[1]];
            }
        }
    }
    else
    {
        KRATOS_THROW_ERROR(std::logic_error, "CalculateRHSusingGlobalLHS() in Central Differences ", "");
        /*
        #pragma omp parallel for private(rResult) shared(mFint)
        for(int k=0; k<number_of_threads; k++)
        {
            typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
            typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
            for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
            {
                array_1d<double,3>& node_rhs = (i->FastGetSolutionStepValue(RHS));
                rResult[0] = i->GetDof(DISPLACEMENT_X).EquationId();
                rResult[1] = i->GetDof(DISPLACEMENT_Y).EquationId();
                rResult[2] = i->GetDof(DISPLACEMENT_Z).EquationId();
                node_rhs[0] -= mFint[rResult[0]];
                node_rhs[1] -= mFint[rResult[1]];
                node_rhs[2] -= mFint[rResult[2]];
            }
        }
        */
    }

    Clear();
    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void Calculate_Conditions_RHS_and_Add()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    ConditionsArrayType& pConditions = r_model_part.Conditions();

    Vector rhs_cond;

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> condition_partition;
    CreatePartition(number_of_threads, pConditions.size(), condition_partition);

    unsigned int index;
    #pragma omp parallel for private (index, rhs_cond)
    for(int k=0; k<number_of_threads; k++)
    {
        typename ConditionsArrayType::iterator it_begin = pConditions.ptr_begin() + condition_partition[k];
        typename ConditionsArrayType::iterator it_end   = pConditions.ptr_begin() + condition_partition[k+1];
        for(ConditionsArrayType::iterator it = it_begin; it != it_end; ++it)
        {
            Condition::GeometryType& geom = it->GetGeometry();
            it->CalculateRightHandSide(rhs_cond, CurrentProcessInfo);
            const unsigned int& dim = geom.WorkingSpaceDimension();
            for(unsigned int i = 0; i < geom.size(); i++)
            {
                index = i*dim;
                array_1d<double,3>& node_rhs = geom(i)->FastGetSolutionStepValue(RHS);
                for(unsigned int kk=0; kk<dim; kk++)
                {
                    geom(i)->SetLock();
                    node_rhs[kk] = node_rhs[kk] + rhs_cond[index+kk];
                    geom(i)->UnSetLock();
                }
            }
        }
    }

    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void CalculateLHSElement()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    ElementsArrayType& pElements = r_model_part.Elements();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);

    Matrix lhs_elem;
    Vector disp;
    Vector rhs_elem;
    unsigned int index;

    #pragma omp parallel for private (index, disp, rhs_elem, lhs_elem)
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::iterator it_end   = pElements.ptr_begin() + element_partition[k+1];
        for(ElementsArrayType::iterator it = it_begin; it != it_end; ++it)
        {
            Element::GeometryType& geom = it->GetGeometry();
            const unsigned int& dim = it->GetGeometry().WorkingSpaceDimension();
            it->CalculateLeftHandSide(lhs_elem, CurrentProcessInfo);
            it->GetValuesVector(disp, 0);
            rhs_elem.resize(disp.size(), false);
            TSparseSpace::Mult(lhs_elem, disp, rhs_elem);
            for(unsigned int i = 0; i < geom.size(); i++)
            {
                index = i*dim;
                array_1d<double,3>& node_rhs = geom(i)->FastGetSolutionStepValue(RHS);
                for(unsigned int kk=0; kk<dim; kk++)
                {
                    geom(i)->SetLock();
                    node_rhs[kk] -= rhs_elem[index+kk];
                    geom(i)->UnSetLock();
                }
            }
        }
    }

    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

void Calculate_Elements_RHS_and_Add()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    ElementsArrayType& pElements = r_model_part.Elements();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);

    Vector rhs_elem;
    unsigned int index;

    #pragma omp parallel for private (index, rhs_elem)
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::iterator it_end   = pElements.ptr_begin() + element_partition[k+1];
        for(ElementsArrayType::iterator it = it_begin; it != it_end; ++it)
        {
            Element::GeometryType& geom = it->GetGeometry();
            const unsigned int& dim = it->GetGeometry().WorkingSpaceDimension();
            it->CalculateRightHandSide(rhs_elem, CurrentProcessInfo);
            for(unsigned int i = 0; i < geom.size(); i++)
            {
                index = i*dim;
                array_1d<double,3>& node_rhs = geom(i)->FastGetSolutionStepValue(RHS);
                for(unsigned int kk=0; kk<dim; kk++)
                {
                    geom(i)->SetLock();
                    node_rhs[kk] += rhs_elem[index+kk];
                    geom(i)->UnSetLock();
                }
            }
        }
    }

    KRATOS_CATCH("")
}

//***************************************************************************
//***************************************************************************

/// Computes the velocities and accelerations of step n.
void ComputeOldVelocitiesAndAccelerations()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    NodesArrayType& pNodes = r_model_part.Nodes();

    const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
    const double mid_delta_time     = 0.50*(molddelta_time + current_delta_time);
    const double inv_sum_delta_time = 1.00 / (molddelta_time + current_delta_time);

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    array_1d<double,3> mid_neg_velocity;     /// V(n-1/2)
    array_1d<double,3> mid_pos_velocity;     /// V(n+1/2)
    array_1d<double,3> mid_neg_velocity_old; /// V(n-1/2)
    array_1d<double,3> mid_pos_velocity_old; /// V(n+1/2)

    #pragma omp parallel for private (mid_pos_velocity, mid_neg_velocity, mid_neg_velocity_old, mid_pos_velocity_old)
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            array_1d<double,3>& normal = i->FastGetSolutionStepValue(NORMAL);
            noalias(normal) = ZeroVector(3);

            array_1d<double,3>& displacement         = i->FastGetSolutionStepValue(DISPLACEMENT);   /// We are at step T(n+1)
            array_1d<double,3>& current_displacement = i->FastGetSolutionStepValue(DISPLACEMENT,1); /// U(n)
            array_1d<double,3>& olddisplacement      = i->FastGetSolutionStepValue(DISPLACEMENT,2); /// U(n-1)
            array_1d<double,3>& veryolddisplacement  = i->FastGetSolutionStepValue(DISPLACEMENT,3); /// U(n-2)

            noalias(mid_neg_velocity)     = (1.00/molddelta_time)     * (current_displacement - olddisplacement);
            noalias(mid_pos_velocity)     = (1.00/current_delta_time) * (displacement - current_displacement);
            noalias(mid_neg_velocity_old) = (1.00/molddelta_time)     * (olddisplacement - veryolddisplacement);
            noalias(mid_pos_velocity_old) = (1.00/current_delta_time) * (current_displacement - olddisplacement);

            array_1d<double,3>& velocity     = (i->FastGetSolutionStepValue(VELOCITY));
            array_1d<double,3>& acceleration = (i->FastGetSolutionStepValue(ACCELERATION));
            //KRATOS_WATCH(velocity)

            /// X
            if( (i->pGetDof(DISPLACEMENT_X))->IsFixed() == false)
            {
                //if(velocity[0]==0.00)
                {
                    velocity[0]     = inv_sum_delta_time * (displacement[0] - olddisplacement[0]);
                    acceleration[0] = (1.00 / mid_delta_time) * (mid_pos_velocity[0] - mid_neg_velocity[0]);
                }
            }
            else
            {
                //velocity[0]     = inv_sum_delta_time * (displacement[0] - olddisplacement[0]);
                //acceleration[0] = (1.00 / mid_delta_time) * (mid_pos_velocity[0] - mid_neg_velocity[0]);
                velocity[0]     = inv_sum_delta_time * (current_displacement[0] - veryolddisplacement[0]);
                acceleration[0] = (1.00 / mid_delta_time) * (mid_pos_velocity_old[0] - mid_neg_velocity_old[0]);
            }

            /// Y
            if( i->pGetDof(DISPLACEMENT_Y)->IsFixed() == false )
            {
                //if(velocity[1]==0.00)
                {
                    velocity[1]     = inv_sum_delta_time * (displacement[1] - olddisplacement[1]);
                    acceleration[1] = (1.00 / mid_delta_time) * (mid_pos_velocity[1] - mid_neg_velocity[1]);
                }
            }
            else
            {
                //velocity[1]     = inv_sum_delta_time * (displacement[1] - olddisplacement[1]);
                //acceleration[1] = (1.00 / mid_delta_time) * (mid_pos_velocity[1] - mid_neg_velocity[1]);
                velocity[1]     = inv_sum_delta_time * (current_displacement[1] - veryolddisplacement[1]);
                acceleration[1] = (1.00 / mid_delta_time) * (mid_pos_velocity_old[1] - mid_neg_velocity_old[1]);
            }

            /// Z
            if( i->HasDofFor(DISPLACEMENT_Z))
            {
                if( i->pGetDof(DISPLACEMENT_Z)->IsFixed() == false )
                {
                    //if(velocity[2]==0.00)
                    {
                        velocity[2]     = inv_sum_delta_time * (displacement[2] - olddisplacement[2]);
                        acceleration[2] = (1.00 / mid_delta_time) * (mid_pos_velocity[2] - mid_neg_velocity[2]);
                    }
                }
                else
                {
                    //velocity[2]     = inv_sum_delta_time * (displacement[2] - olddisplacement[2]);
                    //acceleration[2] = (1.00 / mid_delta_time) * (mid_pos_velocity[2] - mid_neg_velocity[2]);
                    velocity[2]     = inv_sum_delta_time * (current_displacement[2] - veryolddisplacement[2]);
                    acceleration[2] = (1.00 / mid_delta_time) * (mid_pos_velocity_old[2] - mid_neg_velocity_old[2]);
                }
            }

            //KRATOS_WATCH(velocity)
            //KRATOS_WATCH("-----------------------")
            //if(i->Id()==374)
            //    KRATOS_WATCH(velocity)
        }
    }

    KRATOS_CATCH("")
}

/// Computes the contact force and displacement corrections.
void Update(const ConditionsContainerIterator& end_previos,
            const ConditionsContainerIterator& end_actual)
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
    double midtimestep = 0.50*(molddelta_time + current_delta_time);
    mpLagrangianMultiplier->CalculateContactForceAndDisplacementCorrections(malpha_damp, midtimestep, end_previos, end_actual);
    KRATOS_CATCH("")
    return;
}

void FinalizeSolutionStep()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    ElementsArrayType& pElements = r_model_part.Elements();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    ConditionsArrayType& pConditions = r_model_part.Conditions();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> element_partition;
    vector<unsigned int> condition_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    CreatePartition(number_of_threads, pConditions.size(), condition_partition);

    molddelta_time = CurrentProcessInfo[DELTA_TIME];

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::iterator it_end   = pElements.ptr_begin() + element_partition[k+1];
        for(ElementsArrayType::iterator it = it_begin; it != it_end; ++it)
        {
            (it)->GetValue(IS_TARGET) = false;
            (it)->FinalizeSolutionStep(CurrentProcessInfo);
        }
    }

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ConditionsArrayType::iterator it_begin = pConditions.ptr_begin() + condition_partition[k];
        typename ConditionsArrayType::iterator it_end   = pConditions.ptr_begin() + condition_partition[k+1];
        for(ConditionsArrayType::iterator it = it_begin; it != it_end; ++it)
        {
            (it)->FinalizeSolutionStep(CurrentProcessInfo);
        }
    }

    KRATOS_CATCH("")
}

void ChangeContactConditions(const bool& contact)
{
    KRATOS_TRY
    mComputeContactConditions = contact;
    KRATOS_CATCH("")
}

// Recreates the contact conditions.
// WARNING: we must avoid deleting the existing ones.
void RecalculateBoundaryContours(const bool& rflag)
{
    KRATOS_TRY
    std::cout << "RECOMPUTING BOUNDARY CONTOURS " << std::endl;
    mpBCCU_Pointer->ResetFlagComputeBoundaryContour(rflag);
    KRATOS_CATCH("")
}

void ChangeFractionDeltaTime(const double& new_fraction_delta_time)
{
    KRATOS_TRY
    mfraction_delta_time = new_fraction_delta_time;
    KRATOS_CATCH("")
}

void CalculateReaction()
{
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes = r_model_part.Nodes();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            array_1d<double,3>& reaction           = (i->FastGetSolutionStepValue(REACTION));
            const double& mass                     = (i->FastGetSolutionStepValue(NODAL_MASS));
            const array_1d<double,3>& rhs          = (i->FastGetSolutionStepValue(RHS));
            const array_1d<double,3>& acceleration = (i->FastGetSolutionStepValue(ACCELERATION));
            const array_1d<double,3>& velocity     = (i->FastGetSolutionStepValue(VELOCITY));
            //array_1d<double,3> dif = rhs;

            if( (i->pGetDof(DISPLACEMENT_X))->IsFixed() == true)
            {
                reaction[0] = -rhs[0] + mass * acceleration[0] + mass * malpha_damp * velocity[0];
            }
            if( i->pGetDof(DISPLACEMENT_Y)->IsFixed() == true )
            {
                reaction[1] = -rhs[1] + mass * acceleration[1] + mass * malpha_damp * velocity[1];
            }
            if( i->HasDofFor(DISPLACEMENT_Z))
            {
                if( i->pGetDof(DISPLACEMENT_Z)->IsFixed() == true )
                {
                    reaction[2] = -rhs[2] + mass * acceleration[2] + mass * malpha_damp * velocity[2];
                }
            }
        }
    }
}

private:

unsigned int mdimension;
unsigned int minitial_conditions_size;
unsigned int mcontact_conditions_size;

bool mInitialCalculations;
bool mElementsAreInitialized;
bool mConditionsAreInitialized;
bool mInitialConditions;
bool mCalculateReactionsFlag;
bool mReformDofSetAtEachStep;
bool mInitializeWasPerformed;
bool mComputeContactConditions;
bool mCalculateOldTime;
bool mSolutionStepIsInitialized;
bool mClearContactConditions;
bool mLocalSearchInitialize;
bool mdamping_coeficients;
bool mComputeTime;

double mdamping_ratio;
double malpha_damp;
double mbeta_damp;
double mfraction_delta_time;
double mmax_delta_time;
double molddelta_time;
double mtimestep;        /// running sum of the delta times
double mpenalty_factor;

Constraint_Enforcement mCE;
Disconnect_Triangle_Utilities mDTU;

TSystemMatrixPointerType mpA;
TSystemVectorPointerType mpDx;
TSystemVectorPointerType mpFint;

/// Contact condition iterations.
typename BoundaryAndContactType::Pointer mpBCCU_Pointer;
typename LagrangeMultiplierType::Pointer mpLagrangianMultiplier;
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver;
typename TLinearSolver::Pointer mpLinearSolver;
typename TSchemeType::Pointer mpScheme;

//******************************************************************************************
//******************************************************************************************

inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for(unsigned int i = 1; i<number_of_threads; i++)
        partitions[i] = partitions[i-1] + partition_size;
}

/// Truncates num down to one significant decimal digit (e.g. 0.0123 -> 0.01).
inline double Truncar_Delta_Time(double& num)
{
    bool trunc = false;
    double num_trucado = num;
    unsigned long int a = 1;
    unsigned long int i = 10;
    if(num!=0.00)
    {
        while(trunc==false)
        {
            num_trucado = num_trucado*i;
            a = a*i;
            if(num_trucado >= 1)
            {
                num_trucado = static_cast<long unsigned int>(num_trucado);
                num = num_trucado/a;
                trunc = true;
            }
        }
    }
    return num;
}

void CalculateEnergies()
{
    CalculatePotecialEnergy();
    CalculateKineticEnergy();
    //CalculateDeformationEnergy();
}

void CalculateDeformationEnergy()
{
}

/// We assume gravity acts in the Y direction.
void CalculatePotecialEnergy()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes = r_model_part.Nodes();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    double h_efe;
    #pragma omp parallel for private(h_efe)
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            if(i->Y()>0.00)
            {
                double& y_coord           = i->Y0();
                const double& displ       = i->FastGetSolutionStepValue(DISPLACEMENT_Y);
                const double& gravity     = i->FastGetSolutionStepValue(GRAVITY_Y);
                const double& nodal_mass  = i->FastGetSolutionStepValue(NODAL_MASS);
                double& potencial_energy  = i->FastGetSolutionStepValue(POTENCIAL_ENERGY);
                potencial_energy = 0.00;
                h_efe = y_coord + displ;
                potencial_energy = std::fabs(nodal_mass * gravity * h_efe);
            }
        }
    }
    KRATOS_CATCH("")
}

void CalculateKineticEnergy()
{
    KRATOS_TRY
    ModelPart& r_model_part = BaseType::GetModelPart();
    NodesArrayType& pNodes = r_model_part.Nodes();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);

    double vel = 0.00;
    //double h = 0.1;
    #pragma omp parallel for private(vel)
    for(int k=0; k<number_of_threads; k++)
    {
        typename NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k];
        typename NodesArrayType::iterator i_end   = pNodes.ptr_begin() + node_partition[k+1];
        for(ModelPart::NodeIterator i = i_begin; i != i_end; ++i)
        {
            array_1d<double,3>& Velocity = i->FastGetSolutionStepValue(VELOCITY);
            double& mass                 = i->FastGetSolutionStepValue(NODAL_MASS);
            double& kinetic_energy       = i->FastGetSolutionStepValue(KINETIC_ENERGY);
            vel = norm_2(Velocity);
            /// WARNING: only valid for a diagonal (lumped) mass matrix.
            /// Ek = 0.50 * V^t * M * V
            kinetic_energy = 0.50 * mass * vel * vel;
        }
    }
    KRATOS_CATCH("")
}

void Heuristic_Formula(std::vector<Joint2D>::iterator Begin, std::vector<Joint2D>::iterator End)
{
    KRATOS_TRY
    double dpefa = 0.63;
    double dpefb = 1.8;
    double dpefc = 6.0;
    double dpefm = 0.0;
    double small, sabs, o, s, o1, o2, s1, s2, op, sp, ot, st, z, sigma, tau;
    double e1x, e1y, h, area;
    int nfail = 0;
    //int nsoft;
    double d1nccx[4];
    double d1nccy[4];

    ModelPart& r_model_part = BaseType::GetModelPart();
    Properties& prop = r_model_part.GetProperties(1);
    const double& dpeft = (prop)[TENSILE_STRENGTH];
    const double dpepe  = mpenalty_factor * (prop)[YOUNG_MODULUS];
    const double& dpefs = (prop)[SHEAR_STRENGTH];
    const double& dpegf = (prop)[FRACTURE_ENERGY];
    small = 1E-9;
    //nsoft = 0;

    for(std::vector<Joint2D>::iterator Joint = Begin; Joint != End; ++Joint)
    {
        if((Joint)->IsFail()==false)
        {
            array_1d<double,3>& node_rhs_0 = (*Joint)[0]->FastGetSolutionStepValue(RHS);
            array_1d<double,3>& node_rhs_1 = (*Joint)[1]->FastGetSolutionStepValue(RHS);
            array_1d<double,3>& node_rhs_2 = (*Joint)[2]->FastGetSolutionStepValue(RHS);
            array_1d<double,3>& node_rhs_3 = (*Joint)[3]->FastGetSolutionStepValue(RHS);

            d1nccx[0] = (*Joint)[0]->X();
            d1nccx[1] = (*Joint)[1]->X();
            d1nccx[2] = (*Joint)[2]->X();
            d1nccx[3] = (*Joint)[3]->X();
            d1nccy[0] = (*Joint)[0]->Y();
            d1nccy[1] = (*Joint)[1]->Y();
            d1nccy[2] = (*Joint)[2]->Y();
            d1nccy[3] = (*Joint)[3]->Y();

            e1x = 0.50*(d1nccx[1]+d1nccx[2]-d1nccx[0]-d1nccx[3]);
            e1y = 0.50*(d1nccy[1]+d1nccy[2]-d1nccy[0]-d1nccy[3]);
            h   = std::sqrt(e1x*e1x+e1y*e1y);
            e1x = e1x/(h+small);
            e1y = e1y/(h+small);

            s1 = (d1nccy[0]-d1nccy[3])*e1y+(d1nccx[0]-d1nccx[3])*e1x;
            s2 = (d1nccy[1]-d1nccy[2])*e1y+(d1nccx[1]-d1nccx[2])*e1x;
            o1 = (d1nccy[0]-d1nccy[3])*e1x-(d1nccx[0]-d1nccx[3])*e1y;
            o2 = (d1nccy[1]-d1nccy[2])*e1x-(d1nccx[1]-d1nccx[2])*e1y;

            op = 2.00*h*dpeft/dpepe;
            sp = 2.00*h*dpefs/dpepe;
            ot = std::max((2.00*op),(3.00*dpegf/dpeft));
            st = std::max((2.00*sp),(3.00*dpegf/dpefs));

            //nfail=0;
            for(int integ=0; integ<3; integ++)
            {
                if(integ==0)
                {
                    o = o1;
                    s = s1;
                }
                else if(integ==2)
                {
                    o = o2;
                    s = s2;
                }
                else
                {
                    o = 0.50*(o1+o2);
                    s = 0.50*(s1+s2);
                }

                sabs = std::fabs(s);
                if((o>op)&&(sabs>sp))
                {
                    z = std::sqrt(((o-op)/ot)*((o-op)/ot)+((sabs-sp)/st)*((sabs-sp)/st));
                }
                else if(o>op)
                {
                    z = (o-op)/ot;
                }
                else if(sabs>sp)
                {
                    z = (sabs-sp)/st;
                }
                else
                {
                    z = 0.00;
                }

                if(z>=1.00)
                {
                    nfail = nfail+1;
                    if((nfail>1))
                        (Joint)->SetFail();
                    z = 1.00;
                }

                z = (1.00 - ((dpefa+dpefb-1.00)/(dpefa+dpefb))*exp(z*(dpefa+dpefc*dpefb)/((dpefa+dpefb)*(1.00-dpefa-dpefb))))*(dpefa*(1.00-z)+dpefb*pow((1.00-z),dpefc));

                if(o<0.00) /* normal stress */
                {
                    sigma = 2.00*o*dpeft/op; /* sigma=R0; */
                }
                else if(o>op)
                {
                    sigma = dpeft*z;
                    //nsoft=nsoft+1;
                }
                else
                {
                    sigma = (2.00*o/op-(o/op)*(o/op))*z*dpeft;
                }

                if((sigma>0.00)&&(sabs>sp)) /* shear stress */
                {
                    tau = z*dpefs;
                }
                else if(sigma>0.00)
                {
                    tau = (2.00*(sabs/sp)-(sabs/sp)*(sabs/sp))*z*dpefs;
                }
                else if(sabs>sp)
                {
                    tau = z*dpefs-dpefm*sigma;
                }
                else
                {
                    tau = (2.00*(sabs/sp)-(sabs/sp)*(sabs/sp))*(z*dpefs-dpefm*sigma);
                }

                if(s<0.00)
                    tau = -tau;

                if(integ==0) /* nodal forces */
                {
                    area = h/6.00;
                    node_rhs_0[0] = node_rhs_0[0] - area*(tau*e1x-sigma*e1y);
                    node_rhs_0[1] = node_rhs_0[1] - area*(tau*e1y+sigma*e1x);
                    node_rhs_3[0] = node_rhs_3[0] + area*(tau*e1x-sigma*e1y);
                    node_rhs_3[1] = node_rhs_3[1] + area*(tau*e1y+sigma*e1x);
                }
                else if(integ==1)
                {
                    area = h/3.00;
                    node_rhs_0[0] = node_rhs_0[0] - area*(tau*e1x-sigma*e1y);
                    node_rhs_0[1] = node_rhs_0[1] - area*(tau*e1y+sigma*e1x);
                    node_rhs_3[1] = node_rhs_3[1] + area*(tau*e1y+sigma*e1x);
                    node_rhs_3[0] = node_rhs_3[0] + area*(tau*e1x-sigma*e1y);
                    node_rhs_1[0] = node_rhs_1[0] - area*(tau*e1x-sigma*e1y);
                    node_rhs_1[1] = node_rhs_1[1] - area*(tau*e1y+sigma*e1x);
                    node_rhs_2[1] = node_rhs_2[1] + area*(tau*e1y+sigma*e1x);
                    node_rhs_2[0] = node_rhs_2[0] + area*(tau*e1x-sigma*e1y);
                }
                else
                {
                    area = h/6.00;
                    node_rhs_1[0] = node_rhs_1[0] - area*(tau*e1x-sigma*e1y);
                    node_rhs_1[1] = node_rhs_1[1] - area*(tau*e1y+sigma*e1x);
                    node_rhs_2[1] = node_rhs_2[1] + area*(tau*e1y+sigma*e1x);
                    node_rhs_2[0] = node_rhs_2[0] + area*(tau*e1x-sigma*e1y);
                }
            }
        }
    }
    KRATOS_CATCH("")
}
};
} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUALBASED_CENTRAL_DIFERENCES_STRATEGY */
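Every parallel loop in the strategy above follows the same hand-rolled work-sharing idiom: split the container into number_of_threads contiguous ranges with CreatePartition, then hand one range to each iteration of an `omp parallel for`. The standalone sketch below shows that pattern in isolation; all names are illustrative (not the Kratos API), and the splitting rule matches CreatePartition above, with the division remainder absorbed by the last chunk.

#include <omp.h>
#include <cstdio>
#include <vector>

// Same splitting rule as CreatePartition: equal chunks, remainder in the last one.
static void create_partition(unsigned int nthreads, int nrows, std::vector<unsigned int>& parts)
{
    parts.resize(nthreads + 1);
    int chunk = nrows / nthreads;
    parts[0] = 0;
    parts[nthreads] = nrows;
    for (unsigned int i = 1; i < nthreads; i++)
        parts[i] = parts[i - 1] + chunk;
}

int main()
{
    std::vector<double> data(10001, 1.0);
    unsigned int nthreads = omp_get_max_threads();
    std::vector<unsigned int> parts;
    create_partition(nthreads, (int)data.size(), parts);

    double total = 0.0;
    // One outer iteration per thread; each works on its own contiguous range.
    #pragma omp parallel for reduction(+ : total)
    for (int k = 0; k < (int)nthreads; k++)
        for (unsigned int i = parts[k]; i < parts[k + 1]; i++)
            total += data[i];

    std::printf("sum = %f\n", total); // prints 10001.000000
    return 0;
}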
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* default to 0 so missing command-line arguments cannot leave the sizes
   * uninitialized; the stencil guard below then simply skips the computation */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,8);t1++) {
    lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
    ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),ceild(16*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),floord(16*t2+Nx+12,1024)),floord(16*t3+Nx+12,1024)),floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) {
          for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),1024*t4+1022),16*t1-16*t2+Nz+13);t5++) {
            for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[(t5+1)%2][(-t5+t6)][(-t5+t7)][(-t5+t8)] =
                      coef[0][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)][(-t5+t7)][(-t5+t8)]
                    + coef[1][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)-1][(-t5+t7)][(-t5+t8)]
                    + coef[2][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)][(-t5+t7)-1][(-t5+t8)]
                    + coef[3][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)][(-t5+t7)][(-t5+t8)-1]
                    + coef[4][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)+1][(-t5+t7)][(-t5+t8)]
                    + coef[5][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)][(-t5+t7)+1][(-t5+t8)]
                    + coef[6][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5%2][(-t5+t6)][(-t5+t7)][(-t5+t8)+1];
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(A);
  free(coef);
  free(tile_size);

  return 0;
}
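For reference, the time-skewed, tiled CLooG nest above is semantically the plain 7-point update below. This is a sketch reconstructed from the transformed loop bounds (t5 is the time step, t6-t5/t7-t5/t8-t5 the spatial indices), using the same A and coef arrays; it is equivalent up to execution order but far less cache-friendly:

/* Untiled reference form of the same order-1 variable-coefficient stencil. */
for (t = 0; t < Nt - 1; t++)
  for (i = 1; i < Nz - 1; i++)
    for (j = 1; j < Ny - 1; j++)
      for (k = 1; k < Nx - 1; k++)
        A[(t + 1) % 2][i][j][k] =
            coef[0][i][j][k] * A[t % 2][i][j][k]
          + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
          + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
          + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
          + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
          + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
          + coef[6][i][j][k] * A[t % 2][i][j][k + 1];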
ch_common.c
#define MAIN
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "ch_common.h"
#include "cholesky.h"

static void get_block_rank(int *block_rank, int nt);

void omp_potrf(double * const A, int ts, int ld)
{
    static int INFO;
    static const char L = 'L';
    dpotrf_(&L, &ts, A, &ld, &INFO);
}

void omp_trsm(double *A, double *B, int ts, int ld)
{
    static char LO = 'L', TR = 'T', NU = 'N', RI = 'R';
    static double DONE = 1.0;
    dtrsm_(&RI, &LO, &TR, &NU, &ts, &ts, &DONE, A, &ld, B, &ld);
}

void omp_gemm(double *A, double *B, double *C, int ts, int ld)
{
    static const char TR = 'T', NT = 'N';
    static double DONE = 1.0, DMONE = -1.0;
    dgemm_(&NT, &TR, &ts, &ts, &ts, &DMONE, A, &ld, B, &ld, &DONE, C, &ld);
}

void omp_syrk(double *A, double *B, int ts, int ld)
{
    static char LO = 'L', NT = 'N';
    static double DONE = 1.0, DMONE = -1.0;
    dsyrk_(&LO, &NT, &ts, &ts, &DMONE, A, &ld, &DONE, B, &ld);
}

void cholesky_single(const int ts, const int nt, double* A[nt][nt])
{
    for (int k = 0; k < nt; k++) {
        #pragma omp task depend(out: A[k][k])
        {
            omp_potrf(A[k][k], ts, ts);
#ifdef DEBUG
            if (mype == 0) printf("potrf:out:A[%d][%d]\n", k, k);
#endif
        }
        for (int i = k + 1; i < nt; i++) {
            #pragma omp task depend(in: A[k][k]) depend(out: A[k][i])
            {
                omp_trsm(A[k][k], A[k][i], ts, ts);
#ifdef DEBUG
                if (mype == 0) printf("trsm :in:A[%d][%d]:out:A[%d][%d]\n", k, k, k, i);
#endif
            }
        }
        for (int i = k + 1; i < nt; i++) {
            for (int j = k + 1; j < i; j++) {
                #pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i])
                {
                    omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
#ifdef DEBUG
                    if (mype == 0) printf("gemm :in:A[%d][%d]:A[%d][%d]:out:A[%d][%d]\n", k, i, k, j, j, i);
#endif
                }
            }
            #pragma omp task depend(in: A[k][i]) depend(out: A[i][i])
            {
                omp_syrk(A[k][i], A[i][i], ts, ts);
#ifdef DEBUG
                if (mype == 0) printf("syrk :in:A[%d][%d]:out:A[%d][%d]\n", k, i, i, i);
#endif
            }
        }
    }
    #pragma omp taskwait
}

inline void reset_send_flags(char *send_flags)
{
    for (int i = 0; i < np; i++) send_flags[i] = 0;
}

int main(int argc, char *argv[])
{
    /* MPI Initialize */
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE) {
        printf("This MPI library does not support MPI_THREAD_MULTIPLE\n");
        exit(-1);
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &mype);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    /* cholesky init */
    const char *result[3] = {"n/a", "successful", "UNSUCCESSFUL"};
    const double eps = BLAS_dfpinfo(blas_eps);

    if (argc < 4) {
        printf("cholesky matrix_size block_size check\n");
        exit(-1);
    }
    const int n = atoi(argv[1]);  // matrix size
    const int ts = atoi(argv[2]); // tile size
    int check = atoi(argv[3]);    // check the result?
    const int nt = n / ts;
    if (mype == 0) printf("nt = %d, ts = %d\n", nt, ts);

    /* Set block rank */
    int *block_rank = malloc(nt * nt * sizeof(int));
    get_block_rank(block_rank, nt);
#ifdef DEBUG
    if (mype == 0) {
        for (int i = 0; i < nt; i++) {
            for (int j = 0; j < nt; j++) {
                printf("%d ", block_rank[i * nt + j]);
            }
            printf("\n");
        }
    }
#endif

    double *A[nt][nt], *B, *C[nt], *Ans[nt][nt];

    #pragma omp parallel
    {
        #pragma omp single
        {
            for (int i = 0; i < nt; i++) {
                for (int j = 0; j < nt; j++) {
                    #pragma omp task depend(out: A[i][j]) shared(Ans, A)
                    {
                        if (check) {
                            MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &Ans[i][j]);
                            initialize_tile(ts, Ans[i][j]);
                        }
                        if (block_rank[i*nt+j] == mype) {
                            MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &A[i][j]);
                            if (!check) {
                                initialize_tile(ts, A[i][j]);
                            } else {
                                for (int k = 0; k < ts * ts; k++) {
                                    A[i][j][k] = Ans[i][j][k];
                                }
                            }
                        }
                    }
                }
                #pragma omp task depend(inout: A[i][i]) shared(Ans, A)
                {
                    // add to diagonal
                    if (check) {
                        Ans[i][i][i*ts+i] = (double)nt;
                    }
                    if (block_rank[i*nt+i] == mype) {
                        A[i][i][i*ts+i] = (double)nt;
                    }
                }
            }
        } // omp single
    } // omp parallel

    MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &B);
    for (int i = 0; i < nt; i++) {
        MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &C[i]);
    }

    /* the thread count must be queried from inside a parallel region */
    #pragma omp parallel
    #pragma omp single
    num_threads = omp_get_num_threads();

    const float t3 = get_time();
    if (check) cholesky_single(ts, nt, (double* (*)[nt]) Ans);
    const float t4 = get_time() - t3;

    MPI_Barrier(MPI_COMM_WORLD);
    if (mype == 0) printf("Starting parallel computation\n");
    const float t1 = get_time();
    cholesky_mpi(ts, nt, (double* (*)[nt])A, B, C, block_rank);
    const float t2 = get_time() - t1;
    if (mype == 0) printf("Finished parallel computation\n");
    MPI_Barrier(MPI_COMM_WORLD);

    /* Verification */
    if (check) {
        for (int i = 0; i < nt; i++) {
            for (int j = 0; j < nt; j++) {
                if (block_rank[i * nt + j] == mype) {
                    for (int k = 0; k < ts*ts; k++) {
                        if (Ans[i][j][k] != A[i][j][k]) check = 2;
                    }
                }
            }
        }
    }

    float time_mpi = t2;
    float gflops_mpi = (((1.0 / 3.0) * n * n * n) / ((time_mpi) * 1.0e+9));
    float time_ser = t4;
    float gflops_ser = (((1.0 / 3.0) * n * n * n) / ((time_ser) * 1.0e+9));
    printf("test:%s-%d-%d-%d:mype:%2d:np:%2d:threads:%2d:result:%s:gflops:%f:time:%f:gflops_ser:%f:time_ser:%f\n",
           argv[0], n, ts, num_threads, mype, np, num_threads, result[check],
           gflops_mpi, t2, gflops_ser, t4);

    /* memory obtained from MPI_Alloc_mem must be released with MPI_Free_mem */
    for (int i = 0; i < nt; i++) {
        for (int j = 0; j < nt; j++) {
            if (block_rank[i*nt+j] == mype) {
                MPI_Free_mem(A[i][j]);
            }
            if (check) MPI_Free_mem(Ans[i][j]);
        }
        MPI_Free_mem(C[i]);
    }
    MPI_Free_mem(B);
    free(block_rank);

    MPI_Finalize();
    return 0;
}

static void get_block_rank(int *block_rank, int nt)
{
    int row, col;
    row = col = np;

    if (np != 1) {
        while (1) {
            row = row / 2;
            if (row * col == np) break;
            col = col / 2;
            if (row * col == np) break;
        }
    }
    if (mype == 0) printf("row = %d, col = %d\n", row, col);

    int i, j, tmp_rank = 0, offset = 0;
    for (i = 0; i < nt; i++) {
        for (j = 0; j < nt; j++) {
            block_rank[i*nt + j] = tmp_rank + offset;
            tmp_rank++;
            if (tmp_rank >= col) tmp_rank = 0;
        }
        tmp_rank = 0;
        offset = (offset + col >= np) ? 0 : offset + col;
    }
}
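get_block_rank() lays the nt x nt tiles out block-cyclically over a row x col factorization of np. The standalone sketch below reproduces the same mapping with np passed as a parameter instead of the global (purely illustrative); for np = 4, nt = 4 it prints the rows `0 1 0 1`, `2 3 2 3`, `0 1 0 1`, `2 3 2 3`:

#include <stdio.h>

/* Same logic as get_block_rank() above, made self-contained. */
static void block_rank_sketch(int *block_rank, int nt, int np)
{
    int row = np, col = np;
    if (np != 1) {
        while (1) {
            row = row / 2;
            if (row * col == np) break;
            col = col / 2;
            if (row * col == np) break;
        }
    }
    int tmp_rank = 0, offset = 0;
    for (int i = 0; i < nt; i++) {
        for (int j = 0; j < nt; j++) {
            block_rank[i * nt + j] = tmp_rank + offset; /* cycle 0..col-1, shifted per row */
            tmp_rank++;
            if (tmp_rank >= col) tmp_rank = 0;
        }
        tmp_rank = 0;
        offset = (offset + col >= np) ? 0 : offset + col;
    }
}

int main(void)
{
    int nt = 4, np = 4;
    int block_rank[16];
    block_rank_sketch(block_rank, nt, np);
    for (int i = 0; i < nt; i++) {
        for (int j = 0; j < nt; j++)
            printf("%d ", block_rank[i * nt + j]);
        printf("\n");
    }
    return 0;
}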
eavlInfoTopologyScatterMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information. #ifndef EAVL_INFO_TOPOLOGY_SCATTER_MAP_OP_H #define EAVL_INFO_TOPOLOGY_SCATTER_MAP_OP_H #include "eavlCUDA.h" #include "eavlCellSet.h" #include "eavlCellSetExplicit.h" #include "eavlCellSetAllStructured.h" #include "eavlDataSet.h" #include "eavlArray.h" #include "eavlOpDispatch.h" #include "eavlOperation.h" #include "eavlTopology.h" #include "eavlException.h" #include <time.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #ifndef DOXYGEN template <class CONN> struct eavlInfoTopologyScatterMapOp_CPU { static inline eavlArray::Location location() { return eavlArray::HOST; } template <class F, class IN, class OUT, class INDEX> static void call(int nitems, CONN &conn, const IN inputs, OUT outputs, INDEX indices, F &functor) { int *sparseindices = get<0>(indices).array; #pragma omp parallel for for (int denseindex = 0; denseindex < nitems; ++denseindex) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int shapeType = conn.GetShapeType(sparseindex); collect(sparseindex, outputs) = functor(shapeType, collect(denseindex, inputs)); } } }; #if defined __CUDACC__ template <class CONN, class F, class IN, class OUT, class INDEX> __global__ void eavlInfoTopologyScatterMapOp_kernel(int nitems, CONN conn, const IN inputs, OUT outputs, INDEX indices, F functor) { int *sparseindices = get<0>(indices).array; const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int shapeType = conn.GetShapeType(sparseindex); collect(sparseindex, outputs) = functor(shapeType, collect(denseindex, inputs)); } } template <class CONN> struct eavlInfoTopologyScatterMapOp_GPU { static inline eavlArray::Location location() { return eavlArray::DEVICE; } template <class F, class IN, class OUT, class INDEX> static void call(int nitems, CONN &conn, const IN inputs, OUT outputs, INDEX indices, F &functor) { int numThreads = 256; dim3 threads(numThreads, 1, 1); dim3 blocks (32, 1, 1); eavlInfoTopologyScatterMapOp_kernel<<< blocks, threads >>>(nitems, conn, inputs, outputs, indices, functor); CUDA_CHECK_ERROR(); } }; #endif #endif // **************************************************************************** // Class: eavlInfoTopologyScatterMapOp // // Purpose: /// Map from one element in a mesh to the same element, with /// topological information passed along to the functor. /// In this scatter version of the operation, the inputs on the destination /// topology are densely indexed (0 to n-1), and the outputs are /// sparsely indexed by the index array. 
// // Programmer: Jeremy Meredith // Creation: August 1, 2013 // // Modifications: // **************************************************************************** template <class I, class O, class INDEX, class F> class eavlInfoTopologyScatterMapOp : public eavlOperation { protected: eavlCellSet *cells; eavlTopology topology; I inputs; O outputs; INDEX indices; F functor; public: eavlInfoTopologyScatterMapOp(eavlCellSet *c, eavlTopology t, I i, O o, INDEX ind, F f) : cells(c), topology(t), inputs(i), outputs(o), indices(ind), functor(f) { } virtual void GoCPU() { eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); eavlOpDispatch<eavlInfoTopologyScatterMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor); } else if (elStr) { eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlInfoTopologyScatterMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor); } } virtual void GoGPU() { #ifdef HAVE_CUDA eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); conn.shapetype.NeedOnDevice(); conn.connectivity.NeedOnDevice(); conn.mapCellToIndex.NeedOnDevice(); eavlOpDispatch<eavlInfoTopologyScatterMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor); conn.shapetype.NeedOnHost(); conn.connectivity.NeedOnHost(); conn.mapCellToIndex.NeedOnHost(); } else if (elStr) { eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlInfoTopologyScatterMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor); } #else THROW(eavlException,"Executing GPU code without compiling under CUDA compiler."); #endif } }; // helper function for type deduction template <class I, class O, class INDEX, class F> eavlInfoTopologyScatterMapOp<I,O,INDEX,F> *new_eavlInfoTopologyScatterMapOp(eavlCellSet *c, eavlTopology t, I i, O o, INDEX indices, F f) { return new eavlInfoTopologyScatterMapOp<I,O,INDEX,F>(c,t,i,o,indices,f); } #endif
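The call sites in both the CPU and GPU paths invoke the user functor as `functor(shapeType, collect(denseindex, inputs))`, so a functor for this op receives the cell's shape type alongside the collected input values. A hypothetical functor shape inferred from that call site — the name, the single float input, and the shape test are illustrative assumptions, not part of the EAVL API:

// Illustrative only: a functor usable with eavlInfoTopologyScatterMapOp when
// dispatched over a single float input array. EAVL_FUNCTOR is assumed to be
// EAVL's host/device qualifier macro and EAVL_TRI one of its cell-shape
// constants; substitute the real names from your EAVL build.
struct CellShapeToValue
{
    EAVL_FUNCTOR float operator()(int shapeType, float cellValue)
    {
        // keep the value on triangle cells, zero out every other shape
        return (shapeType == EAVL_TRI) ? cellValue : 0.f;
    }
};

Such a functor would be passed to the new_eavlInfoTopologyScatterMapOp() helper defined at the bottom of the header, which deduces the template arguments.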
softmax_hcl_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "softmax_param.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "module/module.h" #include "operator/op.h" #include "utility/sys_port.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #include <string.h> #include <arm_neon.h> static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; int ret = 0; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); if (input_tensor->dims[0] != output_tensor->dims[0] || input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != output_tensor->dims[3]) ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num); return ret; } static inline float32x4_t vexpq10_f32(float32x4_t x) { x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); // n = 10 x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); return x; } static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread) { float* input_ptr = (float*)input; float* array_ptr = (float*)array; memset(array, 0, in_size * sizeof(float)); // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < on_size; j++) { // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < (in_size & -4); i += 4) { float32x4_t _p = vld1q_f32(array_ptr + i); float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i); #ifdef __aarch64__ _p = vpmaxq_f32(_p, _in); #else _p = vmaxq_f32(_p, vrev64q_f32(_in)); _p = vmaxq_f32(_p, vextq_f32(_p, _in, 2)); #endif vst1q_f32(array_ptr + i, _p); } for (int i = in_size & ~3; i < in_size; i++) { if (array_ptr[i] < input_ptr[j * in_size + i]) array_ptr[i] = input_ptr[j * in_size + i]; } /* for(int l = 0; l < in_size; l++) { if(array_ptr[l] < input_ptr[j * in_size + l]) array_ptr[l] = input_ptr[j * in_size + l]; } */ } } static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size, int on_size, int num_thread) { float* input_ptr = (float*)input; float* output_ptr = (float*)output; float* maxarray_ptr = (float*)maxarray; float* sum_array_ptr = (float*)sum_array; memset(sum_array, 0x0, in_size * sizeof(float)); /* 
get the exp and the summary */ // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < on_size; j++) { // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < (in_size & -4); i += 4) { int index = j * in_size + i; float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i))); float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out); vst1q_f32(output_ptr + index, out); vst1q_f32(sum_array_ptr + i, sum); } for (int i = in_size & ~3; i < in_size; i++) { int index = j * in_size + i; output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]); sum_array_ptr[i] += output_ptr[index]; } } /* for(int l = 0; l < in_size; l++) { int index = j * in_size + l; output_ptr[index] = exp(input_ptr[index] - array_ptr[l]); sum_array_ptr[l] += output_ptr[index]; } */ /* the final result */ for (int j = 0; j < on_size; j++) for (int l = 0; l < in_size; l++) { int index = j * in_size + l; output_ptr[index] /= sum_array_ptr[l]; } } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct softmax_param* softmax_param = (struct softmax_param*)ir_node->op.param_mem; int element_size = input_tensor->elem_size; int dims[4]; for (int i = 0; i < input_tensor->dim_num; i++) { dims[i] = input_tensor->dims[i]; } int axis = softmax_param->axis; int out_size, in_size, on_size; out_size = 1; for (int i = 0; i < axis; i++) { out_size *= dims[i]; } in_size = 1; for (size_t i = axis + 1; i < input_tensor->dim_num; i++) { in_size *= dims[i]; } on_size = dims[axis]; uint8_t* input = input_tensor->data; uint8_t* output = output_tensor->data; float* max_array = (float*)malloc(in_size * sizeof(float)); float* sum_array = (float*)malloc(in_size * sizeof(float)); int on_in_size = on_size * in_size; float* input_f = NULL; float* output_f = NULL; if (element_size == 1) { input_f = (float*)malloc(on_in_size * 4); output_f = (float*)malloc(on_in_size * 4); /* todo */ free(input_f); free(output_f); } for (int i = 0; i < out_size; i++) { /* get max */ int img_base = i * on_in_size * element_size; GetMaxArray((float*)(input + img_base), max_array, in_size, on_size, exec_graph->num_thread); GetOutResult((float*)(input + img_base), (float*)(output + img_base), max_array, sum_array, in_size, on_size, exec_graph->num_thread); } free(max_array); free(sum_array); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { struct node* ir_node = exec_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); /* todo support uint8 */ if (input_tensor->data_type != TENGINE_DT_FP32) return 0; return OPS_SCORE_BEST; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = reshape, .postrun = NULL, .init_node = init_node, 
.release_node = release_node, .score = score}; int register_softmax_hcl_arm_op() { return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops); } int unregister_softmax_hcl_arm_op() { return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops); }
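The NEON kernel above approximates e^x in vexpq10_f32() as (1 + x/1024)^1024: one fused multiply-add (0.0009765625 = 1/1024) followed by ten squarings, since 2^10 = 1024. For clarity, here is a plain scalar sketch of the same softmax decomposition the kernel uses — the tensor viewed as [out_size][on_size][in_size] with the softmax taken over the on_size axis for each inner position; the name softmax_ref is illustrative, not part of Tengine:

#include <math.h>
#include <stddef.h>

/* Scalar reference: max-subtraction, exponentiation, normalization,
 * mirroring GetMaxArray()/GetOutResult() above. */
static void softmax_ref(const float* x, float* y, int out_size, int on_size, int in_size)
{
    for (int o = 0; o < out_size; o++)
    {
        const float* in  = x + (size_t)o * on_size * in_size;
        float*       out = y + (size_t)o * on_size * in_size;
        for (int i = 0; i < in_size; i++)
        {
            /* per-position max over the softmax axis */
            float max_v = in[i];
            for (int j = 1; j < on_size; j++)
                if (in[j * in_size + i] > max_v) max_v = in[j * in_size + i];
            /* exp and running sum */
            float sum = 0.f;
            for (int j = 0; j < on_size; j++)
            {
                float e = expf(in[j * in_size + i] - max_v);
                out[j * in_size + i] = e;
                sum += e;
            }
            /* normalize */
            for (int j = 0; j < on_size; j++)
                out[j * in_size + i] /= sum;
        }
    }
}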
tutorial_region_prof.c
/*
 * Copyright (c) 2015 - 2022, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include <geopm.h>

#include "tutorial_region.h"


#ifdef _OPENMP
static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar,
                               double *a, double *b, double *c)
{
    const size_t block = 256;
    const size_t num_block = num_stream / block;
    const size_t num_remain = num_stream % block;
    int err = 0;

    /* Register the number of work units so geopm_tprof_post() can report
     * progress; mirrors the serial variant below. */
    geopm_tprof_init(num_block);
#pragma omp parallel for
    for (size_t i = 0; i < num_block; ++i) {
        for (size_t j = 0; j < block; ++j) {
            a[i * block + j] = b[i * block + j] + scalar * c[i * block + j];
        }
        geopm_tprof_post();
    }
#pragma omp parallel for
    for (size_t j = 0; j < num_remain; ++j) {
        a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j];
    }
    return err;
}
#else
static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar,
                                  double *a, double *b, double *c)
{
    const size_t block = 256;
    const size_t num_block = num_stream / block;
    const size_t num_remain = num_stream % block;

    geopm_tprof_init(num_block);
    for (size_t i = 0; i < num_block; ++i) {
        for (size_t j = 0; j < block; ++j) {
            a[i * block + j] = b[i * block + j] + scalar * c[i * block + j];
        }
        geopm_tprof_post();
    }
    for (size_t j = 0; j < num_remain; ++j) {
        a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j];
    }
    return 0;
}
#endif

int tutorial_stream_profiled(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;
        /* cast the product, not big_o, so fractional scales still work */
        size_t num_stream = (size_t)(big_o * 500000000);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        uint64_t stream_rid;
        if (!err) {
            err = geopm_prof_region("tutorial_stream",
                                    GEOPM_REGION_HINT_MEMORY,
                                    &stream_rid);
        }
        if (!err) {
            err = posix_memalign((void **)&a, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                printf("Executing profiled STREAM triad on length %zu vectors.\n",
                       num_stream);
                fflush(stdout);
            }
            err = geopm_prof_enter(stream_rid);
        }
        if (!err) {
#ifdef _OPENMP
            err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c);
#else
            err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c);
#endif
        }
        if (!err) {
            err = geopm_prof_exit(stream_rid);
        }
        if (!err) {
            free(c);
            free(b);
            free(a);
        }
    }
    return err;
}
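/* A sketch of how the region above is typically driven; this driver is not
 * part of the tutorial sources shown here and the loop count is arbitrary.
 * Only MPI_Init/MPI_Finalize, geopm_prof_epoch() and
 * tutorial_stream_profiled() are assumed. geopm_prof_epoch() marks one pass
 * of the application's outer loop so GEOPM can compute per-epoch
 * statistics alongside the per-region attribution done by
 * geopm_prof_enter()/geopm_prof_exit(). */
#include <mpi.h>
#include <geopm.h>
#include "tutorial_region.h"

int main(int argc, char **argv)
{
    int err = MPI_Init(&argc, &argv);
    for (int i = 0; !err && i < 10; ++i) {
        (void)geopm_prof_epoch();
        /* big_o = 1.0 selects a ~500M element triad per call */
        err = tutorial_stream_profiled(1.0, 1 /* do_report */);
    }
    MPI_Finalize();
    return err;
}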
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec) {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* Compute the time remaining to wait.
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Return 1 if result is negative. */
    return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* problem size (argv[1..3]) and time steps (argv[4]) are expected on
     * the command line; "+8" adds the halo of 4 on each side */
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    double ****A = (double ****)malloc(sizeof(double ***) * 2);
    double ***roc2;
    A[0] = (double ***)malloc(sizeof(double **) * Nz);
    A[1] = (double ***)malloc(sizeof(double **) * Nz);
    roc2 = (double ***)malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double **)malloc(sizeof(double *) * Ny);
        A[1][i] = (double **)malloc(sizeof(double *) * Ny);
        roc2[i] = (double **)malloc(sizeof(double *) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double *)malloc(sizeof(double) * Nx);
            A[1][i][j] = (double *)malloc(sizeof(double) * Nx);
            roc2[i][j] = (double *)malloc(sizeof(double) * Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int *)malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 8;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    // initialize variables
    // srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2

        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) { for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(8*t3+Nx-5,64));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
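/* The CLooG-generated nest above is a time-tiled schedule of the plain
 * sweep below; the untiled form is useful as a correctness reference and
 * makes the 25-point structure visible (center plus radius-1..4 neighbors
 * along each axis). The name sweep_ref is hypothetical; coef0..coef4, A and
 * roc2 match the code above, and the loop bounds assume the same halo of 4
 * implied by the "+8" padding of each dimension. */
static void sweep_ref(int t, int Nx, int Ny, int Nz, double ****A, double ***roc2,
                      double coef0, double coef1, double coef2, double coef3, double coef4)
{
    const double coef[4] = {coef1, coef2, coef3, coef4};
    for (int i = 4; i < Nz - 4; i++)
        for (int j = 4; j < Ny - 4; j++)
            for (int k = 4; k < Nx - 4; k++) {
                /* weighted 25-point Laplacian around (i, j, k) */
                double lap = coef0 * A[t % 2][i][j][k];
                for (int r = 1; r <= 4; r++)
                    lap += coef[r - 1] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]
                                        + A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]
                                        + A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
                /* leapfrog update: the previous time level is read, then overwritten */
                A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k]
                                        - A[(t + 1) % 2][i][j][k]
                                        + roc2[i][j][k] * lap;
            }
}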
GB_unop__one_int16_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__one_int16_int16) // op(A') function: GB (_unop_tran__one_int16_int16) // C type: int16_t // A type: int16_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__one_int16_int16) ( int16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; ; ; ; ; Cx [p] = 1 ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__one_int16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
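/* For context: files in the Generated/ folder are never called directly.
 * This kernel is what the library dispatches to when a user applies the
 * SuiteSparse GxB_ONE_INT16 unary operator ("z = 1") to an int16 matrix.
 * A minimal user-level sketch follows; the helper name apply_one is
 * hypothetical, everything else is the standard GraphBLAS C API. */
#include <GraphBLAS.h>

GrB_Info apply_one (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Info info = GrB_Matrix_nrows (&nrows, A) ;
    if (info != GrB_SUCCESS) return (info) ;
    info = GrB_Matrix_ncols (&ncols, A) ;
    if (info != GrB_SUCCESS) return (info) ;
    info = GrB_Matrix_new (C, GrB_INT16, nrows, ncols) ;
    if (info != GrB_SUCCESS) return (info) ;
    /* C = one (A): every stored entry of C becomes (int16_t) 1 */
    return (GrB_Matrix_apply (*C, NULL, NULL, GxB_ONE_INT16, A, NULL)) ;
}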
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Numbers of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// /param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. 
template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (and assertion /// is fired if there is more than one clause is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive. template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. 
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. CapturedStmt *getInnermostCapturedStmt() { assert(hasAssociatedStmt() && getAssociatedStmt() && "Must have associated statement."); SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt *getInnermostCapturedStmt() const { return const_cast<OMPExecutableDirective *>(this) ->getInnermostCapturedStmt(); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(child_iterator(), child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); /// Do not mark all the special expression/statements as children, except /// for the associated statement. return child_range(ChildStorage, ChildStorage + 1); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 11 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, three arrays of length CollapsedNum are /// allocated: loop counters, their updates and final values. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives. WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following counters/updates/finals // arrays) for combined distribute loop directives. CombinedDistributeEnd = 30, }; /// Get the counters storage. 
MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters, // PrivateCounters, Inits, // Updates and Finals } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setPreInits(Stmt *PreInits) { *std::next(child_begin(), PreInitsOffset) = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NumIterationsOffset) = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { 
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), DistIncOffset) = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedInitOffset) = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedConditionOffset) = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedParForInDistConditionOffset) = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. 
Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for GodeGen. SmallVector<Expr *, 4> Finals; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). 
bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the counters/finals/updates arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops. unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } const Stmt *getPreInits() const { return *std::next(child_begin(), PreInitsOffset); } Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), 
UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NumIterationsOffset))); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevLowerBoundVariableOffset))); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevUpperBoundVariableOffset))); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), DistIncOffset))); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevEnsureUpperBoundOffset))); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedLowerBoundVariableOffset))); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedUpperBoundVariableOffset))); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( 
*std::next(child_begin(), CombinedEnsureUpperBoundOffset))); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedInitOffset))); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedConditionOffset))); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextLowerBoundOffset))); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextUpperBoundOffset))); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedDistConditionOffset))); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedParForInDistConditionOffset))); } const Stmt *getBody() const { // This relies on the loop form is already checked by Sema. const Stmt *Body = getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); Body = cast<ForStmt>(Body)->getBody(); for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) { Body = Body->IgnoreContainers(); Body = cast<ForStmt>(Body)->getBody(); } return Body; } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == 
OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. 
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
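// For orientation, user code of the following shape is what makes Sema pass
// HasCancel = true to OMPSectionsDirective::Create (error_detected() and
// do_work() are hypothetical placeholders):
//
// \code
// #pragma omp parallel
// #pragma omp sections
// {
//   #pragma omp section
//   {
//     if (error_detected()) {
//       #pragma omp cancel sections
//     }
//     do_work();
//   }
// }
// \endcode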
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1),
        HasCancel(false) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};

/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
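// All of these node classes follow the same two-phase construction protocol:
// Sema builds fully-formed nodes through the public Create() factory, while
// AST deserialization first materializes a shell via CreateEmpty() and fills
// it in afterwards (hence the ASTStmtReader friendship and the private
// constructors). A hedged sketch of the Sema-side call, with an illustrative
// wrapper name:
//
// \code
// static OMPSingleDirective *buildSingle(ASTContext &Ctx, SourceLocation B,
//                                        SourceLocation E,
//                                        ArrayRef<OMPClause *> Clauses,
//                                        Stmt *Body) {
//   return OMPSingleDirective::Create(Ctx, B, E, Clauses, Body);
// }
// \endcode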
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}

  /// Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};

/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Name of the directive.
  DeclarationNameInfo DirName;

  /// Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name,
                       SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, NumClauses, 1),
        DirName(Name) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPCriticalDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        DirName() {}

  /// Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
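// Unlike its neighbors, 'critical' can carry a user-supplied name (the
// optional parenthesized identifier), which is why the node stores a
// DeclarationNameInfo. A hedged sketch of reading it back (illustrative
// helper, not part of this header):
//
// \code
// static void printCriticalName(const OMPCriticalDirective *D) {
//   DeclarationNameInfo DNI = D->getDirectiveName();
//   if (!DNI.getName().isEmpty())
//     llvm::errs() << "critical region '" << DNI.getAsString() << "'\n";
//   else
//     llvm::errs() << "unnamed critical region\n";
// }
// \endcode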
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if this directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass,
                               OMPD_taskyield, StartLoc, EndLoc, 0, 0) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass,
                               OMPD_taskyield, SourceLocation(),
                               SourceLocation(), 0, 0) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};

/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, 0, 0) {}

  /// Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};

/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, 0, 0) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass,
                               OMPD_taskgroup, StartLoc, EndLoc, NumClauses,
                               2) {}

  /// Build an empty directive.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskgroupDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass,
                               OMPD_taskgroup, SourceLocation(),
                               SourceLocation(), NumClauses, 2) {}

  /// Sets the task_reduction return variable.
  void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return static_cast<const Expr *>(*std::next(child_begin(), 1));
  }
  Expr *getReductionRef() {
    return static_cast<Expr *>(*std::next(child_begin(), 1));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
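// The '*std::next(child_begin(), 1)' indexing above exposes the storage
// scheme: helper expressions live in the trailing child array at fixed
// offsets (slot 0 holds the associated statement; taskgroup reserves two
// children for exactly this reason). Consumers should stick to the typed
// accessor; a hedged sketch (illustrative helper, not part of this header):
//
// \code
// static void inspectTaskgroup(const OMPTaskgroupDirective *D) {
//   if (const Expr *RR = D->getReductionRef())
//     RR->dumpColor(); // helper variable reference synthesized by Sema
//   else
//     llvm::errs() << "taskgroup without a task_reduction clause\n";
// }
// \endcode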
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has two arguments: variables
/// 'a' and 'b'. The 'omp flush' directive does not have clauses, but it has an
/// optional list of variables to flush. This list of variables is stored
/// within a fake FlushClause clause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only a single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};

/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first (postfix) form of the expression and
  /// false otherwise.
  bool IsPostfixUpdate;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }

  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }

  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }

  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }

  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }

  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }

  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }

  /// Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }

  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
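// Concretely, the two flags stored on OMPAtomicDirective distinguish the
// following source forms (illustrative user code; the distinction drives
// codegen for non-commutative operators and decides which value of 'x'
// lands in 'v'):
//
// \code
// #pragma omp atomic update
// x = x + expr;  // IsXLHSInRHSPart == true: 'x' is the LHS of the RHS binop
// #pragma omp atomic update
// x = expr - x;  // IsXLHSInRHSPart == false: order matters for '-', '<<', ...
// #pragma omp atomic capture
// { v = x; x = x + expr; }  // IsPostfixUpdate == true: 'v' gets the old 'x'
// #pragma omp atomic capture
// { x = x + expr; v = x; }  // IsPostfixUpdate == false: 'v' gets the new 'x'
// \endcode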
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};

/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
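// For orientation, the structured 'target data' region this node models keeps
// a mapping alive across several enclosed target regions (illustrative user
// code):
//
// \code
// #pragma omp target data map(tofrom: b[0:n])
// {
//   #pragma omp target
//   for (int i = 0; i < n; ++i)
//     b[i] += 1.0;
//   #pragma omp target
//   for (int i = 0; i < n; ++i)
//     b[i] *= 3.0;
// }
// \endcode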
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
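// In contrast to the structured 'target data' region above, the enter/exit
// pair models unstructured data lifetimes: the mapping of 'b' begins at
// 'enter data' and ends at 'exit data', independent of block scoping
// (illustrative user code):
//
// \code
// #pragma omp target enter data map(to: b[0:n])
// #pragma omp target
// for (int i = 0; i < n; ++i)
//   b[i] *= 2.0;
// #pragma omp target exit data map(from: b[0:n])
// \endcode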
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if'
/// with condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for the innermost 'for'
/// region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
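// A hedged sketch of consuming the cancel-region kind; it assumes the usual
// getOpenMPDirectiveName() pretty-printer from clang/Basic/OpenMPKinds.h is
// available (illustrative helper, not part of this header):
//
// \code
// static void printCancelRegion(const OMPCancellationPointDirective *D) {
//   // '#pragma omp cancellation point for' answers OMPD_for here; only
//   // empty deserialization shells carry OMPD_unknown.
//   llvm::errs() << "cancellation point for region '"
//                << getOpenMPDirectiveName(D->getCancelRegion()) << "'\n";
// }
// \endcode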
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for the innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};

/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum,
                                    unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. 
/// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
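// Illustrative usage sketch (not part of the original header): the classof()
// hooks declared above are what allow LLVM-style RTTI (llvm::isa<> /
// llvm::dyn_cast<>) to discriminate between these directive classes when
// walking the AST. A minimal consumer, assuming the usual helpers from
// llvm/Support/Casting.h, might look like:
//
//   void inspectDirective(const clang::Stmt *S) {
//     using namespace clang;
//     if (const auto *TL = llvm::dyn_cast<OMPTaskLoopDirective>(S))
//       llvm::errs() << "taskloop with " << TL->getNumClauses()
//                    << " clauses\n";
//     else if (llvm::isa<OMPTargetTeamsDistributeSimdDirective>(S))
//       llvm::errs() << "target teams distribute simd\n";
//   }
//
// getNumClauses() comes from the OMPExecutableDirective base class that all
// of these directive classes ultimately derive from.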
Example_reduction.2.c
/* * @@name: reduction.2c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ #include <limits.h> #include <math.h> void reduction2(float *x, int *y, int n) { int i, b, b_p, c, c_p; float a, a_p, d, d_p; a = 0.0f; b = 0; c = y[0]; d = x[0]; #pragma omp parallel shared(a, b, c, d, x, y, n) \ private(a_p, b_p, c_p, d_p) { a_p = 0.0f; b_p = 0; c_p = INT_MAX; d_p = -HUGE_VALF; #pragma omp for private(i) for (i=0; i<n; i++) { a_p += x[i]; b_p ^= y[i]; if (c_p > y[i]) c_p = y[i]; d_p = fmaxf(d_p,x[i]); } #pragma omp critical { a += a_p; b ^= b_p; if( c > c_p ) c = c_p; d = fmaxf(d,d_p); } } }
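/* For reference, a sketch (not part of the original example) of the same
 * computation written with OpenMP reduction clauses instead of the explicit
 * private copies and critical section above. Since OpenMP 3.1 the min and
 * max operators are valid reduction identifiers in C, so the runtime can
 * manage the per-thread partial results and the final combination itself: */
void reduction2_with_clauses(float *x, int *y, int n)
{
   int i;
   float a = 0.0f, d = x[0];
   int b = 0, c = y[0];
   #pragma omp parallel for private(i) shared(x, y, n) \
                            reduction(+:a) reduction(^:b) \
                            reduction(min:c) reduction(max:d)
   for (i=0; i<n; i++) {
      a += x[i];
      b ^= y[i];
      if (c > y[i]) c = y[i];
      d = fmaxf(d, x[i]);
   }
}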
sa.c
#include "common.h" static void restore_edge(const int groups, const int kind_opt, int* restrict edge, int* restrict restored_line, const int* restrict restored_edge) { if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT) ERROR("Wrong kind_opt: %d\n", kind_opt); #pragma omp parallel for for(int i=0;i<groups*kind_opt;i++){ edge[restored_line[i]*2 ] = restored_edge[i*2 ]; edge[restored_line[i]*2+1] = restored_edge[i*2+1]; } } static void restore_adj(const int degree, const int groups, int* restrict adj, const int kind_opt, int* restrict restored_adj_value, int* restrict restored_adj_idx_y, int* restrict restored_adj_idx_x) { if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT) ERROR("Wrong kind_opt: %d\n", kind_opt); #pragma omp parallel for for(int i=0;i<kind_opt*groups*2;i++){ int y = restored_adj_idx_y[i]; int x = restored_adj_idx_x[i]; adj[y*degree+x] = restored_adj_value[i]; } } static void copy_edge(int *restrict dst, const int *restrict src, const int n) { #pragma omp parallel for for(int i=0;i<n;i++) dst[i] = src[i]; } static double uniform_rand() { return ((double)random()+1.0)/((double)RAND_MAX+2.0); } static void print_result_header() { PRINT_R0(" Times\t Temp\tCur. ASPL GAP\t\tBest ASPL GAP\t\t"); PRINT_R0("Cur. Dia. GAP\t\tBest Dia. GAP\tAccept Rate\n"); } static void print_results(const long long num, const double temp, const double current_ASPL, const double best_ASPL, const double low_ASPL, const int current_diam, const int best_diam, const int low_diam, const long long accepts, const long long rejects) { PRINT_R0("%8lld\t%f\t", num, temp); PRINT_R0("%f ( %f )\t%f ( %f )\t%d ( %d )\t\t\t%d ( %d )\t\t", current_ASPL, current_ASPL-low_ASPL, best_ASPL, best_ASPL-low_ASPL, current_diam, current_diam-low_diam, best_diam, best_diam-low_diam); if(num != 0) PRINT_R0("%.4f ( %lld / %lld )\n", (double)accepts/(accepts+rejects), accepts, (accepts+rejects)); else PRINT_R0("-\n"); } void create_adj(const int nodes, const int lines, const int degree, const int edge[lines][2], int adj[nodes][degree]) { int count[nodes]; for(int i=0;i<nodes;i++) count[i] = 0; for(int i=0;i<lines;i++){ int n1 = edge[i][0]; int n2 = edge[i][1]; adj[n1][count[n1]++] = n2; adj[n2][count[n2]++] = n1; } } #define CENTER_VERTEX -1 int distance(int nodes, const int a, const int b, const int added_centers) { if(a >= nodes-added_centers || b >= nodes-added_centers) return CENTER_VERTEX; int v = MAX(a, b) - MIN(a, b); if(added_centers) nodes -= added_centers; return (v < nodes/2.0)? 
v : nodes-v; } bool check(const int nodes, const int based_nodes, const int lines, const int degree, const int groups, int edge[lines][2], const int added_centers, int* adj, const int ii) { bool flag = true; int based_lines = lines/groups; #pragma omp parallel for for(int i=0;i<based_lines;i++){ for(int j=1;j<groups;j++){ int k = j * based_lines + i; if(distance(nodes, edge[i][0], edge[i][1], added_centers) != distance(nodes, edge[k][0], edge[k][1], added_centers)){ PRINT_R0("check 1: %d\n", ii); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", i, edge[i][0], i, edge[i][1], distance(nodes, edge[i][0], edge[i][1], added_centers)); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", k, edge[k][0], k, edge[k][1], distance(nodes, edge[k][0], edge[k][1], added_centers)); flag = false; } } } #pragma omp parallel for for(int i=0;i<based_lines;i++){ for(int j=1;j<groups;j++){ int k = j * based_lines + i; if(order(nodes, edge[i][0], edge[i][1], added_centers) != order(nodes, edge[k][0], edge[k][1], added_centers)){ PRINT_R0("check 2 : %d\n", ii); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", i, edge[i][0], i, edge[i][1], order(nodes, edge[i][0], edge[i][1], added_centers)); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", k, edge[k][0], k, edge[k][1], order(nodes, edge[k][0], edge[k][1], added_centers)); flag = false; } } } #pragma omp parallel for for(int i=0;i<based_lines;i++){ if(order(nodes, edge[i][0], edge[i][1], added_centers) != MIDDLE) for(int j=1;j<groups;j++){ int k = j * based_lines + i; int tmp0 = edge[k][0] - edge[k-based_lines][0]; int tmp1 = edge[k][1] - edge[k-based_lines][1]; if(added_centers){ tmp0 = (tmp0 < 0)? tmp0+nodes-added_centers : tmp0; tmp1 = (tmp1 < 0)? tmp1+nodes-added_centers : tmp1; } else{ tmp0 = (tmp0 < 0)? tmp0 + nodes : tmp0; tmp1 = (tmp1 < 0)? 
tmp1 + nodes : tmp1; } if(tmp0 != based_nodes || tmp1 != based_nodes){ PRINT_R0("check 4: %d\n", ii); PRINT_R0("The different group relationship\n"); PRINT_R0("edge[%d][0]-edge[%d][0] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][0], edge[k-based_lines][0], tmp0, based_nodes); PRINT_R0("edge[%d][1]-edge[%d][1] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][1], edge[k-based_lines][1], tmp1, based_nodes); flag = false; } } } if(adj != NULL){ int *tmp_adj = malloc(sizeof(int)*nodes*degree); create_adj(nodes, lines, degree, (const int (*)[2])edge, (int (*)[degree])tmp_adj); for(int i=0;i<nodes;i++){ int sum[2] = {0,0}; for(int j=0;j<degree;j++){ sum[0] += *(adj + i * degree + j); sum[1] += *(tmp_adj + i * degree + j); } if(sum[0] != sum[1]){ PRINT_R0("[ii=%d] Error 5 %d %d\n", ii, sum[0], sum[1]); for(int j=0;j<degree;j++) PRINT_R0("%d ", *(adj + i * degree + j)); PRINT_R0("\n"); for(int j=0;j<degree;j++) PRINT_R0("%d ", *(tmp_adj + i * degree + j)); PRINT_R0("\n"); flag = false; break; } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(adj + i * degree + j); int k; for(k=0;k<degree;k++) if(tmp == *(tmp_adj + i * degree + k)) break; if(k == degree){ PRINT_R0("[ii=%d] Error 6\n", ii); flag = false; break; } } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(tmp_adj + i * degree + j); int k; for(k=0;k<degree;k++) if(tmp == *(adj + i * degree + k)) break; if(k == degree){ PRINT_R0("[ii=%d] Error 7\n", ii); flag = false; break; } } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(adj + i * degree + j); for(int k=j+1;k<degree;k++) if(tmp == *(adj + i * degree + k)){ flag = false; break; } } } free(tmp_adj); } return flag; } bool has_duplicated_vertex(const int e00, const int e01, const int e10, const int e11) { return (e00 == e10 || e01 == e11 || e00 == e11 || e01 == e10); } static void exchange_edge_2opt(const int nodes, const int lines, const int groups, const int degree, const int based_nodes, int edge[lines][2], const int added_centers, int* restrict adj, int *kind_opt, int* restrict restored_edge, int* restrict restored_line, int* restrict restored_adj_value, int* restrict restored_adj_idx_y, int* restrict restored_adj_idx_x, const bool is_simple_graph, const int ii) { int tmp_line[groups*2], tmp_edge[groups*2][2], r; int based_lines = lines / groups; while(1){ while(1){ while(1){ tmp_line[0] = getRandom(lines); tmp_line[1] = getRandom(lines); if(tmp_line[0] != tmp_line[1]) break; } if(has_duplicated_vertex(edge[tmp_line[0]][0], edge[tmp_line[0]][1], edge[tmp_line[1]][0], edge[tmp_line[1]][1])){ continue; } else if((tmp_line[0] - tmp_line[1]) % based_lines == 0){ if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers, adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, ii)) return; else continue; } else break; } bool flag0 = (distance(nodes, edge[tmp_line[0]][0], edge[tmp_line[0]][1], added_centers) == (nodes-added_centers)/2); bool flag1 = (distance(nodes, edge[tmp_line[1]][0], edge[tmp_line[1]][1], added_centers) == (nodes-added_centers)/2); bool diameter_flag = ((flag0 || flag1) && groups%2 == 0); if(diameter_flag){ if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers, adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, ii)) return; else continue; } // 2g-opt for(int i=1;i<groups;i++){ int 
tmp0 = tmp_line[0] + based_lines * i; int tmp1 = tmp_line[1] + based_lines * i; tmp_line[0+2*i] = (tmp0 >= lines)? tmp0 - lines : tmp0; tmp_line[1+2*i] = (tmp1 >= lines)? tmp1 - lines : tmp1; } for(int i=0;i<groups*2;i++) for(int j=0;j<2;j++) tmp_edge[i][j] = edge[tmp_line[i]][j]; r = getRandom(2); if(r == 0){ for(int i=0;i<groups;i++) swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][1]); } else{ for(int i=0;i<groups;i++) swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][0]); } assert(check_loop(groups*2, tmp_edge)); if(!check_duplicate_tmp_edge(2, groups, tmp_edge)) continue; else if(!check_duplicate_current_edge(lines, groups*2, tmp_line, edge, tmp_edge, groups, 2, false)) continue; else break; } // end while for(int i=0;i<groups*2;i++) if(order(nodes, tmp_edge[i][0], tmp_edge[i][1], added_centers) == RIGHT) swap(&tmp_edge[i][0], &tmp_edge[i][1]); // RIGHT -> LEFT if(is_simple_graph){ // Change a part of adj. int y0[groups], y1[groups], y2[groups], y3[groups]; int x0[groups], x1[groups], x2[groups], x3[groups]; #pragma omp parallel for for(int i=0;i<groups;i++){ y0[i] = edge[tmp_line[i*2 ]][0]; y1[i] = edge[tmp_line[i*2 ]][1]; y2[i] = edge[tmp_line[i*2+1]][0]; y3[i] = edge[tmp_line[i*2+1]][1]; for(x0[i]=0;x0[i]<degree;x0[i]++) if(adj[y0[i]*degree+x0[i]] == y1[i]) break; for(x1[i]=0;x1[i]<degree;x1[i]++) if(adj[y1[i]*degree+x1[i]] == y0[i]) break; for(x2[i]=0;x2[i]<degree;x2[i]++) if(adj[y2[i]*degree+x2[i]] == y3[i]) break; for(x3[i]=0;x3[i]<degree;x3[i]++) if(adj[y3[i]*degree+x3[i]] == y2[i]) break; if(x0[i] == degree || x1[i] == degree || x2[i] == degree || x3[i] == degree) ERROR("%d : %d %d %d %d\n", ii, x0[i], x1[i], x2[i], x3[i]); restored_adj_idx_y[i*4 ] = y0[i]; restored_adj_idx_x[i*4 ] = x0[i]; restored_adj_idx_y[i*4+1] = y1[i]; restored_adj_idx_x[i*4+1] = x1[i]; restored_adj_idx_y[i*4+2] = y2[i]; restored_adj_idx_x[i*4+2] = x2[i]; restored_adj_idx_y[i*4+3] = y3[i]; restored_adj_idx_x[i*4+3] = x3[i]; restored_adj_value[i*4 ] = adj[y0[i]*degree+x0[i]]; restored_adj_value[i*4+1] = adj[y1[i]*degree+x1[i]]; restored_adj_value[i*4+2] = adj[y2[i]*degree+x2[i]]; restored_adj_value[i*4+3] = adj[y3[i]*degree+x3[i]]; // restored_line[i*2 ] = tmp_line[i*2 ]; restored_line[i*2+1] = tmp_line[i*2+1]; restored_edge[i*4 ] = edge[tmp_line[i*2 ]][0]; restored_edge[i*4+1] = edge[tmp_line[i*2 ]][1]; restored_edge[i*4+2] = edge[tmp_line[i*2+1]][0]; restored_edge[i*4+3] = edge[tmp_line[i*2+1]][1]; } #pragma omp parallel for for(int i=0;i<groups;i++){ if(r==0){ adj[y0[i]*degree+x0[i]] = y3[i]; adj[y1[i]*degree+x1[i]] = y2[i]; adj[y2[i]*degree+x2[i]] = y1[i]; adj[y3[i]*degree+x3[i]] = y0[i]; } else{ adj[y0[i]*degree+x0[i]] = y2[i]; adj[y1[i]*degree+x1[i]] = y3[i]; adj[y2[i]*degree+x2[i]] = y0[i]; adj[y3[i]*degree+x3[i]] = y1[i]; } } } #pragma omp parallel for for(int i=0;i<groups;i++){ edge[tmp_line[i*2 ]][0] = tmp_edge[i*2 ][0]; edge[tmp_line[i*2+1]][0] = tmp_edge[i*2+1][0]; edge[tmp_line[i*2 ]][1] = tmp_edge[i*2 ][1]; edge[tmp_line[i*2+1]][1] = tmp_edge[i*2+1][1]; } *kind_opt = D_2G_OPT; } static bool accept(const int new_diam, const int current_diam, const double new_ASPL, const double current_ASPL, const double temp, const int nodes, const int groups, const bool hill_climbing_flag, const bool detect_temp_flag, const long long i, double *max_diff_energy, long long *total_accepts, long long *accepts, long long *rejects) { if(new_diam < current_diam){ *accepts += 1; if(i > SKIP_ACCEPTS) *total_accepts +=1; return true; } else if(new_diam > current_diam){ *rejects += 1; return false; } else{ // new_diam == current_diam 
if(new_ASPL <= current_ASPL){ *accepts += 1; if(i > SKIP_ACCEPTS) *total_accepts +=1; return true; } else if(hill_climbing_flag){ // Only accept when ASPL <= current_ASPL. *rejects += 1; return false; } double diff = ((current_ASPL-new_ASPL)*nodes*(nodes-1))/groups; if(detect_temp_flag) *max_diff_energy = MAX(*max_diff_energy, -1.0 * diff); if(exp(diff/temp) > uniform_rand()){ *accepts += 1; if(i > SKIP_ACCEPTS) *total_accepts +=1; return true; } else{ *rejects += 1; return false; } } } long long sa(const int nodes, const int lines, const int degree, const int groups, double temp, const long long ncalcs, const double cooling_rate, const int low_diam, const double low_ASPL, const bool hill_climbing_flag, const bool detect_temp_flag, double *max_diff_energy, int edge[lines][2], int *diam, double *ASPL, const int cooling_cycle, const int added_centers, const int based_nodes, long long *total_accepts, const bool is_simple_graph, const int num_degrees[nodes], const int algo) { long long ii, accepts = 0, rejects = 0; int best_edge[lines][2], tmp_edge[lines][2], tmp_edge_nsg[lines][2] /* nsg = not simple graph */, kind_opt; int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4]; int restored_edge[groups*4], restored_line[groups*2]; bool restore_flag = false; copy_edge((int *)best_edge, (int *)edge, lines*2); copy_edge((int *)tmp_edge, (int *)edge, lines*2); // Create adj matrix int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree]; create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj); evaluation(nodes, based_nodes, groups, lines, degree, adj, diam, ASPL, added_centers, num_degrees, algo); double current_ASPL = *ASPL; double best_ASPL = *ASPL; int current_diam = *diam; int best_diam = *diam; int print_interval = (ncalcs/NUM_OF_PROGRESS == 0)? 
1 : ncalcs/NUM_OF_PROGRESS; if(rank == 0 && !detect_temp_flag) print_result_header(); for(ii=0;ii<ncalcs;ii++){ double tmp_ASPL; int tmp_diam; if(ii % print_interval == 0 && !detect_temp_flag){ print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL, current_diam, best_diam, low_diam, accepts, rejects); accepts = 0; rejects = 0; } while(1){ if(is_simple_graph){ if(restore_flag){ restore_adj(degree, groups, adj, kind_opt, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x); restore_edge(groups, kind_opt, (int *)tmp_edge, restored_line, restored_edge); } } else{ copy_edge((int *)tmp_edge_nsg, (int *)tmp_edge, lines*2); } exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers, adj, &kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)ii); if(!is_simple_graph) create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj); assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)ii)); if(evaluation(nodes, based_nodes, groups, lines, degree, adj, &tmp_diam, &tmp_ASPL, added_centers, num_degrees, algo)) break; else{ if(is_simple_graph) restore_flag = true; else copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2); } } if(!accept(tmp_diam, current_diam, tmp_ASPL, current_ASPL, temp, nodes, groups, hill_climbing_flag, detect_temp_flag, ii, max_diff_energy, total_accepts, &accepts, &rejects)){ if(is_simple_graph) restore_flag = true; else copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2); } else{ if(is_simple_graph) restore_flag = false; current_ASPL = tmp_ASPL; current_diam = tmp_diam; if((best_diam > current_diam) || (best_diam == current_diam && best_ASPL > current_ASPL)){ copy_edge((int *)best_edge, (int *)tmp_edge, lines*2); best_ASPL = current_ASPL; best_diam = current_diam; } if(best_diam == current_diam && best_ASPL == low_ASPL){ if(!detect_temp_flag){ print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL, current_diam, best_diam, low_diam, accepts, rejects); PRINT_R0("---\nFound optimum solution.\n"); } break; } } if((ii+1)%cooling_cycle == 0) temp *= cooling_rate; } *ASPL = best_ASPL; *diam = best_diam; copy_edge((int *)edge, (int *)best_edge, lines*2); free(adj); return ii; } #define ESTIMATED_TIMES 5 double estimate_elapse_time(const int nodes, const int based_nodes, const int lines, const int degree, const int groups, int edge[lines][2], const int added_centers, const bool is_simple_graph, const int num_degrees[nodes], const int algo) { int diam; // Not use double ASPL; // Not use int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree]; int (*tmp_edge)[2] = malloc(sizeof(int)*lines*2); // int tmp_edge[lines][2]; int kind_opt; int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4]; int restored_edge[groups*4], restored_line[groups*2]; copy_edge((int *)tmp_edge, (int *)edge, lines*2); create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj); timer_start(TIMER_ESTIMATED); for(int i=0;i<ESTIMATED_TIMES;i++){ exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers, adj, &kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)i); if(!is_simple_graph) create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj); assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)i)); 
evaluation(nodes, based_nodes, groups, lines, degree, adj, &diam, &ASPL,
               added_centers, num_degrees, algo);
  }
  timer_stop(TIMER_ESTIMATED);

  free(tmp_edge);
  free(adj);

  return timer_read(TIMER_ESTIMATED)/ESTIMATED_TIMES;
}

// This function is mainly useful when groups is 1.
void check_current_edge(const int nodes, const int degree, const int lines, const int groups,
                        const int based_nodes, int edge[lines][2], const double low_ASPL,
                        const int added_centers, const int num_degrees[nodes], const int algo)
{
  int diam;    // Not used
  double ASPL;
  int (*adj)[degree] = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];

  create_adj(nodes, lines, degree, (const int (*)[2])edge, adj);
  if(! evaluation(nodes, based_nodes, groups, lines, degree, (int *)adj, &diam, &ASPL,
                  added_centers, num_degrees, algo))
    ERROR("The input file has a node which is never reached by another node.\n");

  if(ASPL == low_ASPL)
    END("The input file already contains an optimum solution.\n");

  free(adj);
}
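/* Illustrative sketch (not part of the original sa.c): the test in accept()
 * above is the standard Metropolis criterion. The "energy" of a solution is
 * its total path length, ASPL*nodes*(nodes-1)/groups, so a move that worsens
 * the energy by some delta is still taken with probability exp(-delta/temp).
 * Isolated here for clarity, reusing this file's uniform_rand() in (0,1): */
static bool metropolis_accept(const double new_energy, const double current_energy,
                              const double temp)
{
  if(new_energy <= current_energy) return true; // Downhill moves: always accept
  // Uphill moves: accept with probability exp(-(new_energy-current_energy)/temp)
  return exp((current_energy-new_energy)/temp) > uniform_rand();
}
/* Combined with the geometric schedule in sa() (temp *= cooling_rate every
 * cooling_cycle iterations), the temperature after k cooling steps is
 * temp0 * pow(cooling_rate, k), so uphill moves become exponentially rarer
 * as the annealing proceeds. */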
convolution_3x3_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 8b-8a-inch/8a-64-outch/8b; kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack8.channel(q / 8); for (int k = 0; k < 64; k++) { unsigned short* g00 = (unsigned short*)g0.row(k); for (int p = 0; p + 7 < inch; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = 
k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); g00[0] = float32_to_float16(k00[k]); g00[1] = float32_to_float16(k10[k]); g00[2] = float32_to_float16(k20[k]); g00[3] = float32_to_float16(k30[k]); g00[4] = float32_to_float16(k40[k]); g00[5] = float32_to_float16(k50[k]); g00[6] = float32_to_float16(k60[k]); g00[7] = float32_to_float16(k70[k]); g00[8] = float32_to_float16(k01[k]); g00[9] = float32_to_float16(k11[k]); g00[10] = float32_to_float16(k21[k]); g00[11] = float32_to_float16(k31[k]); g00[12] = float32_to_float16(k41[k]); g00[13] = float32_to_float16(k51[k]); g00[14] = float32_to_float16(k61[k]); g00[15] = float32_to_float16(k71[k]); g00[16] = float32_to_float16(k02[k]); g00[17] = float32_to_float16(k12[k]); g00[18] = float32_to_float16(k22[k]); g00[19] = float32_to_float16(k32[k]); g00[20] = float32_to_float16(k42[k]); g00[21] = float32_to_float16(k52[k]); g00[22] = float32_to_float16(k62[k]); g00[23] = float32_to_float16(k72[k]); g00[24] = float32_to_float16(k03[k]); g00[25] = float32_to_float16(k13[k]); g00[26] = float32_to_float16(k23[k]); g00[27] = float32_to_float16(k33[k]); g00[28] = float32_to_float16(k43[k]); g00[29] = float32_to_float16(k53[k]); g00[30] = float32_to_float16(k63[k]); g00[31] = float32_to_float16(k73[k]); g00[32] = float32_to_float16(k04[k]); g00[33] = float32_to_float16(k14[k]); g00[34] = float32_to_float16(k24[k]); g00[35] = float32_to_float16(k34[k]); g00[36] = float32_to_float16(k44[k]); g00[37] = float32_to_float16(k54[k]); g00[38] = float32_to_float16(k64[k]); g00[39] = float32_to_float16(k74[k]); g00[40] = float32_to_float16(k05[k]); g00[41] = float32_to_float16(k15[k]); g00[42] = float32_to_float16(k25[k]); g00[43] = float32_to_float16(k35[k]); g00[44] = float32_to_float16(k45[k]); g00[45] = float32_to_float16(k55[k]); g00[46] = float32_to_float16(k65[k]); g00[47] = float32_to_float16(k75[k]); g00[48] = float32_to_float16(k06[k]); g00[49] = float32_to_float16(k16[k]); g00[50] = float32_to_float16(k26[k]); g00[51] = float32_to_float16(k36[k]); g00[52] = float32_to_float16(k46[k]); g00[53] = float32_to_float16(k56[k]); g00[54] = float32_to_float16(k66[k]); g00[55] = float32_to_float16(k76[k]); g00[56] = float32_to_float16(k07[k]); g00[57] = float32_to_float16(k17[k]); g00[58] = float32_to_float16(k27[k]); g00[59] = float32_to_float16(k37[k]); g00[60] = 
float32_to_float16(k47[k]); g00[61] = float32_to_float16(k57[k]); g00[62] = float32_to_float16(k67[k]); g00[63] = float32_to_float16(k77[k]); g00 += 64; } } } } static void conv3x3s1_winograd64_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _tmp0m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r00, _r06), _mm256_sub_ps(_r04, _r02), 5.25f); __m256 _tmp7m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r07, _r01), _mm256_sub_ps(_r03, _r05), 5.25f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_r02, _r06), _r04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); __m256 _tmp1m = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _tmp2m = _mm256_sub_ps(_tmp12a, _tmp12b); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[2][m], 
_tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_r06, _r02, 0.25f), _r04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(0.5f)), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); __m256 _tmp3m = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _tmp4m = _mm256_sub_ps(_tmp34a, _tmp34b); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_r06, _mm256_fmrsub_1_ps(_r02, _r04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(2.f)), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); __m256 _tmp5m = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _tmp6m = _mm256_sub_ps(_tmp56a, _tmp56b); _mm256_storeu_ps(tmp[5][m], _tmp5m); _mm256_storeu_ps(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 8; float* r0_tm_1 = r0_tm_0 + tiles * 8; float* r0_tm_2 = r0_tm_0 + tiles * 16; float* r0_tm_3 = r0_tm_0 + tiles * 24; float* r0_tm_4 = r0_tm_0 + tiles * 32; float* r0_tm_5 = r0_tm_0 + tiles * 40; float* r0_tm_6 = r0_tm_0 + tiles * 48; float* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _r0tm0 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp00, _tmp06), _mm256_sub_ps(_tmp04, _tmp02), 5.25f); __m256 _r0tm7 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp07, _tmp01), _mm256_sub_ps(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp02, _tmp06), _tmp04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); __m256 _r0tm1 = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _r0tm2 = _mm256_sub_ps(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(0.5f)), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); __m256 _r0tm3 = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _r0tm4 = _mm256_sub_ps(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_tmp06, _mm256_fmrsub_1_ps(_tmp02, _tmp04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(2.f)), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); 
__m256 _r0tm5 = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _r0tm6 = _mm256_sub_ps(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; _mm256_storeu_ps(r0_tm_0, _r0tm0); _mm256_storeu_ps(r0_tm_1, _r0tm1); _mm256_storeu_ps(r0_tm_2, _r0tm2); _mm256_storeu_ps(r0_tm_3, _r0tm3); _mm256_storeu_ps(r0_tm_4, _r0tm4); _mm256_storeu_ps(r0_tm_5, _r0tm5); _mm256_storeu_ps(r0_tm_6, _r0tm6); _mm256_storeu_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); __m256 _r8 = _mm256_loadu_ps(r0 + 64); __m256 _r9 = _mm256_loadu_ps(r0 + 72); __m256 _r10 = _mm256_loadu_ps(r0 + 80); __m256 _r11 = _mm256_loadu_ps(r0 + 88); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); _mm256_storeu_ps(tm2p + 64, _r8); _mm256_storeu_ps(tm2p + 72, _r9); _mm256_storeu_ps(tm2p + 80, _r10); _mm256_storeu_ps(tm2p + 88, _r11); tm2p += 96; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); tm2p += 64; r0 += 
bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); tm2p += 32; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); tm2p += 16; r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); _mm256_storeu_ps(tm2p, _r0); tm2p += 8; r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const unsigned short* k01 = (const unsigned short*)kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); __m256 _sum8 = _mm256_set1_ps(0.f); __m256 _sum9 = _mm256_set1_ps(0.f); __m256 _sum10 = _mm256_set1_ps(0.f); __m256 _sum11 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = loadfp16(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); __m256 _r08 = _mm256_broadcast_ss(r0 + 64); __m256 _r09 = _mm256_broadcast_ss(r0 + 72); __m256 _r010 = _mm256_broadcast_ss(r0 + 80); __m256 _r011 = _mm256_broadcast_ss(r0 + 88); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = 
_mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 65); _r09 = _mm256_broadcast_ss(r0 + 73); _r010 = _mm256_broadcast_ss(r0 + 81); _r011 = _mm256_broadcast_ss(r0 + 89); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 66); _r09 = _mm256_broadcast_ss(r0 + 74); _r010 = _mm256_broadcast_ss(r0 + 82); _r011 = _mm256_broadcast_ss(r0 + 90); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 67); _r09 = _mm256_broadcast_ss(r0 + 75); _r010 = _mm256_broadcast_ss(r0 + 83); _r011 = _mm256_broadcast_ss(r0 + 91); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 68); _r09 = 
_mm256_broadcast_ss(r0 + 76); _r010 = _mm256_broadcast_ss(r0 + 84); _r011 = _mm256_broadcast_ss(r0 + 92); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 69); _r09 = _mm256_broadcast_ss(r0 + 77); _r010 = _mm256_broadcast_ss(r0 + 85); _r011 = _mm256_broadcast_ss(r0 + 93); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 70); _r09 = _mm256_broadcast_ss(r0 + 78); _r010 = _mm256_broadcast_ss(r0 + 86); _r011 = _mm256_broadcast_ss(r0 + 94); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = loadfp16(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 71); _r09 = _mm256_broadcast_ss(r0 + 79); _r010 = _mm256_broadcast_ss(r0 + 87); _r011 = _mm256_broadcast_ss(r0 + 95); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); k01 += 64; r0 += 96; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); 
_mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); _mm256_storeu_ps(output0_tm + 64, _sum8); _mm256_storeu_ps(output0_tm + 72, _sum9); _mm256_storeu_ps(output0_tm + 80, _sum10); _mm256_storeu_ps(output0_tm + 88, _sum11); output0_tm += 96; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const unsigned short* k01 = (const unsigned short*)kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = loadfp16(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, 
_r07, _sum7); _k01 = loadfp16(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = loadfp16(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); k01 += 64; r0 += 64; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); _mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); output0_tm += 64; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const unsigned short* k01 = (const unsigned short*)kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = loadfp16(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = 
_mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = loadfp16(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); k01 += 64; r0 += 32; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); output0_tm += 32; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const unsigned short* k01 = (const unsigned short*)kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = loadfp16(k01); __m256 _r0 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 
9); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 24); _r0 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = loadfp16(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); k01 += 64; r0 += 16; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); output0_tm += 16; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const unsigned short* k01 = (const unsigned short*)kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = loadfp16(k01); __m256 _r0 = _mm256_broadcast_ss(r0); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 24); _r0 = _mm256_broadcast_ss(r0 + 3); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _k01 = loadfp16(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); k01 += 64; r0 += 8; } _mm256_storeu_ps(output0_tm, _sum0); output0_tm += 8; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for
num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float tmp[6][8][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 8; const float* output0_tm_1 = output0_tm_0 + tiles * 8; const float* output0_tm_2 = output0_tm_0 + tiles * 16; const float* output0_tm_3 = output0_tm_0 + tiles * 24; const float* output0_tm_4 = output0_tm_0 + tiles * 32; const float* output0_tm_5 = output0_tm_0 + tiles * 40; const float* output0_tm_6 = output0_tm_0 + tiles * 48; const float* output0_tm_7 = output0_tm_0 + tiles * 56; float* output0 = out0.row(i * 6) + (j * 6) * 8; // TODO neon optimize for (int m = 0; m < 8; m++) { __m256 _out0tm0 = _mm256_loadu_ps(output0_tm_0); __m256 _out0tm1 = _mm256_loadu_ps(output0_tm_1); __m256 _out0tm2 = _mm256_loadu_ps(output0_tm_2); __m256 _out0tm3 = _mm256_loadu_ps(output0_tm_3); __m256 _out0tm4 = _mm256_loadu_ps(output0_tm_4); __m256 _out0tm5 = _mm256_loadu_ps(output0_tm_5); __m256 _out0tm6 = _mm256_loadu_ps(output0_tm_6); __m256 _out0tm7 = _mm256_loadu_ps(output0_tm_7); __m256 _tmp024a = _mm256_add_ps(_out0tm1, _out0tm2); __m256 _tmp135a = _mm256_sub_ps(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; __m256 _tmp024b = _mm256_add_ps(_out0tm3, _out0tm4); __m256 _tmp135b = _mm256_sub_ps(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; __m256 _tmp024c = _mm256_add_ps(_out0tm5, _out0tm6); __m256 _tmp135c = _mm256_sub_ps(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; __m256 _tmp0m = _mm256_add_ps(_mm256_add_ps(_out0tm0, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f)); __m256 _tmp2m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); __m256 _tmp4m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[2][m], _tmp2m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _tmp1m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); __m256 _tmp3m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); __m256 _tmp5m = _mm256_add_ps(_mm256_add_ps(_out0tm7, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f)); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 64; output0_tm_1 += tiles * 64; output0_tm_2 += tiles * 64; output0_tm_3 += tiles * 64; output0_tm_4 += tiles * 64; output0_tm_5 += tiles * 64; output0_tm_6 += tiles * 64; output0_tm_7 += tiles * 64; } for (int m = 0; m < 6; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = 
_mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _tmp024a = _mm256_add_ps(_tmp01, _tmp02); __m256 _tmp135a = _mm256_sub_ps(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; __m256 _tmp024b = _mm256_add_ps(_tmp03, _tmp04); __m256 _tmp135b = _mm256_sub_ps(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; __m256 _tmp024c = _mm256_add_ps(_tmp05, _tmp06); __m256 _tmp135c = _mm256_sub_ps(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; __m256 _out00 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp00, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f))); __m256 _out02 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); __m256 _out04 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); _mm256_storeu_ps(output0, _out00); _mm256_storeu_ps(output0 + 16, _out02); _mm256_storeu_ps(output0 + 32, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _out01 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); __m256 _out03 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); __m256 _out05 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp07, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f))); _mm256_storeu_ps(output0 + 8, _out01); _mm256_storeu_ps(output0 + 24, _out03); _mm256_storeu_ps(output0 + 40, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 8; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
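A note on the fp16 device in the file above: the Winograd-transformed weights are stored as 16-bit halves (float32_to_float16 at packing time, loadfp16 inside the dot loops), which halves the footprint and memory traffic of the 64-element transformed kernel bank while every accumulation still runs in fp32. Those two helpers are defined elsewhere in ncnn; the sketch below shows the equivalent round trip with plain x86 F16C intrinsics, as an illustration under that assumption rather than ncnn's actual implementation (build with -mf16c -mfma).

#include <immintrin.h>

// Pack 8 fp32 weights into 8 fp16 values, halving storage. This happens
// once, at kernel-transform time, so the rounding cost is paid only on weights.
static inline void store_fp16x8(unsigned short* dst, __m256 v)
{
    __m128i h = _mm256_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    _mm_storeu_si128((__m128i*)dst, h);
}

// Expand 8 fp16 weights back to fp32 right before the FMA, mirroring
// what loadfp16() does in the dot loops above.
static inline __m256 load_fp16x8(const unsigned short* src)
{
    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)src));
}

Decoding just before the multiply confines the accuracy loss to weight rounding (roughly three significant decimal digits for fp16) while activations and accumulators stay in full fp32 precision.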
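The row-index expression i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2, repeated in both the permute and the dot loops, decodes the 12/8/4/2/1 peeling order: rows of bottom_blob_tm2 hold the complete 12-tile blocks first, then at most one 8-, 4- and 2-tile block, then single tiles, and for the first tile of each block the expression counts how many rows precede it. A hypothetical helper with the same arithmetic:

// Maps the first tile index of a block to that block's row in the repacked
// buffer. Only meaningful at block boundaries, which is the only place the
// loops above evaluate it (i advances by 12, 8, 4, 2 or 1 in step with the
// peeled loops).
static inline int tile_block_row(int i)
{
    int row = i / 12;   // complete 12-tile blocks
    i %= 12;
    row += i / 8;       // at most one 8-tile block in the remainder
    i %= 8;
    row += i / 4;       // then at most one 4-tile block
    i %= 4;
    row += i / 2;       // then at most one 2-tile block
    return row + i % 2; // remaining single tile, if any
}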
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #ifdef _OPENMP #include <omp.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc < 5) { fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]); return 1; } Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)*Nz); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize both time levels and the coefficient grid, halo included // srand(42); for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); A[1][i][j][k] = 0.0; roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
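The harness runs the stencil TESTS times and keeps the minimum wall-clock time, the usual way to filter warm-up and OS jitter out of a kernel benchmark. A common derived metric for stencils is lattice updates per second over the interior volume; the helper below is a hypothetical companion to PRINT_RESULTS (print_utils.h is not shown here) and uses the same 4-deep halo implied by the loop bounds 4 <= i < N-4 above.

#include <stdio.h>

/* Hypothetical reporting helper, not part of print_utils.h: converts the
 * best time into giga lattice updates per second. Nx/Ny/Nz include the
 * +8 halo added in main, so the updated interior is (Nx-8)*(Ny-8)*(Nz-8)
 * points per time step. */
static void print_glups(int Nx, int Ny, int Nz, int Nt, double seconds)
{
    double updates = (double)(Nx - 8) * (double)(Ny - 8) * (double)(Nz - 8) * (double)Nt;
    printf("Best: %f s, %.3f GLUP/s\n", seconds, updates / seconds / 1e9);
}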
requantize_relu_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void requantize_relu_pack8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int size = w * h; int scale_in_data_size = scale_in_data.w; int scale_out_data_size = scale_out_data.w; int bias_data_size = bias_data.w; // int8(relu(v * scale_in) * scale_out) // int8_relu(v * (scale_in * scale_out)) // int8(relu(v * scale_in + bias) * scale_out) // int8_relu(v * (scale_in * scale_out) + (bias * scale_out)) if (bias_data_size == 0) { #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { const int* intptr = bottom_blob.channel(q); signed char* ptr = top_blob.channel(q); v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0); v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0); v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0); v4f32 _scale_out1 = scale_out_data_size == 1 ? 
(v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0); v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0); v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1); int i = 0; for (; i + 3 < size; i += 4) { __builtin_prefetch(intptr + 128); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0)); v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0)); v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0)); v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0)); v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0)); v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0)); _v0 = __msa_fmul_w(_v0, _scale0); _v1 = __msa_fmul_w(_v1, _scale1); _v2 = __msa_fmul_w(_v2, _scale0); _v3 = __msa_fmul_w(_v3, _scale1); _v4 = __msa_fmul_w(_v4, _scale0); _v5 = __msa_fmul_w(_v5, _scale1); _v6 = __msa_fmul_w(_v6, _scale0); _v7 = __msa_fmul_w(_v7, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3); *((int64_t*)(ptr + 16)) = float2int8relu(_v4, _v5); *((int64_t*)(ptr + 24)) = float2int8relu(_v6, _v7); intptr += 32; ptr += 32; } for (; i + 1 < size; i += 2) { __builtin_prefetch(intptr + 64); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0)); v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0)); _v0 = __msa_fmul_w(_v0, _scale0); _v1 = __msa_fmul_w(_v1, _scale1); _v2 = __msa_fmul_w(_v2, _scale0); _v3 = __msa_fmul_w(_v3, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3); intptr += 16; ptr += 16; } for (; i < size; i++) { __builtin_prefetch(intptr + 32); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); _v0 = __msa_fmul_w(_v0, _scale0); _v1 = __msa_fmul_w(_v1, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); intptr += 8; ptr += 8; } } } else { #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { const int* intptr = bottom_blob.channel(q); signed char* ptr = top_blob.channel(q); v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0); v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0); v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0); v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0); v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0); v4f32 _bias1 = bias_data_size == 1 ? 
(v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0); v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0); v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1); _bias0 = __msa_fmul_w(_bias0, _scale_out0); _bias1 = __msa_fmul_w(_bias1, _scale_out1); int i = 0; for (; i + 3 < size; i += 4) { __builtin_prefetch(intptr + 128); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0)); v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0)); v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0)); v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0)); v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0)); v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0)); _v0 = __msa_fmadd_w(_bias0, _v0, _scale0); _v1 = __msa_fmadd_w(_bias1, _v1, _scale1); _v2 = __msa_fmadd_w(_bias0, _v2, _scale0); _v3 = __msa_fmadd_w(_bias1, _v3, _scale1); _v4 = __msa_fmadd_w(_bias0, _v4, _scale0); _v5 = __msa_fmadd_w(_bias1, _v5, _scale1); _v6 = __msa_fmadd_w(_bias0, _v6, _scale0); _v7 = __msa_fmadd_w(_bias1, _v7, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3); *((int64_t*)(ptr + 16)) = float2int8relu(_v4, _v5); *((int64_t*)(ptr + 24)) = float2int8relu(_v6, _v7); intptr += 32; ptr += 32; } for (; i + 1 < size; i += 2) { __builtin_prefetch(intptr + 64); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0)); v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0)); _v0 = __msa_fmadd_w(_bias0, _v0, _scale0); _v1 = __msa_fmadd_w(_bias1, _v1, _scale1); _v2 = __msa_fmadd_w(_bias0, _v2, _scale0); _v3 = __msa_fmadd_w(_bias1, _v3, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3); intptr += 16; ptr += 16; } for (; i < size; i++) { __builtin_prefetch(intptr + 32); v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0)); v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0)); _v0 = __msa_fmadd_w(_bias0, _v0, _scale0); _v1 = __msa_fmadd_w(_bias1, _v1, _scale1); *((int64_t*)ptr) = float2int8relu(_v0, _v1); intptr += 8; ptr += 8; } } } }
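The comment block at the top of requantize_relu_pack8_msa is the whole optimization: folding scale_in, scale_out and bias into one per-channel scale and bias turns dequantize -> ReLU -> requantize into a single fused multiply-add per element. A scalar sketch of that identity, with hypothetical helper names rather than ncnn API:

#include <math.h>

/* int8(relu(v * scale_in + bias) * scale_out)
 *   == int8_relu(v * (scale_in * scale_out) + bias * scale_out) */
static signed char int8_relu(float v)
{
    if (v <= 0.f) return 0;                /* ReLU folds the lower clamp away */
    long q = lrintf(v);                    /* round to nearest (assumed to match float2int8relu) */
    return q > 127 ? 127 : (signed char)q; /* saturate to the int8 maximum */
}

static signed char requantize_fused(int v, float fused_scale, float fused_bias)
{
    /* fused_scale = scale_in * scale_out and fused_bias = bias * scale_out,
       both precomputed once per output channel, as in the vector code. */
    return int8_relu((float)v * fused_scale + fused_bias);
}

Because ReLU discards negatives before quantization, only the upper saturation bound can ever trigger, which is what lets the vector path use the specialized float2int8relu instead of a full two-sided clamp.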
DOT_NetSimplexUnit.h
/** * @fileoverview Copyright (c) 2019-2021, Stefano Gualandi, * via Ferrata, 1, I-27100, Pavia, Italy * * @author [email protected] (Stefano Gualandi) * */ // ORIGINAL SOURCE CODE FOR THE NETWORK SIMPLEX BASIS DATA STRUCTURE TAKE FROM: // WEBSITE: https://lemon.cs.elte.hu /* ORIGINAL LICENSE FILE: * This file is a part of LEMON, a generic C++ optimization library. * * Copyright (C) 2003-2013 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #pragma once #include "DOT_Commons.h" namespace DOT { class NetSimplexUnit { public: typedef std::vector<int> IntVector; typedef std::vector<int> ValueVector; typedef std::vector<int> CostVector; typedef std::vector<signed char> CharVector; typedef std::vector<bool> BoolVector; // State constants for arcs const bool STATE_TREE = false; const bool STATE_LOWER = true; // Direction constants for tree arcs const int DIR_DOWN = -1; const int DIR_UP = 1; // Data related to the underlying digraph int _node_num; int _arc_num; int _dummy_arc; // Arc id where begin the basic arcs int _next_arc; // Parameters of the problem int64_t _sum_supply; // Data structures for storing the digraph IntVector _source; IntVector _target; // Node and arc data ValueVector _supply; ValueVector _flow; CostVector _cost; CostVector _pi; // Data for storing the spanning tree structure IntVector _parent; IntVector _pred; IntVector _thread; IntVector _rev_thread; IntVector _succ_num; IntVector _last_succ; CharVector _pred_dir; BoolVector _state; IntVector _dirty_revs; int _root; // Temporary data used in the current pivot iteration int in_arc, join, u_in, v_in, u_out, v_out; int delta; const int MAX; const int INF; double _runtime; double _timelimit; std::string _verbosity; double _opt_tolerance; int N_IT_LOG; uint64_t _iterations; double t1, t2, t3, t4, t5, t6; private: // Implementation of the Block Search pivot rule class BlockSearchPivotRule { private: // References to the NetworkSimplex class const IntVector& _source; const IntVector& _target; const BoolVector& _state; const CostVector& _pi; int& _in_arc; int _arc_num; int _dummy_arc; // Pivot rule data int _block_size; int _next_arc; public: // Constructor BlockSearchPivotRule(NetSimplexUnit& ns) : _source(ns._source), _target(ns._target), _state(ns._state), _pi(ns._pi), _in_arc(ns.in_arc), _arc_num(ns._arc_num), _dummy_arc(ns._dummy_arc), _next_arc(ns._next_arc) { // The main parameters of the pivot rule const double BLOCK_SIZE_FACTOR = 1; const int MIN_BLOCK_SIZE = 20; _block_size = (std::max)(int(BLOCK_SIZE_FACTOR * std::sqrt(double(_arc_num) - double(_dummy_arc))), MIN_BLOCK_SIZE); } // Find next entering arc bool findEnteringArc() { int min = 0; int cnt = _block_size; for (int e = _next_arc; e < _arc_num; ++e) { int c = _state[e] * (1 + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; _in_arc = e; } if (--cnt == 0) { if (min < 0) goto search_end; cnt = _block_size; } } for (int e = _dummy_arc; e < _next_arc; ++e) { int c = _state[e] * (1 + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; _in_arc = e; } if (--cnt == 0) { if (min < 0) goto search_end; cnt = _block_size; } } if (min >= 0) return 
false; search_end: _next_arc = _in_arc; return true; } }; // class BlockSearchPivotRule public: NetSimplexUnit(const char INIT, int node_num, int arc_num) : _node_num(node_num), _arc_num(0), _root(-1), in_arc(-1), join(-1), u_in(-1), v_in(-1), u_out(-1), v_out(-1), MAX((std::numeric_limits<int>::max)()), INF(std::numeric_limits<int>::has_infinity ? std::numeric_limits<int>::infinity() : MAX), _runtime(0.0) { // Check the number types if (!std::numeric_limits<int>::is_signed) throw std::runtime_error( "The flow type of NetworkSimplex must be signed"); if (!std::numeric_limits<int>::is_signed) throw std::runtime_error( "The cost type of NetworkSimplex must be signed"); // Reset data structures int all_node_num = _node_num + 1; _supply.resize(all_node_num, 0); _pi.resize(all_node_num); _parent.resize(all_node_num); _pred.resize(all_node_num); _pred_dir.resize(all_node_num); _thread.resize(all_node_num); _rev_thread.resize(all_node_num); _succ_num.resize(all_node_num); _last_succ.resize(all_node_num); // 2*n arcs from nodes to root and from root to node; // 2*n-1 nodes in a basic solution int max_arc_num = 0; if (INIT == 'F') // Full max_arc_num = 2 * _node_num + arc_num + 1; if (INIT == 'E') // Empty, for Column Generation max_arc_num = 8 * _node_num + 1; _source.reserve(max_arc_num); _target.reserve(max_arc_num); _cost.reserve(max_arc_num); _flow.reserve(max_arc_num); _state.reserve(max_arc_num); _source.resize(_node_num); _target.resize(_node_num); _cost.resize(_node_num, 0); _flow.resize(_node_num, 0); _state.resize(_node_num, STATE_LOWER); // Dummy arcs for every node to root node _dummy_arc = _node_num; _arc_num = _node_num; _next_arc = _dummy_arc; // Internal parameters N_IT_LOG = 1000; // check runtime every N_IT_LOG iterations _timelimit = std::numeric_limits<double>::max(); _verbosity = DOT_VAL_INFO; _opt_tolerance = 1e-06; _iterations = 0; // Benchmarking t1 = 0.0, t2 = 0.0, t3 = 0.0, t4 = 0.0, t5 = 0.0, t6 = 0.0; } ProblemType run(PivotRule pivot_rule = PivotRule::BLOCK_SEARCH) { // shuffle(); _runtime = 0.0; _iterations = 0; // Reset arc variables for (int e = 0; e < _arc_num; ++e) { _state[e] = STATE_LOWER; _flow[e] = 0; } if (!init()) return ProblemType::INFEASIBLE; return start(pivot_rule); } ProblemType reRun(PivotRule pivot_rule = PivotRule::BLOCK_SEARCH) { return start(pivot_rule); } uint64_t num_arcs() const { return uint64_t(_source.size()) - _dummy_arc; } uint64_t num_nodes() const { return _node_num; } void addNode(int i, int b) { _supply[i] = b; } size_t addArc(int a, int b, int c) { size_t mm = _source.size(); _source.emplace_back(a); _target.emplace_back(b); _cost.emplace_back(c); _flow.emplace_back(0); _state.emplace_back(STATE_LOWER); _arc_num++; return mm; } void setArc(size_t idx, int a, int b, int c) { _source[_dummy_arc + idx] = a; _target[_dummy_arc + idx] = b; _cost[_dummy_arc + idx] = c; _flow[_dummy_arc + idx] = 0; _state[_dummy_arc + idx] = STATE_LOWER; _arc_num++; } int64_t computeDummyFlow(const vector<size_t>& as) const { int64_t tot_flow = 0; for (const auto e : as) tot_flow += _flow[e]; return tot_flow; } int updateArcs(const Vars& as) { int new_arc = 0; size_t idx = 0; size_t idx_max = as.size(); int e = _dummy_arc; int e_max = _arc_num; // Store the new arc variable, replacing a used arc, // out of the basis and with positive reduced cost for (; idx < idx_max; ++idx) { while (e < e_max) { // Replace useless variables with new variables if (_state[e] == STATE_LOWER && (1 + _pi[_source[e]] - _pi[_target[e]] > 1)) break; ++e; } if (e >= e_max) break;
_source[e] = as[idx].a; _target[e] = as[idx].b; if (new_arc == 0) _next_arc = e; new_arc++; } for (; idx < idx_max; ++idx) { addArc(as[idx].a, as[idx].b, as[idx].c); if (new_arc == 0) _next_arc = e; new_arc++; } return new_arc; } int addArcs(const Vars& as) { int new_arc = 0; size_t idx = 0; size_t idx_max = as.size(); for (; idx < idx_max; ++idx) { addArc(as[idx].a, as[idx].b, as[idx].c); if (new_arc == 0) _next_arc = _arc_num; new_arc++; } return new_arc; } int64_t totalCost() const { int64_t c = 0; for (int e = _dummy_arc; e < _arc_num; ++e) if (_source[e] != _root && _target[e] != _root) c += _cost[e] * _flow[e]; return c; } int64_t totalFlow() const { int64_t tot_flow = 0; for (int e = _dummy_arc; e < _arc_num; ++e) if (_source[e] != _root && _target[e] != _root) tot_flow += _flow[e]; return tot_flow; } int64_t dummyFlow() const { int64_t tot_flow = 0; for (int e = 0; e < _dummy_arc; ++e) tot_flow += _flow[e]; return tot_flow; } // Potential of node n int potential(int n) const { return _pi[n]; } // Runtime in milliseconds double runtime() const { return _runtime; } // Number of iterations of the simplex algorithm uint64_t iterations() const { return _iterations; } // Set basic parameters void setTimelimit(double t) { _timelimit = t; PRINT("INFO: change <timelimit> to %f\n", t); } void setOptTolerance(double o) { _opt_tolerance = o; PRINT("INFO: change <opt_tolerance> to %f\n", o); } void setVerbosity(std::string v) { _verbosity = v; if (v == DOT_VAL_DEBUG) N_IT_LOG = 100000; if (v == DOT_VAL_INFO) N_IT_LOG = 10000000; if (v == DOT_VAL_SILENT) N_IT_LOG = 0; PRINT("INFO: change <verbosity> to %s\n", v.c_str()); } // Check feasibility ProblemType checkFeasibility() { for (int e = 0; e != _dummy_arc; ++e) if (fabs(_flow[e]) > 0) throw std::runtime_error( "ERROR 3: flow on dummy arcs: " + std::to_string(_flow[e]) + "\n"); return ProblemType::OPTIMAL; } // Reserve memory for arcs in the simplex network void reserveArcMemory(size_t s) { auto o = _source.size(); _source.reserve(o + s); _target.reserve(o + s); _flow.reserve(o + s); _state.reserve(o + s); } // Resize memory for arcs in the simplex network void resizeArcMemory(int s) { int o = static_cast<int>(_source.size()); _source.resize(o + s, -1); _target.resize(o + s, -1); _flow.resize(o + s, 0); _state.resize(o + s, STATE_LOWER); } private: // Initialize internal data structures bool init() { if (_node_num == 0) return false; // Check the sum of supply values _sum_supply = 0; for (int i = 0; i != _node_num; ++i) _sum_supply += _supply[i]; // Initialize artificial cost int ART_COST; if (std::numeric_limits<int>::is_exact) { ART_COST = (std::numeric_limits<int>::max)() / 2 + 1; } else { ART_COST = 2 * _node_num; } // Set data for the artificial root node _root = _node_num; _parent[_root] = -1; _pred[_root] = -1; _thread[_root] = 0; _rev_thread[0] = _root; _succ_num[_root] = _node_num + 1; _last_succ[_root] = _root - 1; _supply[_root] = -_sum_supply; _pi[_root] = 0; for (int u = 0, e = 0; u != _node_num; ++u, ++e) { _parent[u] = _root; _pred[u] = e; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; _state[e] = STATE_TREE; if (_supply[u] >= 0) { _pred_dir[u] = DIR_UP; _pi[u] = 0; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; } else { _pred_dir[u] = DIR_DOWN; _pi[u] = ART_COST; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = ART_COST; } } return true; } // Find the join node void findJoinNode() { int u = _source[in_arc]; int v = _target[in_arc]; while (u != v) { if
(_succ_num[u] < _succ_num[v]) { u = _parent[u]; } else { v = _parent[v]; } } join = u; } // Find the leaving arc of the cycle; return true if the // leaving arc is not the same as the entering arc bool findLeavingArc() { // Initialize first and second nodes according to the direction // of the cycle int first, second; first = _source[in_arc]; second = _target[in_arc]; delta = MAX; int result = 0; int d; int e; // Search the cycle from the first node to the join node for (int u = first; u != join; u = _parent[u]) { e = _pred[u]; d = _flow[e]; if (_pred_dir[u] == DIR_DOWN) d = INF - d; if (d < delta) { delta = d; u_out = u; result = 1; } } // Search the cycle from the second node to the join node for (int u = second; u != join; u = _parent[u]) { e = _pred[u]; d = _flow[e]; if (_pred_dir[u] == DIR_UP) d = INF - d; if (d <= delta) { delta = d; u_out = u; result = 2; } } if (result == 1) { u_in = first; v_in = second; } else { u_in = second; v_in = first; } return result != 0; } // Change _flow and _state vectors void changeFlow() { // Augment along the cycle if (delta > 0) { _flow[in_arc] += delta; #pragma omp parallel sections num_threads(2) { #pragma omp section { for (int u = _source[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] -= _pred_dir[u] * delta; } } #pragma omp section { for (int u = _target[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _pred_dir[u] * delta; } } } } // Update the state of the entering and leaving arcs _state[in_arc] = STATE_TREE; _state[_pred[u_out]] = STATE_LOWER; } // Update the tree structure void updateTreeStructure() { int old_rev_thread = _rev_thread[u_out]; int old_succ_num = _succ_num[u_out]; int old_last_succ = _last_succ[u_out]; v_out = _parent[u_out]; // Check if u_in and u_out coincide if (u_in == u_out) { // Update _parent, _pred, _pred_dir _parent[u_in] = v_in; _pred[u_in] = in_arc; _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN; // Update _thread and _rev_thread if (_thread[v_in] != u_out) { int after = _thread[old_last_succ]; _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; after = _thread[v_in]; _thread[v_in] = u_out; _rev_thread[u_out] = v_in; _thread[old_last_succ] = after; _rev_thread[after] = old_last_succ; } } else { // Handle the case when old_rev_thread equals to v_in // (it also means that join and v_out coincide) int thread_continue = old_rev_thread == v_in ? _thread[old_last_succ] : _thread[v_in]; // Update _thread and _parent along the stem nodes (i.e. the nodes // between u_in and u_out, whose parents have to be changed) int stem = u_in; // the current stem node int par_stem = v_in; // the new parent of stem int next_stem; // the next stem node int last = _last_succ[u_in]; // the last successor of stem int before, after = _thread[last]; _thread[v_in] = u_in; _dirty_revs.clear(); _dirty_revs.push_back(v_in); while (stem != u_out) { // Insert the next stem node into the thread list next_stem = _parent[stem]; _thread[last] = next_stem; _dirty_revs.push_back(last); // Remove the subtree of stem from the thread list before = _rev_thread[stem]; _thread[before] = after; _rev_thread[after] = before; // Change the parent node and shift stem nodes _parent[stem] = par_stem; par_stem = stem; stem = next_stem; // Update last and after last = _last_succ[stem] == _last_succ[par_stem] ?
_rev_thread[par_stem] : _last_succ[stem]; after = _thread[last]; } _parent[u_out] = par_stem; _thread[last] = thread_continue; _rev_thread[thread_continue] = last; _last_succ[u_out] = last; // Remove the subtree of u_out from the thread list except for // the case when old_rev_thread equals to v_in if (old_rev_thread != v_in) { _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; } // Update _rev_thread using the new _thread values for (int i = 0; i != int(_dirty_revs.size()); ++i) { int u = _dirty_revs[i]; _rev_thread[_thread[u]] = u; } // Update _pred, _pred_dir, _last_succ and _succ_num for the // stem nodes from u_out to u_in int tmp_sc = 0, tmp_ls = _last_succ[u_out]; for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) { _pred[u] = _pred[p]; _pred_dir[u] = -_pred_dir[p]; tmp_sc += _succ_num[u] - _succ_num[p]; _succ_num[u] = tmp_sc; _last_succ[p] = tmp_ls; } _pred[u_in] = in_arc; _pred_dir[u_in] = u_in == _source[in_arc] ? DIR_UP : DIR_DOWN; _succ_num[u_in] = old_succ_num; } // Update _last_succ from v_in towards the root int up_limit_out = _last_succ[join] == v_in ? join : -1; int last_succ_out = _last_succ[u_out]; for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) { _last_succ[u] = last_succ_out; } // Update _last_succ from v_out towards the root if (join != old_rev_thread && v_in != old_rev_thread) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = old_rev_thread; } } else if (last_succ_out != old_last_succ) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = last_succ_out; } } // Update _succ_num from v_in to join for (int u = v_in; u != join; u = _parent[u]) { _succ_num[u] += old_succ_num; } // Update _succ_num from v_out to join for (int u = v_out; u != join; u = _parent[u]) { _succ_num[u] -= old_succ_num; } } // Update potentials in the subtree that has been moved void updatePotential() { int sigma = _pi[v_in] - _pi[u_in] - _pred_dir[u_in]; int end = _thread[_last_succ[u_in]]; int cc = 0; if (sigma != 0) for (int u = u_in; u != end; u = _thread[u], cc++) _pi[u] += sigma; fprintf(stdout, "%d\t", cc); } // Execute the algorithm ProblemType start(PivotRule pivot_rule) { // Select the pivot rule implementation switch (pivot_rule) { case PivotRule::BLOCK_SEARCH: return start<BlockSearchPivotRule>(); } return ProblemType::INFEASIBLE; // avoid warning } template <typename PivotRuleImpl> ProblemType start() { auto start_tt = std::chrono::steady_clock::now(); PivotRuleImpl pivot(*this); // Benchmarking // Execute the Network Simplex algorithm while (true) { // auto start_t = std::chrono::steady_clock::now(); bool stop = pivot.findEnteringArc(); // auto end_t = std::chrono::steady_clock::now(); // t1 += double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; if (!stop) break; // start_t = std::chrono::steady_clock::now(); findJoinNode(); // end_t = std::chrono::steady_clock::now(); // t2 += double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; // start_t = std::chrono::steady_clock::now(); findLeavingArc(); // end_t = std::chrono::steady_clock::now(); // t3 += double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; // start_t = std::chrono::steady_clock::now(); changeFlow(); // end_t = std::chrono::steady_clock::now(); // t4 += 
double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; // start_t = std::chrono::steady_clock::now(); updateTreeStructure(); // end_t = std::chrono::steady_clock::now(); // t5 += double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; // start_t = std::chrono::steady_clock::now(); updatePotential(); // end_t = std::chrono::steady_clock::now(); // t6 += double(std::chrono::duration_cast<std::chrono::nanoseconds>(end_t // - // start_t) // .count()) / // 1000000000; // Add as log file _iterations++; //if (N_IT_LOG > 0) { // if (_iterations % N_IT_LOG == 0) { // auto end_t = std::chrono::steady_clock::now(); // double tot = // double(std::chrono::duration_cast<std::chrono::nanoseconds>( // end_t - start_tt) // .count()) / // 1000000000; // if (tot > _timelimit) // return ProblemType::TIMELIMIT; // if (_verbosity == DOT_VAL_DEBUG) // PRINT("NetSIMPLEX inner loop | it: %ld, distance: %.4f, runtime: " // "%.4f\n", // _iterations, totalCost(), tot); // } //} } auto end_t = std::chrono::steady_clock::now(); _runtime += double(std::chrono::duration_cast<std::chrono::milliseconds>( end_t - start_tt) .count()) / 1000; if (_verbosity == DOT_VAL_DEBUG) PRINT("NetSIMPLEX outer loop | enter: %.3f, join: %.3f, leave: %.3f, " "change: %.3f, tree: %.3f, " "potential: %.3f, runtime: %.3f\n", t1, t2, t3, t4, t5, t6, _runtime); return ProblemType::OPTIMAL; } }; } // namespace DOT
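The BlockSearchPivotRule above is the classic block-search pivot rule specialized to unit arc costs, which is why the reduced cost appears as the literal 1 + _pi[_source[e]] - _pi[_target[e]]. A freestanding sketch of the same scan follows, with hypothetical names (Arc, find_entering_arc_block) that are not part of the DOT code, and with the dummy-arc bookkeeping omitted:

#include <algorithm>
#include <cmath>
#include <vector>

struct Arc { int src, dst; };

// Scan arcs in blocks of ~sqrt(m), starting where the last pivot stopped;
// once a block has produced a negative reduced cost, pivot on the most
// negative arc seen so far. Returns the entering arc, or -1 at optimality.
int find_entering_arc_block(const std::vector<Arc>& arcs,
                            const std::vector<int>& pi,
                            const std::vector<bool>& at_lower,
                            int& next_arc)
{
    const int m = (int)arcs.size();
    const int block = std::max((int)std::sqrt((double)m), 20);
    int best = -1, min_rc = 0, cnt = block;
    for (int s = 0; s < m; ++s) {
        int e = (next_arc + s) % m;  // wrap-around scan from next_arc
        if (at_lower[e]) {           // basic (tree) arcs cannot enter
            int rc = 1 + pi[arcs[e].src] - pi[arcs[e].dst]; // unit cost
            if (rc < min_rc) { min_rc = rc; best = e; }
        }
        if (--cnt == 0) {
            if (min_rc < 0) break;   // stop at the first successful block
            cnt = block;
        }
    }
    if (best >= 0) next_arc = best;  // resume here on the next pivot
    return best;
}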
broadcast_reduce-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } template<int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim-1, idx_t = idx; i >=0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp*shape[i]; *j += coord*stridej[i]; *k += coord*stridek[i]; idx_t = tmp; } } template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (index_t i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > 1) * coord[i]; } return ret; } template<int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } #pragma unroll for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) ret += coord[i] * stride[i]; return ret; } template<typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { 
*dst = src; } } template<int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = unravel(idx, oshape); const index_t j = ravel(coord, lshape); const index_t k = ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType *small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord = unravel(idx, sshape); index_t j = ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { coord = unravel(k, rshape); Reducer::Reduce(val, AType(OP::Map(big[j + dot(coord, rstride)])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } #ifdef __CUDACC__ #include "broadcast_reduce-inl.cuh" #else template<int ndim, typename DType, typename OP> void binary_broadcast_compute(const size_t N, const bool addto, const DType *lhs, const DType *rhs, DType *out, const Shape<ndim> lshape, const Shape<ndim> rshape, const Shape<ndim> oshape) { for (size_t idx = 0; idx < N; ++idx) { binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape); } } template<int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { if (req == kNullOp) return; size_t N = out.shape_.Size(); binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(), out.shape_.get<ndim>()); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, OType *small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small, bshape, sshape, rshape, rstride); } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = unravel(idx, sshape); index_t j = ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& 
workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { // TODO(haojin2): Use real-only type switch for Windows temporarily due to CI issues. #ifndef _WIN32 MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); #else MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); #endif } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = unravel(k, rshape); ws_dptr[k] = dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } template<int ndim, typename DType> size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big) { return 0; } template<int ndim, typename DType> size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs) { return 0; } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType *small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = unravel(idx, small_shape); const index_t idx_big0 = ravel(coord, big_shape); const index_t idx_lhs0 = ravel(coord, lhs_shape0); const index_t idx_rhs0 =
ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = unravel(k, rshape); index_t idx_big = idx_big0 + dot(coord_big, rstride); Shape<ndim> coord_lhs = unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, const DType *lhs, const DType *rhs, DType *small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>( N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
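The index arithmetic above leans on one trick: ravel() multiplies each coordinate by (shape[i] > 1), so coordinates along size-1 (broadcast) axes are dropped and a flat output index maps straight to the matching input element. A small self-contained demonstration of that mapping (plain C++, hardcoded to 2-D for clarity; not mxnet code):

#include <cstdio>

int main() {
    const int oshape[2] = {2, 3}; // output shape 2x3
    const int lshape[2] = {2, 1}; // lhs broadcast along the last axis
    for (int idx = 0; idx < oshape[0] * oshape[1]; ++idx) {
        // unravel: flat output index -> coordinates over oshape
        int c0 = idx / oshape[1];
        int c1 = idx % oshape[1];
        // ravel over lshape: ret = ret * shape[i] + (shape[i] > 1) * coord[i]
        int j = 0;
        j = j * lshape[0] + (lshape[0] > 1 ? c0 : 0);
        j = j * lshape[1] + (lshape[1] > 1 ? c1 : 0);
        std::printf("out[%d][%d] reads lhs[%d]\n", c0, c1, j); // j == c0
    }
    return 0;
}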
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
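The ISLE family differs from GrB_LE only in its result type: the comparison is returned in the operand type (here int32_t 0/1) rather than bool, which is why GB_CTYPE above is int32_t. A minimal standalone sketch of the scalar semantics these templates expand to (isle_int32 here is an illustrative stand-in, not the GraphBLAS API):

#include <cstdint>
#include <cstdio>

static inline int32_t isle_int32(int32_t x, int32_t y) { return (x <= y); }

int main() {
    int32_t a[4] = {3, -1, 7, 7};
    int32_t b[4] = {5, -2, 7, 6};
    int32_t c[4];
    // dense eWise apply, the shape of the generated loops above
    for (int p = 0; p < 4; ++p) c[p] = isle_int32(a[p], b[p]);
    for (int p = 0; p < 4; ++p) std::printf("%d ", c[p]); // prints: 1 0 1 0
    std::printf("\n");
    return 0;
}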
Ooura_FFT.h
#ifndef _H_OOURA_FFT_ #define _H_OOURA_FFT_ #include <cmath> class Ooura_FFT{ private: int frame_size; int channels; double **a, **w; int **ip; public: inline Ooura_FFT(int _frame_size, int _channels); inline ~Ooura_FFT(); inline void FFT(double **); inline void FFT(double **, int target_channels); inline void iFFT(double **); inline void FFT(double *); inline void iFFT(double *); inline void SingleFFT(double *); inline void SingleiFFT(double *); }; /* Copyright: Copyright(C) 1996-2001 Takuya OOURA email: [email protected] download: http://momonga.t.u-tokyo.ac.jp/~ooura/fft.html You may use, copy, modify this code for any purpose and without fee. You may distribute this ORIGINAL package. Fast Fourier/Cosine/Sine Transform dimension :one data length :power of 2 decimation :frequency radix :4, 2 data :inplace table :use functions cdft: Complex Discrete Fourier Transform rdft: Real Discrete Fourier Transform ddct: Discrete Cosine Transform ddst: Discrete Sine Transform dfct: Cosine Transform of RDFT (Real Symmetric DFT) dfst: Sine Transform of RDFT (Real Anti-symmetric DFT) function prototypes void cdft(int, int, double *, int *, double *); void rdft(int, int, double *, int *, double *); void ddct(int, int, double *, int *, double *); void ddst(int, int, double *, int *, double *); void dfct(int, double *, double *, int *, double *); void dfst(int, double *, double *, int *, double *); -------- Complex DFT (Discrete Fourier Transform) -------- [definition] <case1> X[k] = sum_j=0^n-1 x[j]*exp(2*pi*i*j*k/n), 0<=k<n <case2> X[k] = sum_j=0^n-1 x[j]*exp(-2*pi*i*j*k/n), 0<=k<n (notes: sum_j=0^n-1 is a summation from j=0 to n-1) [usage] <case1> ip[0] = 0; // first time only cdft(2*n, 1, a, ip, w); <case2> ip[0] = 0; // first time only cdft(2*n, -1, a, ip, w); [parameters] 2*n :data length (int) n >= 1, n = power of 2 a[0...2*n-1] :input/output data (double *) input data a[2*j] = Re(x[j]), a[2*j+1] = Im(x[j]), 0<=j<n output data a[2*k] = Re(X[k]), a[2*k+1] = Im(X[k]), 0<=k<n ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n) strictly, length of ip >= 2+(1<<(int)(log(n+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n/2-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of cdft(2*n, -1, a, ip, w); is cdft(2*n, 1, a, ip, w); for (j = 0; j <= 2 * n - 1; j++) { a[j] *= 1.0 / n; } . -------- Real DFT / Inverse of Real DFT -------- [definition] <case1> RDFT R[k] = sum_j=0^n-1 a[j]*cos(2*pi*j*k/n), 0<=k<=n/2 I[k] = sum_j=0^n-1 a[j]*sin(2*pi*j*k/n), 0<k<n/2 <case2> IRDFT (excluding scale) a[k] = (R[0] + R[n/2]*cos(pi*k))/2 + sum_j=1^n/2-1 R[j]*cos(2*pi*j*k/n) + sum_j=1^n/2-1 I[j]*sin(2*pi*j*k/n), 0<=k<n [usage] <case1> ip[0] = 0; // first time only rdft(n, 1, a, ip, w); <case2> ip[0] = 0; // first time only rdft(n, -1, a, ip, w); [parameters] n :data length (int) n >= 2, n = power of 2 a[0...n-1] :input/output data (double *) <case1> output data a[2*k] = R[k], 0<=k<n/2 a[2*k+1] = I[k], 0<k<n/2 a[1] = R[n/2] <case2> input data a[2*j] = R[j], 0<=j<n/2 a[2*j+1] = I[j], 0<j<n/2 a[1] = R[n/2] ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n/2) strictly, length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n/2-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of rdft(n, 1, a, ip, w); is rdft(n, -1, a, ip, w); for (j = 0; j <= n - 1; j++) { a[j] *= 2.0 / n; } .
-------- DCT (Discrete Cosine Transform) / Inverse of DCT -------- [definition] <case1> IDCT (excluding scale) C[k] = sum_j=0^n-1 a[j]*cos(pi*j*(k+1/2)/n), 0<=k<n <case2> DCT C[k] = sum_j=0^n-1 a[j]*cos(pi*(j+1/2)*k/n), 0<=k<n [usage] <case1> ip[0] = 0; // first time only ddct(n, 1, a, ip, w); <case2> ip[0] = 0; // first time only ddct(n, -1, a, ip, w); [parameters] n :data length (int) n >= 2, n = power of 2 a[0...n-1] :input/output data (double *) output data a[k] = C[k], 0<=k<n ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n/2) strictly, length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n*5/4-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of ddct(n, -1, a, ip, w); is a[0] *= 0.5; ddct(n, 1, a, ip, w); for (j = 0; j <= n - 1; j++) { a[j] *= 2.0 / n; } . -------- DST (Discrete Sine Transform) / Inverse of DST -------- [definition] <case1> IDST (excluding scale) S[k] = sum_j=1^n A[j]*sin(pi*j*(k+1/2)/n), 0<=k<n <case2> DST S[k] = sum_j=0^n-1 a[j]*sin(pi*(j+1/2)*k/n), 0<k<=n [usage] <case1> ip[0] = 0; // first time only ddst(n, 1, a, ip, w); <case2> ip[0] = 0; // first time only ddst(n, -1, a, ip, w); [parameters] n :data length (int) n >= 2, n = power of 2 a[0...n-1] :input/output data (double *) <case1> input data a[j] = A[j], 0<j<n a[0] = A[n] output data a[k] = S[k], 0<=k<n <case2> output data a[k] = S[k], 0<k<n a[0] = S[n] ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n/2) strictly, length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n*5/4-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of ddst(n, -1, a, ip, w); is a[0] *= 0.5; ddst(n, 1, a, ip, w); for (j = 0; j <= n - 1; j++) { a[j] *= 2.0 / n; } . -------- Cosine Transform of RDFT (Real Symmetric DFT) -------- [definition] C[k] = sum_j=0^n a[j]*cos(pi*j*k/n), 0<=k<=n [usage] ip[0] = 0; // first time only dfct(n, a, t, ip, w); [parameters] n :data length - 1 (int) n >= 2, n = power of 2 a[0...n] :input/output data (double *) output data a[k] = C[k], 0<=k<=n t[0...n/2] :work area (double *) ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n/4) strictly, length of ip >= 2+(1<<(int)(log(n/4+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n*5/8-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of a[0] *= 0.5; a[n] *= 0.5; dfct(n, a, t, ip, w); is a[0] *= 0.5; a[n] *= 0.5; dfct(n, a, t, ip, w); for (j = 0; j <= n; j++) { a[j] *= 2.0 / n; } . -------- Sine Transform of RDFT (Real Anti-symmetric DFT) -------- [definition] S[k] = sum_j=1^n-1 a[j]*sin(pi*j*k/n), 0<k<n [usage] ip[0] = 0; // first time only dfst(n, a, t, ip, w); [parameters] n :data length + 1 (int) n >= 2, n = power of 2 a[0...n-1] :input/output data (double *) output data a[k] = S[k], 0<k<n (a[0] is used for work area) t[0...n/2-1] :work area (double *) ip[0...*] :work area for bit reversal (int *) length of ip >= 2+sqrt(n/4) strictly, length of ip >= 2+(1<<(int)(log(n/4+0.5)/log(2))/2). ip[0],ip[1] are pointers of the cos/sin table. w[0...n*5/8-1] :cos/sin table (double *) w[],ip[] are initialized if ip[0] == 0. [remark] Inverse of dfst(n, a, t, ip, w); is dfst(n, a, t, ip, w); for (j = 1; j <= n - 1; j++) { a[j] *= 2.0 / n; } . Appendix : The cos/sin table is recalculated when the larger table required. w[] and ip[] are compatible with all routines. 
*/ inline void cdft(int, int, double *, int *, double *); inline void rdft(int, int, double *, int *, double *); inline void ddct(int, int, double *, int *, double *); inline void ddst(int, int, double *, int *, double *); inline void dfct(int, double *, double *, int *, double *); inline void dfst(int, double *, double *, int *, double *); inline void makewt(int nw, int* ip, double* w); inline void makect(int nc, int* ip, double* c); inline void bitrv2(int n, int* ip, double* a); inline void cftfsub(int n, double* a, double* w); inline void cftbsub(int n, double* a, double* w); inline void rftfsub(int n, double* a, int nc, double* c); inline void rftbsub(int n, double* a, int nc, double* c); inline void cft1st(int n, double* a, double* w); inline void cftmdl(int n, int l, double* a, double* w); inline Ooura_FFT::Ooura_FFT(int _frame_size, int _channels){ frame_size = _frame_size; channels = _channels; a = new double *[channels]; for (int i = 0; i < channels; i++) a[i] = new double[frame_size]; w = new double *[channels]; for (int i = 0; i < channels; i++) w[i] = new double[frame_size]; ip = new int *[channels]; for (int i = 0; i < channels; i++) ip[i] = new int[(int)(sqrt(frame_size / 2)) + 1]; } inline Ooura_FFT::~Ooura_FFT() { for (int i = 0; i < channels; i++) { delete[] a[i]; delete[] w[i]; delete[] ip[i]; } delete[] a; delete[] w; delete[] ip; } inline void Ooura_FFT::FFT(double **data) { int j; #pragma omp parallel for for (j = 0; j < channels; j++) { double *t; t = data[j]; ip[j][0] = 0; for (int i = 0; i < frame_size; i++) a[j][i] = t[i]; rdft(frame_size, 1, a[j], ip[j], w[j]); for (int i = 0; i < frame_size; i += 2) { t[i] = a[j][i]; t[i + 1] = -a[j][i + 1]; } t[1] = 0; t[frame_size] = a[j][1]; t[frame_size + 1] = 0; } } void Ooura_FFT::FFT(double ** data, int target_channels){ int j; #pragma omp parallel for for (j = 0; j < target_channels; j++) { double *t; t = data[j]; ip[j][0] = 0; for (int i = 0; i < frame_size; i++) a[j][i] = t[i]; rdft(frame_size, 1, a[j], ip[j], w[j]); for (int i = 0; i < frame_size; i += 2) { t[i] = a[j][i]; t[i + 1] = -a[j][i + 1]; } t[1] = 0; t[frame_size] = a[j][1]; t[frame_size + 1] = 0; } } inline void Ooura_FFT::FFT(double *data) { int j; #pragma omp parallel for for (j = 0; j < channels; j++) { double *t; t = &data[j*(frame_size+2)]; ip[j][0] = 0; for (int i = 0; i < frame_size; i++) a[j][i] = t[i]; rdft(frame_size, 1, a[j], ip[j], w[j]); for (int i = 0; i < frame_size; i += 2) { t[i] = a[j][i]; t[i + 1] = -a[j][i + 1]; } t[1] = 0; t[frame_size] = a[j][1]; t[frame_size + 1] = 0; } } inline void Ooura_FFT::iFFT(double **data) { int j; #pragma omp parallel for for (j = 0; j < channels; j++) { double *t; t = data[j]; ip[j][0] = 0; for (int i = 0; i < frame_size; i += 2) { a[j][i] = t[i]; a[j][i + 1] = -t[i + 1]; } a[j][1] = t[frame_size]; rdft(frame_size, -1, a[j], ip[j], w[j]); for (int i = 0; i < frame_size; i++) { a[j][i] *= 2.0; a[j][i] /= frame_size; } for (int i = 0; i < frame_size; i++) { t[i] = a[j][i]; } } } inline void Ooura_FFT::iFFT(double *data) { int j; #pragma omp parallel for for (j = 0; j < channels; j++) { double *t; t = &data[j*(frame_size+2)]; ip[j][0] = 0; for (int i = 0; i < frame_size; i += 2) { a[j][i] = t[i]; a[j][i + 1] = -t[i + 1]; } a[j][1] = t[frame_size]; rdft(frame_size, -1, a[j], ip[j], w[j]); for (int i = 0; i < frame_size; i++) { a[j][i] *= 2.0; a[j][i] /= frame_size; } for (int i = 0; i < frame_size; i++) { t[i] = a[j][i]; } } } inline void Ooura_FFT::SingleFFT(double *data) { int i; ip[0][0] = 0; for (i = 0; i < 
frame_size; i++) a[0][i] = data[i]; rdft(frame_size, 1, a[0], ip[0], w[0]); for (i = 0; i < frame_size; i += 2) { data[i] = a[0][i]; data[i + 1] = -a[0][i + 1]; } data[1] = 0; data[frame_size] = a[0][1]; data[frame_size + 1] = 0; } inline void Ooura_FFT::SingleiFFT(double *data) { int i; ip[0][0] = 0; for (i = 0; i < frame_size; i += 2) { a[0][i] = data[i]; a[0][i + 1] = -data[i + 1]; } a[0][1] = data[frame_size]; rdft(frame_size, -1, a[0], ip[0], w[0]); for (i = 0; i < frame_size; i++) { a[0][i] *= 2.0; a[0][i] /= frame_size; } for (i = 0; i < frame_size; i++) { data[i] = a[0][i]; } } inline void cdft(int n, int isgn, double *a, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void bitrv2(int n, int *ip, double *a); void bitrv2conj(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void cftbsub(int n, double *a, double *w); if (n > (ip[0] << 2)) { makewt(n >> 2, ip, w); } if (n > 4) { if (isgn >= 0) { bitrv2(n, ip + 2, a); cftfsub(n, a, w); } else { bitrv2conj(n, ip + 2, a); cftbsub(n, a, w); } } else if (n == 4) { cftfsub(n, a, w); } } inline void rdft(int n, int isgn, double *a, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void makect(int nc, int *ip, double *c); void bitrv2(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void cftbsub(int n, double *a, double *w); void rftfsub(int n, double *a, int nc, double *c); void rftbsub(int n, double *a, int nc, double *c); int nw, nc; double xi; nw = ip[0]; if (n > (nw << 2)) { nw = n >> 2; makewt(nw, ip, w); } nc = ip[1]; if (n > (nc << 2)) { nc = n >> 2; makect(nc, ip, w + nw); } if (isgn >= 0) { if (n > 4) { bitrv2(n, ip + 2, a); cftfsub(n, a, w); rftfsub(n, a, nc, w + nw); } else if (n == 4) { cftfsub(n, a, w); } xi = a[0] - a[1]; a[0] += a[1]; a[1] = xi; } else { a[1] = 0.5 * (a[0] - a[1]); a[0] -= a[1]; if (n > 4) { rftbsub(n, a, nc, w + nw); bitrv2(n, ip + 2, a); cftbsub(n, a, w); } else if (n == 4) { cftfsub(n, a, w); } } } inline void ddct(int n, int isgn, double *a, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void makect(int nc, int *ip, double *c); void bitrv2(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void cftbsub(int n, double *a, double *w); void rftfsub(int n, double *a, int nc, double *c); void rftbsub(int n, double *a, int nc, double *c); void dctsub(int n, double *a, int nc, double *c); int j, nw, nc; double xr; nw = ip[0]; if (n > (nw << 2)) { nw = n >> 2; makewt(nw, ip, w); } nc = ip[1]; if (n > nc) { nc = n; makect(nc, ip, w + nw); } if (isgn < 0) { xr = a[n - 1]; for (j = n - 2; j >= 2; j -= 2) { a[j + 1] = a[j] - a[j - 1]; a[j] += a[j - 1]; } a[1] = a[0] - xr; a[0] += xr; if (n > 4) { rftbsub(n, a, nc, w + nw); bitrv2(n, ip + 2, a); cftbsub(n, a, w); } else if (n == 4) { cftfsub(n, a, w); } } dctsub(n, a, nc, w + nw); if (isgn >= 0) { if (n > 4) { bitrv2(n, ip + 2, a); cftfsub(n, a, w); rftfsub(n, a, nc, w + nw); } else if (n == 4) { cftfsub(n, a, w); } xr = a[0] - a[1]; a[0] += a[1]; for (j = 2; j < n; j += 2) { a[j - 1] = a[j] - a[j + 1]; a[j] += a[j + 1]; } a[n - 1] = xr; } } inline void ddst(int n, int isgn, double *a, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void makect(int nc, int *ip, double *c); void bitrv2(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void cftbsub(int n, double *a, double *w); void rftfsub(int n, double *a, int nc, double *c); void rftbsub(int n, double *a, int nc, double *c); void dstsub(int n, double *a, int nc, double *c); int 
j, nw, nc; double xr; nw = ip[0]; if (n > (nw << 2)) { nw = n >> 2; makewt(nw, ip, w); } nc = ip[1]; if (n > nc) { nc = n; makect(nc, ip, w + nw); } if (isgn < 0) { xr = a[n - 1]; for (j = n - 2; j >= 2; j -= 2) { a[j + 1] = -a[j] - a[j - 1]; a[j] -= a[j - 1]; } a[1] = a[0] + xr; a[0] -= xr; if (n > 4) { rftbsub(n, a, nc, w + nw); bitrv2(n, ip + 2, a); cftbsub(n, a, w); } else if (n == 4) { cftfsub(n, a, w); } } dstsub(n, a, nc, w + nw); if (isgn >= 0) { if (n > 4) { bitrv2(n, ip + 2, a); cftfsub(n, a, w); rftfsub(n, a, nc, w + nw); } else if (n == 4) { cftfsub(n, a, w); } xr = a[0] - a[1]; a[0] += a[1]; for (j = 2; j < n; j += 2) { a[j - 1] = -a[j] - a[j + 1]; a[j] -= a[j + 1]; } a[n - 1] = -xr; } } inline void dfct(int n, double *a, double *t, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void makect(int nc, int *ip, double *c); void bitrv2(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void rftfsub(int n, double *a, int nc, double *c); void dctsub(int n, double *a, int nc, double *c); int j, k, l, m, mh, nw, nc; double xr, xi, yr, yi; nw = ip[0]; if (n > (nw << 3)) { nw = n >> 3; makewt(nw, ip, w); } nc = ip[1]; if (n > (nc << 1)) { nc = n >> 1; makect(nc, ip, w + nw); } m = n >> 1; yi = a[m]; xi = a[0] + a[n]; a[0] -= a[n]; t[0] = xi - yi; t[m] = xi + yi; if (n > 2) { mh = m >> 1; for (j = 1; j < mh; j++) { k = m - j; xr = a[j] - a[n - j]; xi = a[j] + a[n - j]; yr = a[k] - a[n - k]; yi = a[k] + a[n - k]; a[j] = xr; a[k] = yr; t[j] = xi - yi; t[k] = xi + yi; } t[mh] = a[mh] + a[n - mh]; a[mh] -= a[n - mh]; dctsub(m, a, nc, w + nw); if (m > 4) { bitrv2(m, ip + 2, a); cftfsub(m, a, w); rftfsub(m, a, nc, w + nw); } else if (m == 4) { cftfsub(m, a, w); } a[n - 1] = a[0] - a[1]; a[1] = a[0] + a[1]; for (j = m - 2; j >= 2; j -= 2) { a[2 * j + 1] = a[j] + a[j + 1]; a[2 * j - 1] = a[j] - a[j + 1]; } l = 2; m = mh; while (m >= 2) { dctsub(m, t, nc, w + nw); if (m > 4) { bitrv2(m, ip + 2, t); cftfsub(m, t, w); rftfsub(m, t, nc, w + nw); } else if (m == 4) { cftfsub(m, t, w); } a[n - l] = t[0] - t[1]; a[l] = t[0] + t[1]; k = 0; for (j = 2; j < m; j += 2) { k += l << 2; a[k - l] = t[j] - t[j + 1]; a[k + l] = t[j] + t[j + 1]; } l <<= 1; mh = m >> 1; for (j = 0; j < mh; j++) { k = m - j; t[j] = t[m + k] - t[m + j]; t[k] = t[m + k] + t[m + j]; } t[mh] = t[m + mh]; m = mh; } a[l] = t[0]; a[n] = t[2] - t[1]; a[0] = t[2] + t[1]; } else { a[1] = a[0]; a[2] = t[0]; a[0] = t[1]; } } inline void dfst(int n, double *a, double *t, int *ip, double *w) { void makewt(int nw, int *ip, double *w); void makect(int nc, int *ip, double *c); void bitrv2(int n, int *ip, double *a); void cftfsub(int n, double *a, double *w); void rftfsub(int n, double *a, int nc, double *c); void dstsub(int n, double *a, int nc, double *c); int j, k, l, m, mh, nw, nc; double xr, xi, yr, yi; nw = ip[0]; if (n > (nw << 3)) { nw = n >> 3; makewt(nw, ip, w); } nc = ip[1]; if (n > (nc << 1)) { nc = n >> 1; makect(nc, ip, w + nw); } if (n > 2) { m = n >> 1; mh = m >> 1; for (j = 1; j < mh; j++) { k = m - j; xr = a[j] + a[n - j]; xi = a[j] - a[n - j]; yr = a[k] + a[n - k]; yi = a[k] - a[n - k]; a[j] = xr; a[k] = yr; t[j] = xi + yi; t[k] = xi - yi; } t[0] = a[mh] - a[n - mh]; a[mh] += a[n - mh]; a[0] = a[m]; dstsub(m, a, nc, w + nw); if (m > 4) { bitrv2(m, ip + 2, a); cftfsub(m, a, w); rftfsub(m, a, nc, w + nw); } else if (m == 4) { cftfsub(m, a, w); } a[n - 1] = a[1] - a[0]; a[1] = a[0] + a[1]; for (j = m - 2; j >= 2; j -= 2) { a[2 * j + 1] = a[j] - a[j + 1]; a[2 * j - 1] = -a[j] - a[j + 1]; } l = 2; m = 
mh; while (m >= 2) { dstsub(m, t, nc, w + nw); if (m > 4) { bitrv2(m, ip + 2, t); cftfsub(m, t, w); rftfsub(m, t, nc, w + nw); } else if (m == 4) { cftfsub(m, t, w); } a[n - l] = t[1] - t[0]; a[l] = t[0] + t[1]; k = 0; for (j = 2; j < m; j += 2) { k += l << 2; a[k - l] = -t[j] - t[j + 1]; a[k + l] = t[j] - t[j + 1]; } l <<= 1; mh = m >> 1; for (j = 1; j < mh; j++) { k = m - j; t[j] = t[m + k] + t[m + j]; t[k] = t[m + k] - t[m + j]; } t[0] = t[m + mh]; m = mh; } a[l] = t[0]; } a[0] = 0; } /* -------- initializing routines -------- */ inline void makewt(int nw, int *ip, double *w) { void bitrv2(int n, int *ip, double *a); int j, nwh; double delta, x, y; ip[0] = nw; ip[1] = 1; if (nw > 2) { nwh = nw >> 1; delta = atan(1.0) / nwh; w[0] = 1; w[1] = 0; w[nwh] = cos(delta * nwh); w[nwh + 1] = w[nwh]; if (nwh > 2) { for (j = 2; j < nwh; j += 2) { x = cos(delta * j); y = sin(delta * j); w[j] = x; w[j + 1] = y; w[nw - j] = y; w[nw - j + 1] = x; } bitrv2(nw, ip + 2, w); } } } inline void makect(int nc, int *ip, double *c) { int j, nch; double delta; ip[1] = nc; if (nc > 1) { nch = nc >> 1; delta = atan(1.0) / nch; c[0] = cos(delta * nch); c[nch] = 0.5 * c[0]; for (j = 1; j < nch; j++) { c[j] = 0.5 * cos(delta * j); c[nc - j] = 0.5 * sin(delta * j); } } } /* -------- child routines -------- */ inline void bitrv2(int n, int *ip, double *a) { int j, j1, k, k1, l, m, m2; double xr, xi, yr, yi; ip[0] = 0; l = n; m = 1; while ((m << 3) < l) { l >>= 1; for (j = 0; j < m; j++) { ip[m + j] = ip[j] + l; } m <<= 1; } m2 = 2 * m; if ((m << 3) == l) { for (k = 0; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 -= m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } j1 = 2 * k + m2 + ip[k]; k1 = j1 + m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } } else { for (k = 1; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } } } } inline void bitrv2conj(int n, int *ip, double *a) { int j, j1, k, k1, l, m, m2; double xr, xi, yr, yi; ip[0] = 0; l = n; m = 1; while ((m << 3) < l) { l >>= 1; for (j = 0; j < m; j++) { ip[m + j] = ip[j] + l; } m <<= 1; } m2 = 2 * m; if ((m << 3) == l) { for (k = 0; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 -= m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } k1 = 2 * k 
+ ip[k]; a[k1 + 1] = -a[k1 + 1]; j1 = k1 + m2; k1 = j1 + m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; k1 += m2; a[k1 + 1] = -a[k1 + 1]; } } else { a[1] = -a[1]; a[m2 + 1] = -a[m2 + 1]; for (k = 1; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } k1 = 2 * k + ip[k]; a[k1 + 1] = -a[k1 + 1]; a[k1 + m2 + 1] = -a[k1 + m2 + 1]; } } } inline void cftfsub(int n, double *a, double *w) { void cft1st(int n, double *a, double *w); void cftmdl(int n, int l, double *a, double *w); int j, j1, j2, j3, l; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; l = 2; if (n > 8) { cft1st(n, a, w); l = 8; while ((l << 2) < n) { cftmdl(n, l, a, w); l <<= 2; } } if ((l << 2) == n) { for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i - x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i + x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i - x3r; } } else { for (j = 0; j < l; j += 2) { j1 = j + l; x0r = a[j] - a[j1]; x0i = a[j + 1] - a[j1 + 1]; a[j] += a[j1]; a[j + 1] += a[j1 + 1]; a[j1] = x0r; a[j1 + 1] = x0i; } } } inline void cftbsub(int n, double *a, double *w) { void cft1st(int n, double *a, double *w); void cftmdl(int n, int l, double *a, double *w); int j, j1, j2, j3, l; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; l = 2; if (n > 8) { cft1st(n, a, w); l = 8; while ((l << 2) < n) { cftmdl(n, l, a, w); l <<= 2; } } if ((l << 2) == n) { for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = -a[j + 1] - a[j1 + 1]; x1r = a[j] - a[j1]; x1i = -a[j + 1] + a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i - x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i + x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i - x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i + x3r; } } else { for (j = 0; j < l; j += 2) { j1 = j + l; x0r = a[j] - a[j1]; x0i = -a[j + 1] + a[j1 + 1]; a[j] += a[j1]; a[j + 1] = -a[j + 1] - a[j1 + 1]; a[j1] = x0r; a[j1 + 1] = x0i; } } } inline void cft1st(int n, double *a, double *w) { int j, k1, k2; double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; x0r = a[0] + a[2]; x0i = a[1] + a[3]; x1r = a[0] - a[2]; x1i = a[1] - a[3]; x2r = a[4] + a[6]; x2i = a[5] + a[7]; x3r = a[4] - a[6]; x3i = a[5] - a[7]; a[0] = x0r + x2r; a[1] = x0i + x2i; a[4] = x0r - x2r; a[5] = x0i - x2i; a[2] = x1r - x3i; a[3] = x1i + x3r; a[6] = x1r + x3i; a[7] = x1i - x3r; wk1r = w[2]; x0r = a[8] + a[10]; x0i = a[9] + a[11]; x1r = a[8] - a[10]; x1i = a[9] - a[11]; x2r = a[12] + a[14]; x2i = a[13] + a[15]; x3r = a[12] - a[14]; x3i = a[13] - a[15]; a[8] = x0r + x2r; a[9] = x0i + x2i; a[12] = x2i - x0i; a[13] = x0r - x2r; x0r = x1r - x3i; x0i = x1i + x3r; a[10] = wk1r * (x0r - x0i); a[11] = wk1r * (x0r + x0i); x0r = x3i + x1r; x0i = x3r - x1i; a[14] = wk1r * (x0i - x0r); a[15] = wk1r * (x0i + x0r); k1 = 0; for (j = 16; j < n; j += 16) { k1 += 2; k2 = 2 * k1; wk2r = w[k1]; wk2i = w[k1 + 1]; wk1r = 
w[k2]; wk1i = w[k2 + 1]; wk3r = wk1r - 2 * wk2i * wk1i; wk3i = 2 * wk2i * wk1r - wk1i; x0r = a[j] + a[j + 2]; x0i = a[j + 1] + a[j + 3]; x1r = a[j] - a[j + 2]; x1i = a[j + 1] - a[j + 3]; x2r = a[j + 4] + a[j + 6]; x2i = a[j + 5] + a[j + 7]; x3r = a[j + 4] - a[j + 6]; x3i = a[j + 5] - a[j + 7]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j + 4] = wk2r * x0r - wk2i * x0i; a[j + 5] = wk2r * x0i + wk2i * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j + 2] = wk1r * x0r - wk1i * x0i; a[j + 3] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j + 6] = wk3r * x0r - wk3i * x0i; a[j + 7] = wk3r * x0i + wk3i * x0r; wk1r = w[k2 + 2]; wk1i = w[k2 + 3]; wk3r = wk1r - 2 * wk2r * wk1i; wk3i = 2 * wk2r * wk1r - wk1i; x0r = a[j + 8] + a[j + 10]; x0i = a[j + 9] + a[j + 11]; x1r = a[j + 8] - a[j + 10]; x1i = a[j + 9] - a[j + 11]; x2r = a[j + 12] + a[j + 14]; x2i = a[j + 13] + a[j + 15]; x3r = a[j + 12] - a[j + 14]; x3i = a[j + 13] - a[j + 15]; a[j + 8] = x0r + x2r; a[j + 9] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j + 12] = -wk2i * x0r - wk2r * x0i; a[j + 13] = -wk2i * x0i + wk2r * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j + 10] = wk1r * x0r - wk1i * x0i; a[j + 11] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j + 14] = wk3r * x0r - wk3i * x0i; a[j + 15] = wk3r * x0i + wk3i * x0r; } } inline void cftmdl(int n, int l, double *a, double *w) { int j, j1, j2, j3, k, k1, k2, m, m2; double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; m = l << 2; for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i - x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i + x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i - x3r; } wk1r = w[2]; for (j = m; j < l + m; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x2i - x0i; a[j2 + 1] = x0r - x2r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * (x0r - x0i); a[j1 + 1] = wk1r * (x0r + x0i); x0r = x3i + x1r; x0i = x3r - x1i; a[j3] = wk1r * (x0i - x0r); a[j3 + 1] = wk1r * (x0i + x0r); } k1 = 0; m2 = 2 * m; for (k = m2; k < n; k += m2) { k1 += 2; k2 = 2 * k1; wk2r = w[k1]; wk2i = w[k1 + 1]; wk1r = w[k2]; wk1i = w[k2 + 1]; wk3r = wk1r - 2 * wk2i * wk1i; wk3i = 2 * wk2i * wk1r - wk1i; for (j = k; j < l + k; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j2] = wk2r * x0r - wk2i * x0i; a[j2 + 1] = wk2r * x0i + wk2i * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * x0r - wk1i * x0i; a[j1 + 1] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j3] = wk3r * x0r - wk3i * x0i; a[j3 + 1] = wk3r * x0i + wk3i * x0r; } wk1r = w[k2 + 2]; wk1i = w[k2 + 3]; wk3r = wk1r - 2 * wk2r * wk1i; wk3i = 2 * wk2r * wk1r - wk1i; for (j = k + m; j < l + (k + m); j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = 
a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j2] = -wk2i * x0r - wk2r * x0i; a[j2 + 1] = -wk2i * x0i + wk2r * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * x0r - wk1i * x0i; a[j1 + 1] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j3] = wk3r * x0r - wk3i * x0i; a[j3 + 1] = wk3r * x0i + wk3i * x0r; } } } inline void rftfsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr, xi, yr, yi; m = n >> 1; ks = 2 * nc / m; kk = 0; for (j = 2; j < m; j += 2) { k = n - j; kk += ks; wkr = 0.5 - c[nc - kk]; wki = c[kk]; xr = a[j] - a[k]; xi = a[j + 1] + a[k + 1]; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; a[j] -= yr; a[j + 1] -= yi; a[k] += yr; a[k + 1] -= yi; } } inline void rftbsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr, xi, yr, yi; a[1] = -a[1]; m = n >> 1; ks = 2 * nc / m; kk = 0; for (j = 2; j < m; j += 2) { k = n - j; kk += ks; wkr = 0.5 - c[nc - kk]; wki = c[kk]; xr = a[j] - a[k]; xi = a[j + 1] + a[k + 1]; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; a[j] -= yr; a[j + 1] = yi - a[j + 1]; a[k] += yr; a[k + 1] = yi - a[k + 1]; } a[m + 1] = -a[m + 1]; } inline void dctsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr; m = n >> 1; ks = nc / n; kk = 0; for (j = 1; j < m; j++) { k = n - j; kk += ks; wkr = c[kk] - c[nc - kk]; wki = c[kk] + c[nc - kk]; xr = wki * a[j] - wkr * a[k]; a[j] = wkr * a[j] + wki * a[k]; a[k] = xr; } a[m] *= c[0]; } inline void dstsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr; m = n >> 1; ks = nc / n; kk = 0; for (j = 1; j < m; j++) { k = n - j; kk += ks; wkr = c[kk] - c[nc - kk]; wki = c[kk] + c[nc - kk]; xr = wki * a[k] - wkr * a[j]; a[k] = wkr * a[k] + wki * a[j]; a[j] = xr; } a[m] *= c[0]; } #endif
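// A minimal usage sketch appended for reference (it assumes the Ooura_FFT declaration from this
// header, and frame_size must be a power of two, as Ooura's rdft requires). It shows the buffer
// layout the single-pointer FFT/iFFT overloads expect: each channel occupies frame_size + 2
// doubles, spectra are stored as (re, im) pairs, and the real-valued Nyquist bin lands in
// data[frame_size].
#include <vector>
inline void ooura_fft_roundtrip_demo()
{
    const int frame_size = 512, channels = 2;
    Ooura_FFT fft(frame_size, channels);
    std::vector<double> data(channels * (frame_size + 2), 0.0);
    data[1] = 1.0;          // a unit sample in channel 0
    fft.FFT(data.data());   // forward: time samples -> interleaved spectrum per channel
    fft.iFFT(data.data());  // inverse: the 2/frame_size scaling inside iFFT restores the input
}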
omp.c
// RUN: mlir-clang %s --function=* -fopenmp -S | FileCheck %s void square(double* x, int sstart, int send, int sinc) { #pragma omp parallel for for(int i=sstart; i < send; i+= sinc) { x[i] = i; } } // CHECK: func @square(%arg0: memref<?xf64>, %arg1: i32, %arg2: i32, %arg3: i32) attributes {llvm.linkage = #llvm.linkage<external>} { // CHECK-DAG: %c1 = arith.constant 1 : index // CHECK-DAG: %[[i0:.+]] = arith.index_cast %arg1 : i32 to index // CHECK-DAG: %[[i1:.+]] = arith.index_cast %arg2 : i32 to index // CHECK-DAG: %[[i2:.+]] = arith.index_cast %arg3 : i32 to index // CHECK-NEXT: %[[i3:.+]] = arith.subi %[[i1]], %[[i0]] : index // CHECK-NEXT: %4 = arith.subi %[[i3]], %c1 : index // CHECK-NEXT: %5 = arith.addi %4, %[[i2]] : index // CHECK-NEXT: %6 = arith.divui %5, %[[i2]] : index // CHECK-NEXT: %7 = arith.muli %6, %[[i2]] : index // CHECK-NEXT: %8 = arith.addi %[[i0]], %7 : index // CHECK-NEXT: scf.parallel (%arg4) = (%[[i0]]) to (%8) step (%[[i2]]) { // CHECK-NEXT: %9 = arith.index_cast %arg4 : index to i32 // CHECK-NEXT: %10 = arith.sitofp %9 : i32 to f64 // CHECK-NEXT: memref.store %10, %arg0[%arg4] : memref<?xf64> // CHECK-NEXT: scf.yield // CHECK-NEXT: } // CHECK-NEXT: return // CHECK-NEXT: }
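// A companion sketch, not part of the FileCheck test: plain C mirroring the bound arithmetic the
// CHECK lines verify. The lowering ceil-divides the span by the step and rebuilds an exclusive
// upper bound for scf.parallel; divui matches this signed version for the nonnegative starts,
// ends, and steps assumed here.
int parallel_upper_bound(int sstart, int send, int sinc) {
  int span = send - sstart;             // subi %[[i1]], %[[i0]]
  int count = (span - 1 + sinc) / sinc; // subi/addi/divui: ceil(span / sinc)
  return sstart + count * sinc;         // muli + addi: first index past the last iteration
}
// e.g. sstart=0, send=10, sinc=3 gives 12, so scf.parallel runs i = 0, 3, 6, 9.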
GB_unop__identity_int8_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_bool) // op(A') function: GB (_unop_tran__identity_int8_bool) // C type: int8_t // A type: bool // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = (int8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_bool) ( int8_t *Cx, // Cx and Ax may be aliased const bool *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; bool aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_bool) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
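// A usage-level sketch (a hypothetical driver, not generated code; error handling omitted):
// applying the built-in identity operator to a GrB_BOOL matrix with an int8 result is what
// dispatches to the apply kernel above.
static GrB_Info apply_identity_int8_demo (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix_new (C, GrB_INT8, nrows, ncols) ;
    // entrywise bool -> int8 typecast + identity, i.e. GB (_unop_apply__identity_int8_bool)
    return (GrB_Matrix_apply (*C, NULL, NULL, GrB_IDENTITY_INT8, A, NULL)) ;
}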
DenseMatrix.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseMatrix.h // \brief Header file for the OpenMP-based dense matrix SMP implementation // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/AlignmentFlag.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseMatrix.h> #include <blaze/math/expressions/SparseMatrix.h> #include <blaze/math/functors/AddAssign.h> #include <blaze/math/functors/Assign.h> #include <blaze/math/functors/MultAssign.h> #include <blaze/math/functors/SchurAssign.h> #include <blaze/math/functors/SubAssign.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/smp/ThreadMapping.h> #include <blaze/math/StorageOrder.h> #include <blaze/math/typetraits/IsDenseMatrix.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Submatrix.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/mpl/And.h> #include <blaze/util/mpl/Not.h> #include <blaze/util/mpl/Or.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side dense matrix to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side dense matrix , bool SO2 // Storage order of the right-hand side dense matrix , typename OP > // Type of the assignment operation void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_<MT1>; using ET2 = ElementType_<MT2>; constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads( omp_get_num_threads() ); const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) ); const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL ); const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 ); const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) ); const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) ); const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL ); const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 ); const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) ); const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0; i<threads; ++i ) { const size_t row ( ( i / threadmap.second ) * rowsPerThread ); const size_t column( ( i % threadmap.second ) * colsPerThread ); if( row >= (~rhs).rows() || column >= (~rhs).columns() ) continue; const size_t m( min( rowsPerThread, (~rhs).rows() - row ) ); const size_t n( min( colsPerThread, (~rhs).columns() - column ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( submatrix<aligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( submatrix<aligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else { auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side sparse matrix to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side sparse matrix , bool SO2 // Storage order of the right-hand side sparse matrix , typename OP > // Type of the assignment operation void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const size_t threads( omp_get_num_threads() ); const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) ); const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL ); const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 ); const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 ); #pragma omp for schedule(dynamic,1) nowait for( size_t i=0; i<threads; ++i ) { const size_t row ( ( i / threadmap.second ) * rowsPerThread ); const size_t column( ( i % threadmap.second ) * colsPerThread ); if( row >= (~rhs).rows() || column >= (~rhs).columns() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, Assign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense matrix. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, AddAssign() ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment of a matrix to a // dense matrix. Due to the explicit application of the SFINAE principle, this function can only // be selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator.
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SubAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SCHUR PRODUCT ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the default OpenMP-based SMP Schur product assignment to a dense // matrix. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); schurAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { schurAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SchurAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< IsDenseMatrix<MT1> > smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); multAssign( ~lhs, ~rhs ); } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
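// A usage sketch appended for reference, not part of the original header (assumptions: Blaze
// configured with BLAZE_OPENMP_PARALLEL_MODE and an active OpenMP runtime; types and sizes are
// illustrative). The kernels above are never called directly -- plain assignments select them
// through the smpAssign()/smpAddAssign() overloads via SFINAE:
#include <blaze/math/DynamicMatrix.h>
inline void smpAssignDemo()
{
   blaze::DynamicMatrix<double> A( 2000UL, 2000UL, 1.0 );
   blaze::DynamicMatrix<double> B( 2000UL, 2000UL );
   B = A;    // dispatches to smpAssign()    -> openmpAssign( ~B, ~A, Assign() )
   B += A;   // dispatches to smpAddAssign() -> openmpAssign( ~B, ~A, AddAssign() )
}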
atomic-8.c
/* { dg-do compile } */ long double z; void f3(void) { #pragma omp atomic z++; #pragma omp atomic z--; #pragma omp atomic ++z; #pragma omp atomic --z; #pragma omp atomic z += 1; #pragma omp atomic z *= 3; #pragma omp atomic z /= 3; }
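/* A runnable companion (not part of the dg test): the same atomic update forms,
   exercised from a parallel loop; the result is deterministic for any thread count. */
long double acc;
int main(void)
{
  acc = 0.0L;
#pragma omp parallel for
  for (int i = 0; i < 100; i++) {
#pragma omp atomic
    acc += 1;
#pragma omp atomic
    acc *= 1;  /* compound multiply is a valid atomic update form too; *= 1 is a no-op */
  }
  return acc == 100.0L ? 0 : 1;  /* 100 atomic increments */
}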
twoRegions.c
int N=100; int b[100],c[100]; int j; void foo() { #pragma omp parallel for shared(b) firstprivate(c) for (j=0; j<N; j++) { b[j] = c[j]; } #pragma omp parallel for shared(b) firstprivate(c) for (j=0; j<N; j++) { b[j] = c[j]; } }
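// Clause semantics in a nutshell (an illustrative addition, not part of the original test):
// shared(b) means every thread writes the one global array, while firstprivate(c) gives each
// thread a private copy of c initialized from the global one, and writes to that copy are
// discarded when the region ends. The loop variable j is privatized automatically by
// 'parallel for'.
void bar() {
  #pragma omp parallel for shared(b) firstprivate(c)
  for (j=0; j<N; j++) {
    c[j] = c[j] + 1;   // updates the thread-private copy only
    b[j] = c[j];       // persists: b is shared
  }                    // the global c is unchanged here
}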
GB_unaryop__minv_uint8_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint8_uint64 // op(A') function: GB_tran__minv_uint8_uint64 // C type: uint8_t // A type: uint64_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint8_uint64 ( uint8_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint8_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
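// For reference, a sketch of the macro expansion (not generated code): one iteration of the
// apply loop above, with GB_CAST_OP(p,p) substituted, is the body below. The uint64 entry is
// narrowed to uint8 first, then the 8-bit unsigned multiplicative inverse is taken
// (GB_IMINV_UNSIGNED comes from GB.h and is not reproduced here).
static inline void GB_cast_op_expanded_demo (uint8_t *Cx, const uint64_t *Ax, int64_t p)
{
    uint64_t aij = Ax [p] ;               // GB_GETA
    uint8_t x = (uint8_t) aij ;           // GB_CASTING: typecast first
    Cx [p] = GB_IMINV_UNSIGNED (x, 8) ;   // GB_OP: then the unsigned minv
}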
convolution_winograd_transform_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_input_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float16x4_t _r00 = vld1_f16(r0); float16x4_t _r01 = vld1_f16(r0 + 4); float16x4_t _r02 = vld1_f16(r0 + 8); float16x4_t _r03 = vld1_f16(r0 + 12); float16x4_t _r04 = vld1_f16(r0 + 16); float16x4_t _r05 = vld1_f16(r0 + 20); float16x4_t _r06 = vld1_f16(r0 + 24); float16x4_t _r07 = vld1_f16(r0 + 28); float16x4_t _tmp0m = vfma_n_f16(vsub_f16(_r00, _r06), vsub_f16(_r04, _r02), 5.25f); float16x4_t _tmp7m = vfma_n_f16(vsub_f16(_r07, _r01), vsub_f16(_r03, _r05), 5.25f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[7][m], _tmp7m); float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_r02, _r06), _r04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_r01, _r05), _r03, 4.25f); float16x4_t _tmp1m = vadd_f16(_tmp12a, _tmp12b); float16x4_t _tmp2m = vsub_f16(_tmp12a, _tmp12b); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[2][m], _tmp2m); float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); float16x4_t _tmp3m = vadd_f16(_tmp34a, _tmp34b); float16x4_t _tmp4m = vsub_f16(_tmp34a, _tmp34b); 
vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[4][m], _tmp4m); float16x4_t _tmp56a = vfma_n_f16(_r06, vfms_n_f16(_r02, _r04, 1.25f), 4.f); float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); float16x4_t _tmp5m = vadd_f16(_tmp56a, _tmp56b); float16x4_t _tmp6m = vsub_f16(_tmp56a, _tmp56b); vst1_f16(tmp[5][m], _tmp5m); vst1_f16(tmp[6][m], _tmp6m); r0 += w * 4; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 4; __fp16* r0_tm_1 = r0_tm_0 + tiles * 4; __fp16* r0_tm_2 = r0_tm_0 + tiles * 8; __fp16* r0_tm_3 = r0_tm_0 + tiles * 12; __fp16* r0_tm_4 = r0_tm_0 + tiles * 16; __fp16* r0_tm_5 = r0_tm_0 + tiles * 20; __fp16* r0_tm_6 = r0_tm_0 + tiles * 24; __fp16* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _r0tm0 = vfma_n_f16(vsub_f16(_tmp00, _tmp06), vsub_f16(_tmp04, _tmp02), 5.25f); float16x4_t _r0tm7 = vfma_n_f16(vsub_f16(_tmp07, _tmp01), vsub_f16(_tmp03, _tmp05), 5.25f); float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_tmp01, _tmp05), _tmp03, 4.25f); float16x4_t _r0tm1 = vadd_f16(_tmp12a, _tmp12b); float16x4_t _r0tm2 = vsub_f16(_tmp12a, _tmp12b); float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); float16x4_t _r0tm3 = vadd_f16(_tmp34a, _tmp34b); float16x4_t _r0tm4 = vsub_f16(_tmp34a, _tmp34b); float16x4_t _tmp56a = vfma_n_f16(_tmp06, vfms_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); float16x4_t _r0tm5 = vadd_f16(_tmp56a, _tmp56b); float16x4_t _r0tm6 = vsub_f16(_tmp56a, _tmp56b); vst1_f16(r0_tm_0, _r0tm0); vst1_f16(r0_tm_1, _r0tm1); vst1_f16(r0_tm_2, _r0tm2); vst1_f16(r0_tm_3, _r0tm3); vst1_f16(r0_tm_4, _r0tm4); vst1_f16(r0_tm_5, _r0tm5); vst1_f16(r0_tm_6, _r0tm6); vst1_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } static void conv3x3s1_winograd63_transform_output_pack4_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const __fp16* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for 
num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float16x4_t _bias0 = biasptr ? vld1_f16(biasptr + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); 
vst1_f16(output0 + 16, _out04); float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); output0 += outw * 4; } } } } }
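/*
 * Editor's note: a minimal scalar sketch of the one-dimensional F(6,3) Winograd
 * output transform that the NEON code above evaluates, per fp16 lane, with
 * vfma_n_f16/vfms_n_f16 (see the otm[6][8] matrix in the comments). Useful for
 * cross-checking the intrinsic factoring. The function name is illustrative,
 * not part of ncnn.
 */
static void winograd63_output_transform_1d_ref(const float r[8], float out[6])
{
    /* Common subexpressions, mirroring _tmp024a/_tmp135a etc. above. */
    float a0 = r[1] + r[2], b0 = r[1] - r[2];
    float a1 = r[3] + r[4], b1 = r[3] - r[4];
    float a2 = r[5] + r[6], b2 = r[5] - r[6];

    out[0] = r[0] + a0 + a1 + a2 * 32.f;
    out[1] = b0 + b1 * 2.f + b2 * 16.f;
    out[2] = a0 + a1 * 4.f + a2 * 8.f;
    out[3] = b0 + b1 * 8.f + b2 * 4.f;
    out[4] = a0 + a1 * 16.f + a2 * 2.f;
    out[5] = r[7] + b0 + b1 * 32.f + b2;
}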
sync.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. */ #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include <arch/x86/barrelfish_kpi/asm_inlines_arch.h> #define GANG_SCHEDULING #undef MEASURE_SYNC #define MEASURE #define WORK_PERIOD 5000000000UL #define STACK_SIZE (64 * 1024) int main(int argc, char *argv[]) { uint64_t now, start; volatile uint64_t workcnt, workload = 0; int64_t workmax = 1000; int64_t i; if(argc == 1) { printf("calibrating...\n"); do { workload = 0; workmax *= 2; start = rdtsc(); #pragma omp parallel private(i,workload) for(i = 0; i < workmax; i++) { #pragma omp barrier workload++; } now = rdtsc(); } while(now - start < WORK_PERIOD); printf("workmax = %ld\n", workmax); return 0; } else { workmax = atol(argv[1]); } int nthreads = omp_get_max_threads(); if(argc == 3) { nthreads = atoi(argv[2]); //assert(!"REVISE!!!"); bomp_bomp_init(nthreads); omp_set_num_threads(nthreads); } printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax, omp_get_num_procs()); #ifdef MEASURE_SYNC uint64_t waits[16] = { 0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000, 0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000 }; uint64_t ts[16][10]; printf("before sync:\n"); #pragma omp parallel private(workcnt) { for(int j = 0; j < waits[omp_get_thread_num()]; j++) { workcnt++; } for(int j = 0; j < 10; j++) { ts[omp_get_thread_num()][j] = rdtsc(); } } for(int j = 0; j < 10; j++) { printf("timestamp %d: ", j); for(int n = 1; n < nthreads; n++) { printf("%ld ", ts[n][j] - ts[n - 1][j]); } printf("\n"); } printf("after sync:\n"); #pragma omp parallel { bomp_synchronize(); for(int j = 0; j < 10; j++) { ts[omp_get_thread_num()][j] = rdtsc(); } } for(int j = 0; j < 10; j++) { printf("timestamp %d: ", j); for(int n = 1; n < nthreads; n++) { printf("%ld ", ts[n][j] - ts[n - 1][j]); } printf("\n"); } #endif #ifdef GANG_SCHEDULING #pragma omp parallel { // bomp_synchronize(); } #endif start = rdtsc(); #ifdef MEASURE # define MAXTHREADS 16 # define WORKMAX 10000 static uint64_t starta[MAXTHREADS][WORKMAX]; static uint64_t end1[MAXTHREADS][WORKMAX]; static uint64_t end2[MAXTHREADS][WORKMAX]; #endif // Do some work #pragma omp parallel private(workcnt,i) for(i = 0; i < workmax; i++) { #ifdef MEASURE /* clamp to the last slot so iterations beyond WORKMAX do not write out of bounds */ starta[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc(); #endif workcnt++; #ifdef MEASURE end1[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc(); #endif #pragma omp barrier #ifdef MEASURE end2[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc(); #endif } now = rdtsc(); #ifdef MEASURE printf("avg compute time: "); for(int n = 0; n < nthreads; n++) { uint64_t sum = 0, min = end1[0][0], max = 0; for(i = 0; i < WORKMAX; i++) { uint64_t val = end1[n][i] - starta[n][i]; sum += val; min = val < min ? val : min; max = val > max ?
val : max; } printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max); } printf("\n"); #if 0 printf("wait time dump:\n"); for(i = 0; i < WORKMAX; i++) { for(int n = 0; n < nthreads; n++) { uint64_t val = end2[n][i] - end1[n][i]; printf("%lu ", val); } printf("\n"); } #endif printf("avg wait time: "); for(int n = 0; n < nthreads; n++) { uint64_t sum = 0, min = end2[0][0], max = 0; for(i = 0; i < WORKMAX; i++) { uint64_t val = end2[n][i] - end1[n][i]; sum += val; min = val < min ? val : min; max = val > max ? val : max; } printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max); } printf("\n"); #endif printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start); for(;;); return 0; }
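/*
 * Editor's note: sync.c takes rdtsc() from the Barrelfish arch header
 * (arch/x86/barrelfish_kpi/asm_inlines_arch.h). For reference, an equivalent
 * x86-64 cycle-counter read with GCC/Clang inline assembly looks like the
 * sketch below (assumption: GCC-style asm on x86-64; the Barrelfish version
 * may differ in serialization details, and the name rdtsc_ref is illustrative).
 */
#include <stdint.h>

static inline uint64_t rdtsc_ref(void)
{
    uint32_t lo, hi;
    /* RDTSC returns the 64-bit timestamp counter split across EDX:EAX. */
    __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)hi << 32) | lo;
}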
GB_binop__lxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_int16) // A.*B function (eWiseMult): GB (_AemultB_01__lxor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_int16) // A.*B function (eWiseMult): GB (_AemultB_03__lxor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int16) // A*D function (colscale): GB (_AxD__lxor_int16) // D*A function (rowscale): GB (_DxB__lxor_int16) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int16) // C=scalar+B GB (_bind1st__lxor_int16) // C=scalar+B' GB (_bind1st_tran__lxor_int16) // C=A+scalar GB (_bind2nd__lxor_int16) // C=A'+scalar GB (_bind2nd_tran__lxor_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lxor_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lxor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lxor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lxor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lxor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lxor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lxor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
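/*
 * Editor's note: a self-contained illustration of the LXOR semantics
 * hard-coded above, z = ((x != 0) != (y != 0)): any nonzero input counts as
 * "true", and the result is 0 or 1 in the output type (int16_t here). LXOR is
 * commutative, which is why GB_BINOP_FLIP is 0 and no flipped variant is
 * needed. This standalone demo is not part of SuiteSparse:GraphBLAS.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int16_t a[4] = {0, 7, -3, 0};
    const int16_t b[4] = {0, 0, 5, 2};
    for (int p = 0; p < 4; p++)
    {
        int16_t z = ((a[p] != 0) != (b[p] != 0)) ;    /* GB_BINOP for LXOR_INT16 */
        printf("lxor(%d,%d) = %d\n", a[p], b[p], z);  /* prints 0, 1, 0, 1 */
    }
    return 0;
}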
imagenes.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <omp.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) #define MAXCAD 100 #define NUM_PASOS 5 #define DIST_RADIO 8 #define IMAGEN_ENTRADA "Lenna.ppm" #define IMAGEN_SALIDA "lenna-fil.ppm" struct pixel { unsigned char r, g, b; }; int lee_ppm(char *nomfich, struct pixel ***img, int *nf, int *nc) { FILE *df; char tipo[MAXCAD]; int nivmax, height, width, i, j; df = fopen(nomfich, "r"); if (!df) return -1; /* fopen failed */ fscanf(df, "%s", tipo); if (strcmp(tipo, "P3") != 0) return -2; /* wrong format */ fscanf(df, "%d%d", &width, &height); if ((*img = (struct pixel **)malloc(sizeof(struct pixel *) * height)) == NULL) { return -3; /* malloc failed */ } if (((*img)[0] = (struct pixel *)malloc(sizeof(struct pixel) * width * height)) == NULL) { return -3; /* malloc failed */ } for (i = 1; i < height; i++) (*img)[i] = (*img)[i - 1] + width; fscanf(df, "%d", &nivmax); for (i = 0; i < height; i++) for (j = 0; j < width; j++) { fscanf(df, "%hhu", &((*img)[i][j].r)); fscanf(df, "%hhu", &((*img)[i][j].g)); fscanf(df, "%hhu", &((*img)[i][j].b)); } *nf = height; *nc = width; fclose(df); return 0; } int escribe_ppm(char *nomfich, struct pixel **img, int nf, int nc) { FILE *df; int nivmax = 255, i, j; df = fopen(nomfich, "w"); if (!df) return -1; /* fopen failed */ fprintf(df, "P3\n"); fprintf(df, "%d %d\n", nc, nf); fprintf(df, "%d\n", nivmax); for (i = 0; i < nf; i++) { for (j = 0; j < nc; j++) { fprintf(df, "%d ", img[i][j].r); fprintf(df, "%d ", img[i][j].g); fprintf(df, "%d ", img[i][j].b); } fprintf(df, "\n"); } fclose(df); return 0; } int Filtro(int pasos, int radio, struct pixel **ppsImagenOrg, struct pixel **ppsImagenDst, int n, int m) { int i, j, k, l, p, tot; int r, g, b; int **ppdBloque, v; if ((ppdBloque = (int **)malloc(sizeof(int *) * (2*radio + 1))) == NULL) { return -3; /* malloc failed */ } if ((ppdBloque[0] = (int *)malloc(sizeof(int) * (2*radio + 1) * (2*radio + 1))) == NULL) { return -3; /* malloc failed */ } for (i = 1; i < 2*radio + 1; i++) ppdBloque[i] = ppdBloque[i - 1] + 2*radio + 1; for (i = -radio; i <= radio; i++) for (j = -radio; j <= radio; j++) ppdBloque[i + radio][j + radio] = (radio - abs(i)) * (radio - abs(i)) + (radio - abs(j)) * (radio - abs(j)) + 1; // It makes no sense to parallelize the pass loop because each // iteration depends on the previous one to form the filtered image.
// #pragma omp parallel for reduction(+:tot) private(i,j,r,g,b,l,v) for (p = 0; p < pasos; p++) { #pragma omp parallel for private(j,k,tot,r,g,b,l,v) for (i = 0; i < n; i++) { // #pragma omp parallel for private(k,tot,r,g,b,l,v) for (j = 0; j < m; j++) { r = 0; g = 0; b = 0; tot = 0; // #pragma omp parallel for reduction(+:tot,r,g,b) private(l,v) for (k = max(0, i - radio); k <= min(n - 1, i + radio); k++) { // #pragma omp parallel for reduction(+:tot,r,g,b) private(v) for (l = max(0, j - radio); l <= min(m - 1, j + radio); l++) { v = ppdBloque[k - i + radio][l - j + radio]; r += ppsImagenOrg[k][l].r * v; g += ppsImagenOrg[k][l].g * v; b += ppsImagenOrg[k][l].b * v; tot += v; } } r /= tot; g /= tot; b /= tot; ppsImagenDst[i][j].r = r; ppsImagenDst[i][j].g = g; ppsImagenDst[i][j].b = b; } } if (p+1 < pasos) memcpy(ppsImagenOrg[0], ppsImagenDst[0], n * m * sizeof(struct pixel)); } free(ppdBloque[0]); free(ppdBloque); return 0; } int main() { struct pixel **ImgOrg, **ImgDst; int n = 2560, m = 1920; int i, rc; #pragma omp parallel { int id_hilo = omp_get_thread_num(); if(id_hilo==0) printf("Number of threads: %d\n", omp_get_num_threads()); //printf("ID of the thread executing the parallel region = %d\n", id_hilo); } rc = lee_ppm(IMAGEN_ENTRADA, &ImgOrg, &n, &m); if (rc) { printf("Error reading file %s\n", IMAGEN_ENTRADA); return 1; } printf("Opened an image with n:%d, m:%d\n", n, m); ImgDst = (struct pixel **)malloc(n * sizeof(struct pixel *)); ImgDst[0] = (struct pixel *)malloc(m * n * sizeof(struct pixel)); for (i = 1; i < n; i++) ImgDst[i] = ImgDst[i - 1] + m; rc = Filtro(NUM_PASOS, DIST_RADIO, ImgOrg, ImgDst, n, m); if (rc) { printf("Error applying the filter\n"); return 2; } rc = escribe_ppm(IMAGEN_SALIDA, ImgDst, n, m); if (rc) { printf("Error writing the image\n"); return 3; } free(ImgOrg[0]); free(ImgDst[0]); free(ImgOrg); free(ImgDst); return 0; }
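/*
 * Editor's note: Filtro() above parallelizes only the row loop of each pass.
 * When n is small relative to the thread count, collapsing the two pixel
 * loops exposes n*m iterations instead of n. A minimal self-contained sketch
 * of the same pattern (assumptions: OpenMP 3.0+ for collapse(2); this is an
 * illustration with a stand-in loop body, not a drop-in change to Filtro).
 * The pass loop (p) must stay sequential in any case: each pass consumes the
 * previous pass's output via the memcpy back into ppsImagenOrg.
 */
#include <omp.h>

static void filter_pass_collapsed(const unsigned char *src, unsigned char *dst,
                                  int n, int m)
{
    int i, j;
    /* Both i and j are implicitly private as collapsed loop variables. */
    #pragma omp parallel for collapse(2)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
            dst[i * m + j] = src[i * m + j]; /* stand-in for the filter body */
}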
implicit_blender.c
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) Blender Foundation * All rights reserved. */ /** \file * \ingroup bph */ #include "implicit.h" #ifdef IMPLICIT_SOLVER_BLENDER # include "MEM_guardedalloc.h" # include "DNA_meshdata_types.h" # include "DNA_object_force_types.h" # include "DNA_object_types.h" # include "DNA_scene_types.h" # include "DNA_texture_types.h" # include "BLI_math.h" # include "BLI_utildefines.h" # include "BKE_cloth.h" # include "BKE_collision.h" # include "BKE_effect.h" # include "BPH_mass_spring.h" # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" # endif # ifdef _OPENMP # define CLOTH_OPENMP_LIMIT 512 # endif //#define DEBUG_TIME # ifdef DEBUG_TIME # include "PIL_time.h" # endif static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; # if 0 # define C99 # ifdef C99 # define DO_INLINE inline # else # define DO_INLINE static # endif # endif /* if 0 */ struct Cloth; ////////////////////////////////////////// /* fast vector / matrix library, enhancements are welcome :) -dg */ ///////////////////////////////////////// /* DEFINITIONS */ typedef float lfVector[3]; typedef struct fmatrix3x3 { float m[3][3]; /* 3x3 matrix */ unsigned int c, r; /* column and row number */ /* int pinned; // is this vertex allowed to move?
*/ float n1, n2, n3; /* three normal vectors for collision constrains */ unsigned int vcount; /* vertex count */ unsigned int scount; /* spring count */ } fmatrix3x3; /////////////////////////// // float[3] vector /////////////////////////// /* simple vector code */ /* STATUS: verified */ DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar) { to[0] = from[0] * scalar; to[1] = from[1] * scalar; to[2] = from[2] * scalar; } /* simple v^T * v product ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3]) { mul_fvector_S(to[0], vectorB, vectorA[0]); mul_fvector_S(to[1], vectorB, vectorA[1]); mul_fvector_S(to[2], vectorB, vectorA[2]); } /* simple v^T * v product with scalar ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS) { mul_fvectorT_fvector(to, vectorA, vectorB); mul_fvector_S(to[0], to[0], aS); mul_fvector_S(to[1], to[1], aS); mul_fvector_S(to[2], to[2], aS); } # if 0 /* printf vector[3] on console: for debug output */ static void print_fvector(float m3[3]) { printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]); } /////////////////////////// // long float vector float (*)[3] /////////////////////////// /* print long vector on console: for debug output */ DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { print_fvector(fLongVector[i]); } } # endif /* create long vector */ DO_INLINE lfVector *create_lfvector(unsigned int verts) { /* TODO: check if memory allocation was successful */ return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector"); // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector)); } /* delete long vector */ DO_INLINE void del_lfvector(float (*fLongVector)[3]) { if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
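/* Editor's note: the reduction above was disabled because an OpenMP reduction
 * combines per-thread partial sums in a nondeterministic order, and float
 * addition is not associative, so successive runs of the same simulation
 * could produce different results. A common remedy (a sketch, not Blender
 * code) is to accumulate fixed, thread-independent blocks of the vector in
 * parallel and then sum the per-block partials serially in index order,
 * which restores run-to-run determinism for a given block size. */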
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; i++) { if (i > 0) { printf("\n"); } printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; q++) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; j++) { if (j > 0 && j % 3 == 0) { printf("\n"); } for (i = 0; i < size; i++) { if (i > 0 && i % 3 == 0) { printf(" "); } implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; 
to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { printf("can't build inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 matrix) */ DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, 
b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries /////////////////////////// /* printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; i++) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); # pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. 
*/ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; 
copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } /* this version of the CG algorithm does not work very well with partial constraints * (where S has non-zero elements). */ # if 0 static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > 
delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif del_lfvector(fB); del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = 
create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif /* Conjugate gradient algorithm to solve Ax=b. 
*/ cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) { root_to_world_v3(data, index, x, data->X[index]); } if (v) { root_to_world_v3(data, index, v, data->V[index]); } } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; i++) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); 
world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); add_v3_v3(data->F[index], f); } void BPH_mass_spring_force_drag(Implicit_Data *data, float drag) { int i, numverts = data->M[0].vcount; for (i = 0; i < numverts; i++) { float tmp[3][3]; /* NB: uses root space velocity, no need to transform */ madd_v3_v3fl(data->F[i], data->V[i], -drag); copy_m3_m3(tmp, I); mul_m3_fl(tmp, -drag); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp); } } void BPH_mass_spring_force_extern( struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3]) { float tf[3], tdfdx[3][3], tdfdv[3][3]; world_to_root_v3(data, i, tf, f); world_to_root_m3(data, i, tdfdx, dfdx); world_to_root_m3(data, i, tdfdv, dfdv); 
add_v3_v3(data->F[i], tf); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv); } static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3]) { float n1[3], n2[3]; sub_v3_v3v3(n1, v1, v2); sub_v3_v3v3(n2, v2, v3); cross_v3_v3v3(nor, n1, n2); return normalize_v3(nor) / 2.0f; } /* XXX does not support force jacobians yet, since the effector system does not provide them either */ void BPH_mass_spring_force_face_wind( Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3]) { const float effector_scale = 0.02f; float win[3], nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = effector_scale * area / 3.0f; world_to_root_v3(data, v1, win, winvec[v1]); madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v2, win, winvec[v2]); madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v3, win, winvec[v3]); madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor)); } float BPH_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3) { /* The result will be 6x the volume */ return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]); } void BPH_mass_spring_force_pressure( Implicit_Data *data, int v1, int v2, int v3, float pressure_difference, float weights[3]) { float nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = pressure_difference * area / 3.0f; /* add pressure to each of the face verts */ madd_v3_v3fl(data->F[v1], nor, factor * weights[0]); madd_v3_v3fl(data->F[v2], nor, factor * weights[1]); madd_v3_v3fl(data->F[v3], nor, factor * weights[2]); } static void edge_wind_vertex(const float dir[3], float length, float radius, const float wind[3], float f[3], float UNUSED(dfdx[3][3]), float UNUSED(dfdv[3][3])) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float cos_alpha, sin_alpha, cross_section; float windlen = len_v3(wind); if (windlen == 0.0f) { zero_v3(f); return; } /* angle of wind direction to edge */ cos_alpha = dot_v3v3(wind, dir) / windlen; sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha); cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha); mul_v3_v3fl(f, wind, density * cross_section); } void BPH_mass_spring_force_edge_wind( Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3]) { float win[3], dir[3], length; float f[3], dfdx[3][3], dfdv[3][3]; sub_v3_v3v3(dir, data->X[v1], data->X[v2]); length = normalize_v3(dir); world_to_root_v3(data, v1, win, winvec[v1]); edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv); add_v3_v3(data->F[v1], f); world_to_root_v3(data, v2, win, winvec[v2]); edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv); add_v3_v3(data->F[v2], f); } void BPH_mass_spring_force_vertex_wind(Implicit_Data *data, int v, float UNUSED(radius), const float (*winvec)[3]) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float wind[3]; float f[3]; world_to_root_v3(data, v, wind, winvec[v]); mul_v3_v3fl(f, wind, density); add_v3_v3(data->F[v], f); } BLI_INLINE void 
dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);
  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}

/* unused */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping
  // vel is the relative velocity of the endpoints.
  // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif

BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  // derivative of force wrt velocity
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}

BLI_INLINE float fb(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  float xxxx = xxx * x;
  return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}

BLI_INLINE float fbderiv(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}

BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return fbstar_fl;
  }
  else {
    return tempfb_fl;
  }
}

// Function to calculate bending spring force (taken from Choi & Ko).
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return -cb;
  }
  else {
    return -kb * fbderiv(length, L);
  }
}

/* calculate elongation */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
                              float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);

  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
   * Zero derivative effectively disables the spring for the implicit solver.
   * Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression. */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */

    damping = damping_compression;

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }

  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);

  apply_spring(data, i, j, f, dfdx, dfdv);

  return true;
}

/* See "Stable but Responsive Cloth" (Choi, Ko 2005). */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

    /* XXX damping not supported */
    zero_m3(dfdv);

    apply_spring(data, i, j, f, dfdx, dfdv);

    return true;
  }
  else {
    return false;
  }
}

BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
  float fact = 1.0f / (float)len;

  zero_v3(r_avg);

  for (int i = 0; i < len; i++) {
    madd_v3_v3fl(r_avg, data[inds[i]], fact);
  }
}

BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];

  poly_avg(data, inds, len, mid);

  normal_tri_v3(r_dir, data[i], data[j], mid);
}

BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
  r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
  r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
  r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}

BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}

BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cos, sin;
  float tmp[3];

  cos = dot_v3v3(dir_a, dir_b);

  cross_v3_v3v3(tmp, dir_a, dir_b);
  sin = dot_v3v3(tmp, dir_e);

  return atan2f(sin, cos);
}

BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  edge_norm(data->X, i, j, dir_e);

  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);

  edge_avg(data->V, i, j, vel_e);

  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}

/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation".
*/ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. * * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); 
sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; b++) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; b++) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. */ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
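The solver above is conjugate gradient with a constraint filter S and a block preconditioner P folded into the loop; with both stripped away, the same structure reduces to textbook CG. A minimal dense sketch for orientation (the function names, row-major matrix layout, and caller-provided workspace are illustrative assumptions, not Blender code):

/* Minimal unpreconditioned CG on a dense SPD matrix, mirroring the loop
 * structure of cg_filtered_pre above (no filter, no preconditioner). */
static void matvec(int n, const double *A, const double *x, double *y) {
  for (int i = 0; i < n; i++) {
    y[i] = 0.0;
    for (int j = 0; j < n; j++)
      y[i] += A[i * n + j] * x[j];
  }
}

static double dot(int n, const double *a, const double *b) {
  double s = 0.0;
  for (int i = 0; i < n; i++)
    s += a[i] * b[i];
  return s;
}

/* Solve A x = b; r, p, s are caller-provided scratch vectors of length n. */
static int cg_solve(int n, const double *A, const double *b, double *x,
                    double tol, int maxiter, double *r, double *p, double *s) {
  matvec(n, A, x, s); /* r = b - A x */
  for (int i = 0; i < n; i++) {
    r[i] = b[i] - s[i];
    p[i] = r[i]; /* p = r (identity preconditioner) */
  }
  double delta_new = dot(n, r, r), delta0 = delta_new;
  int it = 0;
  while (delta_new > delta0 * tol * tol && it < maxiter) {
    it++;
    matvec(n, A, p, s);
    double alpha = delta_new / dot(n, p, s);
    for (int i = 0; i < n; i++) {
      x[i] += alpha * p[i]; /* advance the solution */
      r[i] -= alpha * s[i]; /* update the residual */
    }
    double delta_old = delta_new;
    delta_new = dot(n, r, r);
    for (int i = 0; i < n; i++)
      p[i] = r[i] + (delta_new / delta_old) * p[i];
  }
  return it;
}

The termination test mirrors the solver above: iterate until the residual measure falls below delta0 * tol * tol or the iteration cap is reached.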
pi.c
#ifndef _HISTOGRAM_H
#define _HISTOGRAM_H
#include "pi.h"
#endif

int main(int argc, char *argv[]) {
  double elapsed_time;
  double serialPI = 0, parallelPI = 0;

  if (argc != 3) {
    printf("Correct way to execute this program is:\n");
    printf("./pi stepNum numberOfThreads\n");
    return 1;
  }

  int num_steps = atoi(argv[1]);
  int num_thread = atoi(argv[2]);

  // Sequential pi estimation
  set_clock();
  time_t t;
  srand((unsigned)time(&t));
  double x = 0, y = 0;
  double sum = 0;
  int count_inside = 0;
  unsigned int seed = 11;
  for (int i = 0; i < num_steps; i++) {
    x = (double)rand_r(&seed) / RAND_MAX;
    y = (double)rand_r(&seed) / RAND_MAX;
    sum = x * x + y * y;
    if (sum <= 1) {
      count_inside++;
    }
  }
  serialPI = (count_inside * 4.0) / num_steps;
  elapsed_time = get_elapsed_time();
  printf("Naive PI calculation time: %.4fms\n", elapsed_time / 1000);

  // OpenMP parallel pi estimation
  set_clock();
  omp_set_num_threads(num_thread);
#pragma omp parallel
  {
    double x, y;
    int count_inside_shared = 0;
    unsigned int seed = 11 * omp_get_thread_num();
#pragma omp for
    for (int i = 0; i < num_steps; i++) {
      x = (double)rand_r(&seed) / RAND_MAX;
      y = (double)rand_r(&seed) / RAND_MAX;
      if (x * x + y * y <= 1.0)
        count_inside_shared++;
    }
#pragma omp atomic
    parallelPI += count_inside_shared;
  }
  parallelPI = (parallelPI * 4.0) / num_steps;
  elapsed_time = get_elapsed_time();
  printf("-> Openmp PI calculation time: %.4fms\n", elapsed_time / 1000);

#ifdef TEST
  validate(&serialPI, &parallelPI, 1);
#endif

  return 0;
}
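The parallel region above combines per-thread counts with #pragma omp atomic; an equivalent and slightly more idiomatic formulation uses a reduction clause and lets the runtime merge the partials. A sketch under the same assumptions as pi.c (POSIX rand_r, per-thread seeds; the function name pi_reduction is made up):

// Sketch: same Monte Carlo estimate, with an OpenMP reduction clause
// replacing the shared counter plus atomic update used above.
#include <omp.h>
#include <stdlib.h> /* rand_r (POSIX), RAND_MAX */

static double pi_reduction(int num_steps, int num_thread) {
  long count_inside = 0;
  omp_set_num_threads(num_thread);
#pragma omp parallel reduction(+ : count_inside)
  {
    /* +1 avoids giving thread 0 the degenerate seed 0. */
    unsigned int seed = 11 * (omp_get_thread_num() + 1);
#pragma omp for
    for (int i = 0; i < num_steps; i++) {
      double x = (double)rand_r(&seed) / RAND_MAX;
      double y = (double)rand_r(&seed) / RAND_MAX;
      if (x * x + y * y <= 1.0)
        count_inside++; /* each thread increments its private copy */
    }
  }
  return (count_inside * 4.0) / num_steps;
}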
3DConvolution.c
/** * 3DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <[email protected]> * Rafael Cardoso F Sousa <[email protected]> * Luís Felipe Mattos <[email protected]> */ #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "3DCONV" // define the error threshold for the results "not matching" #define ERROR_THRESHOLD 0.5 /* Problem size. */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 256 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define NI SIZE #define NJ SIZE #define NK SIZE /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void conv3D(DATA_TYPE *A, DATA_TYPE *B) { int i, j, k; DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33; c11 = +2; c21 = +5; c31 = -8; c12 = -3; c22 = +6; c32 = -9; c13 = +4; c23 = +7; c33 = +10; for (j = 1; j < NJ - 1; ++j) { for (i = 1; i < NI - 1; ++i) { for (k = 1; k < NK - 1; ++k) { B[i * (NK * NJ) + j * NK + k] = c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] + c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] + c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] + c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] + c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] + c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] + c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] + c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] + c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)]; } } } } void conv3D_OMP(DATA_TYPE *A, DATA_TYPE *B) { int i, j, k; DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33; c11 = +2; c21 = +5; c31 = -8; c12 = -3; c22 = +6; c32 = -9; c13 = +4; c23 = +7; c33 = +10; #pragma omp target teams distribute parallel for map(to:A[:NI*NJ*NK]) map(from:B[:NI*NJ*NK]) device(DEVICE_ID) private(i,k) for (j = 1; j < NJ - 1; ++j) { for (i = 1; i < NI - 1; ++i) { for (k = 1; k < NK - 1; ++k) { B[i * (NK * NJ) + j * NK + k] = c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] + c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] + c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] + c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] + c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] + c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] + c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] + c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] + c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] + c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)]; } } } } void init(DATA_TYPE *A) { int i, j, k; for (i = 0; i < NI; ++i) { for (j = 0; j < NJ; ++j) { for (k = 0; k < NK; ++k) { 
A[i * (NK * NJ) + j * NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13); } } } } int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) { int i, j, k, fail; fail = 0; // Compare result from cpu and gpu... for (i = 1; i < NI - 1; ++i) { for (j = 1; j < NJ - 1; ++j) { for (k = 1; k < NK - 1; ++k) { if (percentDiff(B[i * (NK * NJ) + j * NK + k], B_GPU[i * (NK * NJ) + j * NK + k]) > ERROR_THRESHOLD) { fail++; } } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", ERROR_THRESHOLD, fail); return fail; } int main(int argc, char *argv[]) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *B_GPU; A = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); B_GPU = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); //fprintf(stdout, ">> Three dimensional (3D) convolution size: %d<<\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init(A); t_start = rtclock(); conv3D_OMP(A, B_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); conv3D(A, B); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(B, B_GPU); #endif free(A); free(B); free(B_GPU); return fail; }
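compareResults above depends on percentDiff from BenchmarksUtil.h, which is not part of this listing. A sketch of what such a helper typically computes in PolyBench-derived suites (the near-zero guard and the epsilon are assumptions; the real header may differ):

// Sketch of the percentDiff helper assumed by compareResults above.
#include <math.h>

static float percentDiff_sketch(double val1, double val2) {
  /* Treat pairs of near-zero values as matching to avoid dividing by ~0. */
  if ((fabs(val1) < 0.01) && (fabs(val2) < 0.01))
    return 0.0f;
  /* Relative difference in percent, with a tiny epsilon in the denominator. */
  return 100.0f * (float)(fabs(fabs(val1 - val2) / fabs(val1 + 0.00000001)));
}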
TinyErode.h
// SPDX-License-Identifier: MIT
//  _______ _             ______              _
// |__   __(_)           |  ____|            | |
//    | |   _ _ __  _   _| |__   _ __ ___   __| | ___
//    | |  | | '_ \| | | |  __| | '__/ _ \ / _` |/ _ \
//    | |  | | | | | |_| | |____| |  | (_) | (_| |  __/
//    |_|  |_|_| |_|\__, |______|_|   \___/ \__,_|\___|
//                   __/ |
//                  |___/
//
// Copyright (C) 2021 Taylor Holberton
//
// A C++ library for simulating erosion.

#pragma once

#ifndef TINYERODE_H_INCLUDED
#define TINYERODE_H_INCLUDED

#include <algorithm>
#include <array>
#include <numeric>
#include <vector>

#include <cassert>
#include <cmath>

namespace TinyErode {

/// Used for simulating a rainfall event on a terrain.
/// Stores information on the terrain that is required to simulate the effect
/// of hydraulic erosion.
///
/// @note The class should only be used once per rainfall event.
class Simulation final
{
public:
  Simulation(int w = 0, int h = 0);

  void SetTimeStep(float timeStep) noexcept { mTimeStep = timeStep; }

  float GetTimeStep() const noexcept { return mTimeStep; }

  int GetWidth() const noexcept { return mSize[0]; }

  int GetHeight() const noexcept { return mSize[1]; }

  /// Called at the beginning of each iteration.
  /// This function will compute the flow rate of each cell, based on the level
  /// of water and the elevation at each point in the height map.
  ///
  /// @param height The function taking an x and y coordinate and returning the
  ///               height value at each point in the map.
  ///
  /// @param water The function taking an x and y coordinate and returning the
  ///              water level at each point in the map. The water levels can
  ///              be initialized to simulate rainfall or river streams.
  template<typename Height, typename Water>
  void ComputeFlowAndTilt(const Height& height, const Water& water);

  /// This function is called after @ref Simulation::ComputeFlowAndTilt in
  /// order to determine where the water at each cell is going to move.
  ///
  /// @param waterAdder A function taking an x and y coordinate as well as a
  ///                   water value to be added to a cell within the water
  ///                   model.
  template<typename WaterAdder>
  void TransportWater(WaterAdder waterAdder);

  /// Erodes and deposits sediment, and then moves remaining sediment based on
  /// the velocity of the water at each cell.
  ///
  /// @param kC A function taking an x and y coordinate and returning the
  ///           carry capacity constant at that particular location.
  ///
  /// @param kD A function taking an x and y coordinate and returning the
  ///           deposition constant at that particular location.
  ///
  /// @param kE A function taking an x and y coordinate and returning the
  ///           erosion constant at that particular location.
  ///
  /// @param heightAdder A function taking an x and y coordinate, as well as a
  ///                    height delta, and adding the value to the height
  ///                    model.
  ///
  /// @note For simple models, @p kC, @p kE and @p kD can each be a single,
  ///       uniform value.
  template<typename CarryCapacity,
           typename Deposition,
           typename Erosion,
           typename HeightAdder>
  void TransportSediment(CarryCapacity kC,
                         Deposition kD,
                         Erosion kE,
                         HeightAdder heightAdder);

  /// Evaporates water in the water model, based on evaporation constants.
  ///
  /// @param waterAdder A function taking an x and y coordinate as well as a
  ///                   water value to be added to a cell within the water
  ///                   model.
  ///
  /// @param kEvap A function taking an x and y coordinate and returning the
  ///              evaporation constant at that particular location. It is
  ///              the responsibility of this function to ensure that the water
  ///              level does not become negative at this step.
  template<typename WaterAdder, typename Evaporation>
  void Evaporate(WaterAdder water, Evaporation kEvap);

  /// Deposits all currently suspended sediment into the terrain.
  template<typename HeightAdder>
  void TerminateRainfall(HeightAdder heightAdder);

  void Resize(int w, int h);

  /// Gets the sediment levels at each cell. Useful primarily for debugging.
  auto GetSediment() const noexcept -> const std::vector<float>&
  {
    return mSediment;
  }

  void SetMetersPerX(float metersPerX) noexcept
  {
    mPipeLengths[0] = metersPerX;
  }

  void SetMetersPerY(float metersPerY) noexcept
  {
    mPipeLengths[1] = metersPerY;
  }

private:
  using Velocity = std::array<float, 2>;

  using Flow = std::array<float, 4>;

  template<typename Height, typename Water>
  void ComputeFlowAndTiltAt(const Height& height, const Water& water, int x, int y);

  template<typename WaterAdder>
  void TransportWaterAt(WaterAdder& water, int x, int y);

  template<typename CarryCapacity,
           typename Deposition,
           typename Erosion,
           typename HeightAdder>
  void ErodeAndDeposit(CarryCapacity& kC,
                       Deposition& kD,
                       Erosion& kE,
                       HeightAdder& heightAdder,
                       int x,
                       int y);

  const Flow& GetFlow(int x, int y) const noexcept
  {
    return mFlow[(y * GetWidth()) + x];
  }

  Flow& GetFlow(int x, int y) noexcept { return mFlow[(y * GetWidth()) + x]; }

  bool InBounds(int x, int y) const noexcept
  {
    return (x >= 0) && (x < GetWidth()) && (y >= 0) && (y < GetHeight());
  }

  Flow GetInflow(int x, int y) const noexcept;

  float GetScalingFactor(const Flow& flow, float waterLevel) noexcept;

  int ToIndex(int x, int y) const noexcept { return (y * GetWidth()) + x; }

private:
  float mTimeStep = 0.0125;

  float mGravity = 9.8;

  std::array<float, 2> mPipeLengths{ 1, 1 };

  std::array<int, 2> mSize{ 0, 0 };

  std::vector<Flow> mFlow;

  std::vector<float> mSediment;

  std::vector<Velocity> mVelocity;

  std::vector<float> mTilt;
};

// Implementation details beyond this point.
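// Note on the conventions used below (derived from the code, for orientation):
// every cell stores its *outflow* through four virtual pipes, indexed in
// neighbor order { (0,-1), (-1,0), (+1,0), (0,+1) }; see xDeltas/yDeltas in
// ComputeFlowAndTiltAt and GetInflow. The pipe of a neighbor that points back
// at the center cell is therefore at index 3 - i, which is what GetInflow
// relies on when it sums inflow from the four neighbors.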
inline Simulation::Simulation(int w, int h) { Resize(w, h); } template<typename WaterAdder> void Simulation::TransportWater(WaterAdder water) { #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) { TransportWaterAt(water, x, y); } } } template<typename WaterAdder> void Simulation::TransportWaterAt(WaterAdder& water, int x, int y) { auto& flow = GetFlow(x, y); auto inflow = GetInflow(x, y); auto inflowSum = std::accumulate(inflow.begin(), inflow.end(), 0.0f); auto outflowSum = std::accumulate(flow.begin(), flow.end(), 0.0f); auto volumeDelta = (inflowSum - outflowSum) * mTimeStep; auto waterDelta = volumeDelta / (mPipeLengths[0] * mPipeLengths[1]); float waterLevel = water(x, y, waterDelta); // Compute Water Velocity float dx = 0.5f * ((inflow[1] - flow[1]) + (flow[2] - inflow[2])); float dy = 0.5f * ((flow[0] - inflow[0]) + (inflow[3] - flow[3])); float avgWaterLevel = waterLevel - (waterDelta * 0.5f); Velocity velocity{ { 0, 0 } }; if (avgWaterLevel != 0.0f) { velocity[0] = dx / (mPipeLengths[0] * avgWaterLevel); velocity[1] = dy / (mPipeLengths[1] * avgWaterLevel); } mVelocity[ToIndex(x, y)] = velocity; } template<typename Height, typename Water> void Simulation::ComputeFlowAndTilt(const Height& height, const Water& water) { #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) ComputeFlowAndTiltAt(height, water, x, y); } } template<typename Height, typename Water> void Simulation::ComputeFlowAndTiltAt(const Height& height, const Water& water, int x, int y) { auto& center = GetFlow(x, y); std::array<int, 4> xDeltas{ { 0, -1, 1, 0 } }; std::array<int, 4> yDeltas{ { -1, 0, 0, 1 } }; auto centerH = height(x, y); auto centerW = water(x, y); std::array<float, 4> heightNeighbors{ centerH, centerH, centerH, centerH }; std::array<int, 4> pipeLengthIndices{ { 1, 0, 0, 1 } }; for (int i = 0; i < 4; i++) { auto neighborX = x + xDeltas[i]; auto neighborY = y + yDeltas[i]; if (!InBounds(neighborX, neighborY)) continue; heightNeighbors[i] = height(neighborX, neighborY); auto neighborH = heightNeighbors[i]; auto neighborW = water(neighborX, neighborY); auto heightDiff = (centerH + centerW) - (neighborH + neighborW); // Cross sectional area of the virtual pipe. float area = 1; // Length of the virtual pipe. 
float pipeLength = mPipeLengths[pipeLengthIndices[i]]; auto c = mTimeStep * area * (mGravity * heightDiff) / pipeLength; center[i] = std::max(0.0f, center[i] + c); } float totalOutputVolume = std::accumulate(center.begin(), center.end(), 0.0f) * mTimeStep; if (totalOutputVolume > (centerW * mPipeLengths[0] * mPipeLengths[1])) { auto k = GetScalingFactor(center, centerW); for (auto& n : center) n *= k; } // Compute Tilt float avgDeltaY = 0; avgDeltaY += (centerH - heightNeighbors[0]); avgDeltaY += (heightNeighbors[3] - centerH); avgDeltaY *= 0.5f; float avgDeltaX = 0; avgDeltaX += (centerH - heightNeighbors[1]); avgDeltaX += (heightNeighbors[2] - centerH); avgDeltaX *= 0.5f; float a = avgDeltaX * avgDeltaX; float b = avgDeltaY * avgDeltaY; auto abSum = a + b; mTilt[ToIndex(x, y)] = std::sqrt(abSum) / std::sqrt(1 + abSum); } template<typename CarryCapacity, typename Deposition, typename Erosion, typename HeightAdder> void Simulation::TransportSediment(CarryCapacity kC, Deposition kD, Erosion kE, HeightAdder heightAdder) { #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) ErodeAndDeposit(kC, kD, kE, heightAdder, x, y); } std::vector<float> nextSediment(GetWidth() * GetHeight()); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) { auto index = ToIndex(x, y); auto vel = mVelocity[index]; auto xf = x - (vel[0] * mTimeStep); auto yf = y + (vel[1] * mTimeStep); auto xfi = int(xf); auto yfi = int(yf); auto u = xf - xfi; auto v = yf - yfi; std::array<float, 4> s{ { 0, 0, 0, 0 } }; if (InBounds(xfi + 0, yfi + 0)) s[0] = mSediment[ToIndex(xfi + 0, yfi + 0)]; if (InBounds(xfi + 1, yfi + 0)) s[1] = mSediment[ToIndex(xfi + 1, yfi + 0)]; if (InBounds(xfi + 0, yfi + 1)) s[2] = mSediment[ToIndex(xfi + 0, yfi + 1)]; if (InBounds(xfi + 1, yfi + 1)) s[3] = mSediment[ToIndex(xfi + 1, yfi + 1)]; float sx1 = s[0] + (u * (s[1] - s[0])); float sx2 = s[2] + (u * (s[3] - s[2])); nextSediment[index] = sx1 + (v * (sx2 - sx1)); } } mSediment = std::move(nextSediment); } template<typename HeightAdder> void Simulation::TerminateRainfall(HeightAdder heightAdder) { #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) { auto index = ToIndex(x, y); float sediment = mSediment[index]; heightAdder(x, y, sediment / (mPipeLengths[0] * mPipeLengths[1])); mSediment[index] = 0; mVelocity[index][0] = 0; mVelocity[index][1] = 0; } } } template<typename CarryCapacity, typename Deposition, typename Erosion, typename HeightAdder> void Simulation::ErodeAndDeposit(CarryCapacity& kC, Deposition& kD, Erosion& kE, HeightAdder& heightAdder, int x, int y) { auto vel = mVelocity[ToIndex(x, y)]; auto velocityMagnitude = std::sqrt((vel[0] * vel[0]) + (vel[1] * vel[1])); float tiltAngle = mTilt[ToIndex(x, y)]; float capacity = kC(x, y) * std::max(0.01f, tiltAngle) * velocityMagnitude; float sediment = mSediment[ToIndex(x, y)]; float factor = (capacity > sediment) ? 
kE(x, y) : kD(x, y); heightAdder(x, y, -(factor * (capacity - sediment))); mSediment[ToIndex(x, y)] += factor * (capacity - sediment); } template<typename WaterAdder, typename Evaporation> void Simulation::Evaporate(WaterAdder water, Evaporation kEvap) { #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < GetHeight(); y++) { for (int x = 0; x < GetWidth(); x++) water(x, y, -mTimeStep * kEvap(x, y)); } } inline auto Simulation::GetInflow(int centerX, int centerY) const noexcept -> Flow { std::array<int, 4> xDeltas{ { 0, -1, 1, 0 } }; std::array<int, 4> yDeltas{ { -1, 0, 0, 1 } }; std::array<float, 4> inflow{ { 0, 0, 0, 0 } }; for (int i = 0; i < 4; i++) { int x = centerX + xDeltas[i]; int y = centerY + yDeltas[i]; if (InBounds(x, y)) inflow[i] = GetFlow(x, y)[3 - i]; } return inflow; } inline float Simulation::GetScalingFactor(const Flow& flow, float waterLevel) noexcept { auto volume = std::accumulate(flow.begin(), flow.end(), 0.0f) * mTimeStep; if (volume == 0.0f) return 1.0f; return std::min(1.0f, (waterLevel * mPipeLengths[0] * mPipeLengths[1]) / volume); } inline void Simulation::Resize(int w, int h) { assert(w >= 0); assert(h >= 0); w = std::max(w, 0); h = std::max(h, 0); mFlow.resize(w * h); mSediment.resize(w * h); mVelocity.resize(w * h); mTilt.resize(w * h); mSize[0] = w; mSize[1] = h; } } // namespace TinyErode #endif // TINYERODE_H_INCLUDED
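The per-step protocol documented above (flow and tilt, water transport, sediment transport, evaporation, then a final deposit) can be exercised with a small driver. A hedged sketch; the grid layout, constants, clamping policy, and iteration count are all illustrative assumptions, not part of the library:

// Illustrative driver for TinyErode::Simulation, following the call order
// documented in the class above. Assumes TinyErode.h has been included.
#include <algorithm>
#include <vector>

void ErodeExample(std::vector<float>& heightMap,
                  std::vector<float>& waterMap,
                  int w, int h)
{
  TinyErode::Simulation sim(w, h);

  // Read-only accessors for the height and water models.
  auto height = [&](int x, int y) { return heightMap[y * w + x]; };
  auto water = [&](int x, int y) { return waterMap[y * w + x]; };

  // Adders; the water adder returns the resulting level and clamps at zero,
  // as the Evaporate documentation requires.
  auto addWater = [&](int x, int y, float dw) {
    return waterMap[y * w + x] = std::max(0.0f, waterMap[y * w + x] + dw);
  };
  auto addHeight = [&](int x, int y, float dh) { heightMap[y * w + x] += dh; };

  // Uniform constants (the @note above says single values suffice for
  // simple models; 0.1f is an arbitrary choice).
  auto kC = [](int, int) { return 0.1f; };    // carry capacity
  auto kD = [](int, int) { return 0.1f; };    // deposition
  auto kE = [](int, int) { return 0.1f; };    // erosion
  auto kEvap = [](int, int) { return 0.1f; }; // evaporation

  for (int step = 0; step < 128; step++) {
    sim.ComputeFlowAndTilt(height, water);
    sim.TransportWater(addWater);
    sim.TransportSediment(kC, kD, kE, addHeight);
    sim.Evaporate(addWater, kEvap);
  }

  // Drop whatever sediment is still suspended back onto the terrain.
  sim.TerminateRainfall(addHeight);
}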
tensor_cpu-inl.h
/*! * Copyright (c) 2014 by Contributors * \file tensor_cpu-inl.h * \brief implementation of CPU host code * \author Bing Xu, Tianqi Chen */ #ifndef MSHADOW_TENSOR_CPU_INL_H_ #define MSHADOW_TENSOR_CPU_INL_H_ #include <cstring> #include <functional> #include <utility> #include <vector> #include "./base.h" #include "./tensor.h" #include "./packet-inl.h" #include "./dot_engine-inl.h" namespace mshadow { template<> inline void InitTensorEngine<cpu>(int dev_id) { } template<> inline void ShutdownTensorEngine<cpu>(void) { } template<> inline void SetDevice<cpu>(int devid) { } template<> inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle, bool create_dnn_handle) { return new Stream<cpu>(); } template<> inline void DeleteStream<cpu>(Stream<cpu> *stream) { delete stream; } template<int ndim> inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*) os << '('; for (int i = 0; i < ndim; ++i) { if (i != 0) os << ','; os << shape[i]; } // python style tuple if (ndim == 1) os << ','; os << ')'; return os; } template<typename xpu> inline void *AllocHost_(size_t size); template<typename xpu> inline void FreeHost_(void * dptr); #ifdef __CUDACC__ template<> inline void *AllocHost_<gpu>(size_t size) { void *dptr; MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable)); return dptr; } template<> inline void FreeHost_<gpu>(void *dptr) { MSHADOW_CUDA_CALL(cudaFreeHost(dptr)); } #endif template<> inline void *AllocHost_<cpu>(size_t size) { size_t pitch; return packet::AlignedMallocPitch(&pitch, size, 1); } template<> inline void FreeHost_<cpu>(void *dptr) { packet::AlignedFree(dptr); } template<typename xpu, int dim, typename DType> inline void AllocHost(Tensor<cpu, dim, DType> *obj) { obj->stride_ = obj->size(dim - 1); CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost"; void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType)); obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename xpu, int dim, typename DType> inline void FreeHost(Tensor<cpu, dim, DType> *obj) { if (obj->dptr_ == NULL) { LOG(FATAL) << "FreeHost:: double free"; } FreeHost_<xpu>(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) { size_t pitch; void *dptr; if (pad) { dptr = packet::AlignedMallocPitch (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]); obj->stride_ = static_cast<index_t>(pitch / sizeof(DType)); } else { obj->stride_ = obj->size(dim - 1); dptr = packet::AlignedMallocPitch (&pitch, obj->shape_.Size() * sizeof(DType), 1); } obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename Device, typename DType, int dim> inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) { Tensor<Device, dim, DType> obj(shape); obj.stream_ = stream_; AllocSpace(&obj, pad); MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv)); return obj; } template<int dim, typename DType> inline void FreeSpace(Tensor<cpu, dim, DType> *obj) { packet::AlignedFree(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void Copy(Tensor<cpu, dim, DType> _dst, const Tensor<cpu, dim, DType> &_src, Stream<cpu> *stream) { CHECK_EQ(_dst.shape_, _src.shape_) << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_; if (_dst.CheckContiguous() && _src.CheckContiguous()) { memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size()); } else { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); Tensor<cpu, 2, DType> 
src = _src.FlatTo2D(); for (index_t y = 0; y < dst.size(0); ++y) { memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1)); } } } template<typename Saver, typename R, int dim, typename DType, typename E> inline void MapPlan(TRValue<R, cpu, dim, DType> *dst, const expr::Plan<E, DType> &plan) { Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D(); expr::Plan<R, DType> dplan = expr::MakePlan(dst->self()); // #pragma omp parallel for // temp remove openmp, as default setting throttles CPU for (index_t y = 0; y < shape[0]; ++y) { for (index_t x = 0; x < shape[1]; ++x) { // trust your compiler! -_- they will optimize it Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x)); } } } // code to handle SSE optimization template<bool pass_check, typename Saver, typename R, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine { inline static void Map(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { MapPlan<Saver>(dst, MakePlan(exp.self())); } }; template<typename SV, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>, dim, DType, E, etype> { inline static void Map(Tensor<cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) && expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) { expr::MapPacketPlan<SV>(dst->self(), expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self())); } else { MapPlan<SV>(dst, MakePlan(exp.self())); } } }; template<typename Saver, typename R, int dim, typename DType, typename E, int etype> inline void MapExp(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass> ::Error_All_Tensor_in_Exp_Must_Have_Same_Type(); Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self()); Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self()); CHECK(eshape[0] == 0 || eshape == dshape) << "Assignment: Shape of Tensors are not consistent with target, " << "eshape: " << eshape << " dshape:" << dshape; MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass, Saver, R, dim, DType, E, etype> ::Map(dst->ptrself(), exp); } template<typename Saver, typename Reducer, typename R, typename DType, typename E, int etype> inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()).FlatTo2D(); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match"; CHECK_NE(eshape[0], 0) << "can not reduce over empty tensor"; // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); for (index_t x = 0; x < eshape[1]; ++x) { DType res = splan.Eval(0, x); for (index_t y = 1; y < eshape[0]; ++y) { Reducer::Reduce(res, splan.Eval(y, x)); } Saver::template Save<DType>(dplan.REval(0, x), res * scale); } } template<typename Saver, typename Reducer, int dimkeep, typename R, typename DType, typename E, int etype> inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, 
dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0])
      << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equivalent form
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
  for (index_t c = 0; c < pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}

template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}

template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
  for (index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}

template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
  for (index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}

template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
  for (index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const index_t k = static_cast<int>(label[y][n]);
      for (index_t x = 0; x < dst.size(1); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f;
        } else {
          dst[y][x][n] = src[y][x][n];
        }
      }
    }
  }
}

template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label,
                        const DType &ignore_label) {
  for (index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const index_t k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (index_t x = 0; x < dst.size(1); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (index_t x = 0; x < dst.size(1); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f;
          } else {
            dst[y][x][n] = src[y][x][n];
          }
        }
      }
    }
  }
}

template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
  for (index_t y = 0; y < dst.size(0); ++y) {
    Softmax(dst[y], energy[y]);
  }
}

template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
                    const Tensor<cpu, 3, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
  for (index_t y = 0; y < dst.size(0); ++y) {
    for (index_t n = 0; n < dst.size(2); ++n) {
      DType mmax = energy[y][0][n];
      for (index_t x = 1; x < dst.size(1); ++x) {
        if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
      }
      DType sum = DType(0.0f);
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
        sum += dst[y][x][n];
      }
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] /= sum;
      }
    }
  }
}

template<typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    dst[index[y]] += src[y];
  }
}

template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}

template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
    << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
    << "values_size: " << values.size(0);
  std::vector<std::pair<KDType, VDType> > V;
  for (index_t i = 0; i < values.size(0); ++i) {
    std::pair<KDType, VDType> P = std::make_pair(keys[i], values[i]);
    V.push_back(P);
  }
  if (is_ascend) {
    std::stable_sort(V.begin(), V.end());
  } else {
    std::stable_sort(V.begin(), V.end(),
                     std::greater<std::pair<KDType, VDType> >());
  }
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = V[i].first;
    values[i] = V[i].second;
  }
}

template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // We can sort each segment using two stable sorts.
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}

// blas related
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0))
    << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1)
    << "VectorDot: expect dst to be scalar";
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot(
    lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}

template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha, DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  index_t batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  Shape<3> sleft = transpose_left
                       ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
                       : lhs.shape_;
  Shape<3> sright = transpose_right
                        ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
                        : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
    << "BatchGEMM: batchsize must be equal."
<< "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1]) << "BatchGEMM: matrix shape mismatch" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(workspace.size(0) >= 3 * batch_size) << "Workspace Size must be bigger than " << 3 * batch_size; CHECK_EQ(workspace.CheckContiguous(), true); // use column major argument to compatible with most BLAS expr::BLASEngine<Device, DType>::batched_gemm (dst.stream_, transpose_right, transpose_left, transpose_right ? rhs.size(1) : rhs.size(2), transpose_left ? lhs.size(2) : lhs.size(1), transpose_right ? rhs.size(2) : rhs.size(1), alpha, rhs.dptr_, rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_, batch_size, workspace.dptr_); } } // namespace mshadow #endif // MSHADOW_TENSOR_CPU_INL_H_
9596.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <[email protected]>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static void init_array (int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_conv2d(int ni, int nj,
                          DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                          DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /* Note: the inner "parallel for" opens a parallel region inside a parallel
     region; with nested parallelism disabled (the OpenMP default) it gains no
     extra threads. */
#pragma omp parallel for simd schedule(static) num_threads(4) private(j)
  for (i = 1; i < _PB_NI - 1; ++i) {
#pragma omp parallel for simd schedule(static) num_threads(4)
    for (j = 1; j < _PB_NJ - 1; ++j) {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
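/* Illustrative sketch (not part of the benchmark): a common single-level
 * alternative to the nested "parallel for" pair in kernel_conv2d above is
 * collapse(2), which fuses both loop nests into one iteration space and
 * avoids the nested-region overhead. The function below mirrors the stencil
 * with plain C99 VLA parameters; DATA_TYPE is assumed to be double, and the
 * function name is hypothetical. */
static void kernel_conv2d_collapsed(int ni, int nj,
                                    double A[ni][nj], double B[ni][nj])
{
  int i, j;
  /* one parallel region; collapse(2) distributes (i, j) pairs across threads */
#pragma omp parallel for collapse(2) schedule(static)
  for (i = 1; i < ni - 1; ++i)
    for (j = 1; j < nj - 1; ++j)
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}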
for-20.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */

extern void bar(int);

void foo (int n)
{
  int i;

  #pragma omp for schedule(nonmonotonic:guided)
  for (i = 0; i < n; ++i)
    bar(i);
}

/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_guided_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_guided_next" 1 "ompexp" } } */
ast-dump-openmp-flush.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test() {
#pragma omp flush
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-flush.c:3:1, line:5:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:5:1>
// CHECK-NEXT: `-OMPFlushDirective {{.*}} <line:4:9, col:18> openmp_standalone_directive
hsrp_fmt_plug.c
/*
 * Cracker for MD5 authentication in HSRP, HSRPv2, VRRP, and GLBP.
 * http://www.rfc-editor.org/rfc/rfc1828.txt
 *
 * This is dedicated to Darya. You inspire me.
 *
 * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * optimized Feb 2016, JimF.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_hsrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hsrp);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tuned on core i7 4-core HT
//  2048 -  8850k 6679k
//  4096 - 10642k 7278k
//  8192 - 10489k 7532k
//  16k  - 10413k 7694k
//  32k  - 12111k 7803k  ** this value chosen
//  64k  - 12420k 6523k
// 128k  - 12220k 6741k
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768
#endif
#endif
#endif

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "hsrp"
#define FORMAT_NAME             "\"MD5 authentication\" HSRP, HSRPv2, VRRP, GLBP"
#define FORMAT_TAG              "$hsrp$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        55 // Must fit in a single MD5 block
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_SIZE               sizeof(struct custom_salt)
#define REAL_SALT_SIZE          50
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

static struct fmt_tests tests[] = {
    {"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"},
    {"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
    {"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
    /* HSRPv2 hashes */
    {"$hsrp$0128020006040001aabbcc000a000000006400000bb8000027100a000064000000000000000000000000041c010000000a00000a0000000000000000000000000000000000000000$642fedafe1f374bd2fdd8f1ba81d87a2", "password"},
    {"$hsrp$0128020006040001aabbcc001400000000c800000bb8000027100a000064000000000000000000000000041c010000000a0000140000000000000000000000000000000000000000$0481257f0fe583b275f03a48e88de72f", "password12345"},
    {NULL}
};

static char (*saved_key)[64]; // 1 full limb of MD5, we do our work IN this buffer.
static MD5_CTX (*saved_ctx);
static int *saved_len, dirty;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
    int length;
    unsigned char salt[2048]; // be safe ;)
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t = omp_get_num_threads();

    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
    saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
    crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
    saved_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_ctx));
}

static void done(void)
{
    MEM_FREE(saved_ctx);
    MEM_FREE(crypt_out);
    MEM_FREE(saved_len);
    MEM_FREE(saved_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
    char *p, *q = NULL;
    int len;

    p = ciphertext;
    if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
        p += TAG_LENGTH;
    q = strrchr(ciphertext, '$');
    if (!q || q + 1 == p)
        return 0;
    q = q + 1;
    // if ((q - p - 1) > REAL_SALT_SIZE * 2)
    //     return 0;
    len = strspn(q, HEXCHARS_lc);
    if (len != BINARY_SIZE * 2 || len != strlen(q))
        return 0;
    if (strspn(p, HEXCHARS_lc) != q - p - 1)
        return 0;
    if (q - p > (sizeof(cur_salt->salt) - 1) * 2)
        return 0;
    return 1;
}

static void *get_salt(char *ciphertext)
{
    static struct custom_salt cs;
    int i, len;

    memset(&cs, 0, SALT_SIZE);
    if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
        ciphertext += TAG_LENGTH;
    len = (strrchr(ciphertext, '$') - ciphertext) / 2;

    for (i = 0; i < len; i++)
        cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
            atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
    cs.length = len;

    return &cs;
}

static void *get_binary(char *ciphertext)
{
    static union {
        unsigned char c[BINARY_SIZE];
        ARCH_WORD dummy;
    } buf;
    unsigned char *out = buf.c;
    char *p;
    int i;

    p = strrchr(ciphertext, '$') + 1;
    for (i = 0; i < BINARY_SIZE; i++) {
        out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }

    return out;
}

static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}

// this place would normally contain "print_hex" but I do not want to piss off magnum (yet again)

#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)

static int crypt_all(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index++)
#endif
    {
        MD5_CTX ctx;
        int len = saved_len[index];

        if (dirty) {
            // we use the saved_key buffer in-line.
            unsigned int *block = (unsigned int*)saved_key[index];

            MD5_Init(&saved_ctx[index]);
            // set bit
            saved_key[index][len] = 0x80;
            block[14] = len << 3;
#if (ARCH_LITTLE_ENDIAN==0)
            block[14] = JOHNSWAP(block[14]);
#endif
            MD5_Update(&saved_ctx[index], (unsigned char*)block, 64);
            // clear the bit, so that get_key returns proper key.
            saved_key[index][len] = 0;
        }
        memcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));
        // data
        MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
        // key (again)
        MD5_Update(&ctx, saved_key[index], len);
        MD5_Final((unsigned char*)crypt_out[index], &ctx);
    }
    dirty = 0;
    return count;
}

static int cmp_all(void *binary, int count)
{
    int index = 0;

#ifdef _OPENMP
    for (; index < count; index++)
#endif
        if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
            return 1;
    return 0;
}

static int cmp_one(void *binary, int index)
{
    return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
    return 1;
}

static void hsrp_set_key(char *key, int index)
{
    int olen = saved_len[index];
    int len = strlen(key);

    saved_len[index] = len;
    strcpy(saved_key[index], key);
    if (olen > len)
        memset(&(saved_key[index][len]), 0, olen - len);
    dirty = 1;
}

static char *get_key(int index)
{
    return saved_key[index];
}

struct fmt_main fmt_hsrp = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
        { NULL },
        { FORMAT_TAG },
        tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        get_binary,
        get_salt,
        { NULL },
        fmt_default_source,
        {
            fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
        },
        fmt_default_salt_hash,
        NULL,
        set_salt,
        hsrp_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif
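/* Illustrative sketch (not part of the format): what crypt_all above computes
 * is MD5(pad64(key) || data || key), where pad64(key) is the key placed in a
 * 64-byte MD5 block carrying its own padding bit and bit-length, per the
 * RFC 1828 keyed-MD5 style. The first block depends only on the key, so its
 * MD5 state is hashed once and cached in saved_ctx; only the data-plus-
 * trailing-key part is redone per salt. A naive byte-wise reference version,
 * using the same MD5 API as the file (the function name is hypothetical,
 * and the key is assumed shorter than 56 bytes so its padding fits in one
 * block, which PLAINTEXT_LENGTH 55 guarantees): */
#include <string.h>
#include "md5.h"

static void hsrp_md5_naive(const char *key, const unsigned char *data,
                           int datalen, unsigned char out[16])
{
    MD5_CTX ctx;
    unsigned char block[64];
    int len = strlen(key);

    memset(block, 0, sizeof(block));
    memcpy(block, key, len);
    block[len] = 0x80;              /* MD5 padding bit */
    block[56] = (len << 3) & 0xff;  /* 64-bit little-endian bit length; */
    block[57] = (len << 3) >> 8;    /* two bytes suffice for len <= 55  */
    MD5_Init(&ctx);
    MD5_Update(&ctx, block, 64);    /* this is the state crypt_all caches */
    MD5_Update(&ctx, data, datalen);
    MD5_Update(&ctx, (const unsigned char *)key, len);
    MD5_Final(out, &ctx);
}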
DRB046-doall2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation. It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header. GCC knows the name of this
   header in order to preinclude it. */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex. If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */

/* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the
   following additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
Two-dimensional array computation:
Only one loop is associated with the omp for construct.
The inner loop's loop iteration variable needs an explicit private()
clause, otherwise it will be shared by default.
*/

#include <stdio.h>

int a[100][100];

int main()
{
  int i, j;
  int _ret_val_0;

  #pragma cetus private(i, j)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i, j)
  for (i = 0; i < 100; i++)
  {
    #pragma cetus private(j)
    #pragma loop name main#0#0
    #pragma cetus parallel
    #pragma omp parallel for private(j)
    for (j = 0; j < 100; j++)
    {
      a[i][j] = (i + j);
    }
  }
  #pragma cetus private(i, j)
  #pragma loop name main#1
  #pragma cetus parallel
  #pragma omp parallel for private(i, j)
  for (i = 0; i < 100; i++)
  {
    #pragma cetus private(j)
    #pragma loop name main#1#0
    #pragma cetus parallel
    #pragma omp parallel for private(j)
    for (j = 0; j < 100; j++)
    {
      a[i][j] = (a[i][j] + 1);
    }
  }
  #pragma cetus private(i, j)
  #pragma loop name main#2
  for (i = 0; i < 100; i++)
  {
    #pragma cetus private(j)
    #pragma loop name main#2#0
    for (j = 0; j < 100; j++)
    {
      printf("%d\n", a[i][j]);
    }
  }
  _ret_val_0 = 0;
  return _ret_val_0;
}
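/* Illustrative sketch (not from DataRaceBench): the point the header comment
 * makes. If the inner loop variable is left shared, every thread of the outer
 * parallel loop increments the same j concurrently, so iterations are lost or
 * repeated. Declaring the variable inside the loop makes it private by
 * construction, with no clause needed; the function name is hypothetical. */
void doall2_private_j(int a[100][100])
{
  int i;
#pragma omp parallel for
  for (i = 0; i < 100; i++) {
    for (int j = 0; j < 100; j++) /* j scoped to the loop: private per thread */
      a[i][j] = i + j;
  }
}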
app_baseline.c
/*
 * JGL@SAFARI
 */

/**
 * @file app.c
 * @brief Template for a Host Application Source File.
 *
 * The macros DPU_BINARY and NR_TASKLETS are directly
 * used in the static functions, and are not passed as arguments of these functions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <stdint.h>
#include <sys/mman.h>
#include <omp.h>

#include "../../support/common.h"
#include "../../support/timer.h"

// Hwacha vector histogram kernel, defined elsewhere; this prototype is
// inferred from the call site in main().
extern void vec_vvhst_asm(unsigned int nr_elements, T *A, unsigned int *histo,
                          unsigned int bins);

// Pointer declaration
static T* A;
static unsigned int* histo_host;
static unsigned int* histo_hwacha;
static unsigned int* histo_host_4;

typedef struct Params {
    unsigned int input_size;
    unsigned int bins;
    int          n_warmup;
    int          n_reps;
    const char  *file_name;
    int          exp;
    int          n_threads;
} Params;

/**
 * @brief creates input arrays
 * @param nr_elements how many elements in input arrays
 */
static void read_input(T* A, const Params p) {

    char dctFileName[100];
    FILE *File = NULL;

    // Open input file
    unsigned short temp;
    snprintf(dctFileName, sizeof(dctFileName), "%s", p.file_name);
    if ((File = fopen(dctFileName, "rb")) != NULL) {
        for (unsigned int y = 0; y < p.input_size; y++) {
            fread(&temp, sizeof(unsigned short), 1, File);
            A[y] = (unsigned int)ByteSwap16(temp);
            if (A[y] >= 4096)
                A[y] = 4095;
        }
        fclose(File);
    } else {
        printf("%s does not exist\n", dctFileName);
        exit(1);
    }
}

/**
 * @brief compute output in the host
 */
static void histogram_host(unsigned int* histo, T* A, unsigned int bins,
                           unsigned int nr_elements, int exp,
                           unsigned int nr_of_dpus, int t) {

    omp_set_num_threads(t);

    if (!exp) {
#pragma omp parallel for
        for (unsigned int i = 0; i < nr_of_dpus; i++) {
            for (unsigned int j = 0; j < nr_elements; j++) {
                T d = A[j];
                histo[i * bins + ((d * bins) >> DEPTH)] += 1;
            }
        }
    } else {
#pragma omp parallel for
        for (unsigned int j = 0; j < nr_elements; j++) {
            T d = A[j];
#pragma omp atomic update
            histo[(d * bins) >> DEPTH] += 1;
        }
    }
}

// Params ---------------------------------------------------------------------
void usage() {
    fprintf(stderr,
        "\nUsage:  ./program [options]"
        "\n"
        "\nGeneral options:"
        "\n    -h        help"
        "\n    -w <W>    # of untimed warmup iterations (default=1)"
        "\n    -e <E>    # of timed repetition iterations (default=3)"
        "\n    -t <T>    # of threads (default=1)"
        "\n    -x <X>    Weak (0) or strong (1) scaling (default=1)"
        "\n"
        "\nBenchmark-specific options:"
        "\n    -i <I>    input size (default=1536*1024 elements)"
        "\n    -b <B>    histogram size (default=256 bins)"
        "\n    -f <F>    input image file (default=../input/image_VanHateren.iml)"
        "\n");
}

struct Params input_params(int argc, char **argv) {
    struct Params p;
    p.input_size = 1536 * 1024;
    p.bins       = 256;
    p.n_warmup   = 1;
    p.n_reps     = 3;
    p.n_threads  = 1; // 8
    p.exp        = 1;
    p.file_name  = "./image_VanHateren.iml";

    int opt;
    while ((opt = getopt(argc, argv, "hi:b:w:e:f:x:t:")) >= 0) {
        switch (opt) {
        case 'h': usage(); exit(0); break;
        case 'i': p.input_size = atoi(optarg); break;
        case 'b': p.bins       = atoi(optarg); break;
        case 'w': p.n_warmup   = atoi(optarg); break;
        case 'e': p.n_reps     = atoi(optarg); break;
        case 'f': p.file_name  = optarg;       break;
        case 'x': p.exp        = atoi(optarg); break;
        case 't': p.n_threads  = atoi(optarg); break;
        default:
            fprintf(stderr, "\nUnrecognized option!\n");
            usage();
            exit(0);
        }
    }
    assert(p.n_threads > 0 && "Invalid # of ranks!");

    return p;
}

/**
 * @brief Main of the Host Application.
 */
int main(int argc, char **argv) {

    if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
        perror("mlockall failed:");
        return 0;
    }

    struct Params p = input_params(argc, argv);
    // Number of per-DPU histogram copies used by the exp==0 path;
    // a single copy is assumed here.
    uint32_t nr_of_dpus = 1;

    const unsigned int input_size = p.input_size; // Size of input image
    if (!p.exp)
        assert(input_size % p.n_threads == 0 && "Input size!");
    else
        assert(input_size % p.n_threads == 0 && "Input size!");

    // Input/output allocation
    A = malloc(input_size * sizeof(T));
    T *bufferA = A;
    if (!p.exp)
        histo_host = malloc(nr_of_dpus * p.bins * sizeof(unsigned int));
    else {
        histo_host   = malloc(p.bins * sizeof(unsigned int));
        histo_hwacha = malloc(p.bins * sizeof(unsigned int));
        histo_host_4 = malloc(p.bins * sizeof(unsigned int));
    }

    // Create an input file with arbitrary data.
    read_input(A, p);

    if (!p.exp)
        memset(histo_host, 0, nr_of_dpus * p.bins * sizeof(unsigned int));
    else {
        memset(histo_host,   0, p.bins * sizeof(unsigned int));
        memset(histo_hwacha, 0, p.bins * sizeof(unsigned int));
        memset(histo_host_4, 0, p.bins * sizeof(unsigned int));
    }

    Timer timer;

    start(&timer, 0, 0);
    histogram_host(histo_host, A, p.bins, input_size, p.exp, nr_of_dpus, 1);
    stop(&timer, 0);

    start(&timer, 1, 0);
    vec_vvhst_asm(input_size, A, histo_hwacha, p.bins);
    stop(&timer, 1);

    start(&timer, 2, 0);
    histogram_host(histo_host_4, A, p.bins, input_size, p.exp, nr_of_dpus, 4);
    stop(&timer, 2);

    bool correct = true;
    for (unsigned int i = 0; i < p.bins; i++) {
        if (histo_host[i] != histo_hwacha[i]) {
            printf("Hwacha wrong %u, %u %u\n", i, histo_host[i], histo_hwacha[i]);
            correct = false;
        }
        if (histo_host[i] != histo_host_4[i]) {
            printf("Omp wrong %u, %u %u\n", i, histo_host[i], histo_host_4[i]);
        }
    }

    printf("******************************\n");
    if (correct)
        printf("Hwacha correct!\n");
    else
        printf("Hwacha wrong T_T\n");
    printf("******************************\n");

    printf("CPU ");
    print(&timer, 0, 1);
    printf("\n");
    printf("Hwacha ");
    print(&timer, 1, 1);
    printf("\n");
    printf("4 threads ");
    print(&timer, 2, 1);
    printf("\n");

    munlockall();
    return 0;
}
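/* Illustrative sketch (not part of the benchmark): the atomic version in
 * histogram_host serializes updates on hot bins. A common alternative gives
 * each thread a private histogram and merges them afterwards, paying one
 * atomic per bin per thread instead of one per element. T, DEPTH and the
 * parameter names are the ones used above; the function name is
 * hypothetical, and allocation failure is not handled in this sketch. */
static void histogram_private(unsigned int *histo, T *A, unsigned int bins,
                              unsigned int nr_elements, int t)
{
    omp_set_num_threads(t);
#pragma omp parallel
    {
        // per-thread zero-initialized histogram
        unsigned int *local = calloc(bins, sizeof(unsigned int));
#pragma omp for
        for (unsigned int j = 0; j < nr_elements; j++)
            local[(A[j] * bins) >> DEPTH] += 1;
        // merge phase: contention is bounded by bins * threads
        for (unsigned int b = 0; b < bins; b++) {
#pragma omp atomic update
            histo[b] += local[b];
        }
        free(local);
    }
}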
simd-10.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

int s = 0, i, u;

void
foo ()
{
  #pragma omp for simd schedule(static, 32) reduction(+:s) lastprivate(u)
  for (i = 0; i < 128; i++)
    {
      s++;
      u = i;
    }
  if (i != 128 || s != 128 || u != 127)
    __builtin_abort ();
}

int
main ()
{
  foo ();
  return 0;
}