{"size":12353,"ext":"cuh","lang":"Cuda","max_stars_count":239.0,"content":"\/*\n * Copyright (c) 2019-2022, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#pragma once\n\n#include \n#include \n\n#include \n\n#include \n#include \n\nnamespace cudf {\nnamespace io {\n\n\/**\n * @brief Parses non-negative integral vales.\n *\n * This helper function is only intended to handle positive integers. The input\n * character string is expected to be well-formed.\n *\n * @param begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @return The parsed and converted value\n *\/\ntemplate \n__inline__ __device__ T to_non_negative_integer(char const* begin, char const* end)\n{\n T value = 0;\n\n for (; begin < end; ++begin) {\n if (*begin >= '0' && *begin <= '9') {\n value *= 10;\n value += *begin - '0';\n }\n }\n\n return value;\n}\n\n\/**\n * @brief Extracts the Day, Month, and Year from a string.\n *\n * This function takes a string and produces a `year_month_day` representation.\n * Acceptable formats are a combination of `YYYY`, `M`, `MM`, `D` and `DD` with\n * `\/` or `-` as separators. Data with only year and month (no day) is also valid.\n *\n * @param begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @param dayfirst Flag indicating that first field is the day\n * @return Extracted year, month and day in `cuda::std::chrono::year_month_day` format\n *\/\n__inline__ __device__ cuda::std::chrono::year_month_day extract_date(char const* begin,\n char const* end,\n bool dayfirst)\n{\n using namespace cuda::std::chrono;\n\n char sep = '\/';\n\n auto sep_pos = thrust::find(thrust::seq, begin, end, sep);\n\n if (sep_pos == end) {\n sep = '-';\n sep_pos = thrust::find(thrust::seq, begin, end, sep);\n }\n\n year y;\n month m;\n day d;\n\n \/\/--- is year the first filed?\n if ((sep_pos - begin) == 4) {\n y = year{to_non_negative_integer(begin, sep_pos)}; \/\/ year is signed\n\n \/\/ Month\n auto s2 = sep_pos + 1;\n sep_pos = thrust::find(thrust::seq, s2, end, sep);\n\n if (sep_pos == end) {\n \/\/--- Data is just Year and Month - no day\n m = month{to_non_negative_integer(s2, end)}; \/\/ month and day are unsigned\n d = day{1};\n\n } else {\n m = month{to_non_negative_integer(s2, sep_pos)};\n d = day{to_non_negative_integer((sep_pos + 1), end)};\n }\n\n } else {\n \/\/--- if the dayfirst flag is set, then restricts the format options\n if (dayfirst) {\n d = day{to_non_negative_integer(begin, sep_pos)};\n\n auto s2 = sep_pos + 1;\n sep_pos = thrust::find(thrust::seq, s2, end, sep);\n\n m = month{to_non_negative_integer(s2, sep_pos)};\n y = year{to_non_negative_integer((sep_pos + 1), end)};\n\n } else {\n m = month{to_non_negative_integer(begin, sep_pos)};\n\n auto s2 = sep_pos + 1;\n sep_pos = thrust::find(thrust::seq, s2, end, sep);\n\n if (sep_pos == end) {\n \/\/--- Data is just Year and Month - no day\n y = 
year{to_non_negative_integer(s2, end)};\n d = day{1};\n\n } else {\n d = day{to_non_negative_integer(s2, sep_pos)};\n y = year{to_non_negative_integer((sep_pos + 1), end)};\n }\n }\n }\n\n return year_month_day{y, m, d};\n}\n\n\/**\n * @brief Parses a string to extract the hour, minute, second and millisecond time field\n * values of a day.\n *\n * Incoming format is expected to be `HH:MM:SS.MS`, with the latter second and millisecond fields\n * optional. Each time field can be a single, double, or triple (in the case of milliseconds)\n * digits. 12-hr and 24-hr time format is detected via the absence or presence of AM\/PM characters\n * at the end.\n *\n * @param begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @return Extracted hours, minutes, seconds and milliseconds of `chrono::hh_mm_ss` type with a\n * precision of milliseconds\n *\/\n__inline__ __device__ cuda::std::chrono::hh_mm_ss extract_time_of_day(\n char const* begin, char const* end)\n{\n constexpr char sep = ':';\n\n \/\/ Adjust for AM\/PM and any whitespace before\n duration_h d_h{0};\n auto last = end - 1;\n if (*last == 'M' || *last == 'm') {\n if (*(last - 1) == 'P' || *(last - 1) == 'p') { d_h = duration_h{12}; }\n last = last - 2;\n while (*last == ' ') {\n --last;\n }\n }\n end = last + 1;\n\n \/\/ Find hour-minute separator\n const auto hm_sep = thrust::find(thrust::seq, begin, end, sep);\n \/\/ Extract hours\n d_h += cudf::duration_h{to_non_negative_integer(begin, hm_sep)};\n\n duration_m d_m{0};\n duration_s d_s{0};\n duration_ms d_ms{0};\n\n \/\/ Find minute-second separator (if present)\n const auto ms_sep = thrust::find(thrust::seq, hm_sep + 1, end, sep);\n if (ms_sep == end) {\n d_m = duration_m{to_non_negative_integer(hm_sep + 1, end)};\n } else {\n d_m = duration_m{to_non_negative_integer(hm_sep + 1, ms_sep)};\n\n \/\/ Find second-millisecond separator (if present)\n const auto sms_sep = thrust::find(thrust::seq, ms_sep + 1, end, '.');\n if (sms_sep == end) {\n d_s = duration_s{to_non_negative_integer(ms_sep + 1, end)};\n } else {\n d_s = duration_s{to_non_negative_integer(ms_sep + 1, sms_sep)};\n d_ms = duration_ms{to_non_negative_integer(sms_sep + 1, end)};\n }\n }\n return cuda::std::chrono::hh_mm_ss{d_h + d_m + d_s + d_ms};\n}\n\n\/**\n * @brief Checks whether `c` is decimal digit\n *\/\nconstexpr bool is_digit(char c) { return c >= '0' and c <= '9'; }\n\n\/**\n * @brief Parses a datetime string and computes the corresponding timestamp.\n *\n * Acceptable date formats are a combination of `YYYY`, `M`, `MM`, `D` and `DD` with `\/` or `-` as\n * separators. Input with only year and month (no day) is also valid. Character `T` or blank space\n * is expected to be the separator between date and time of day. Optional time of day information\n * like hours, minutes, seconds and milliseconds are expected to be `HH:MM:SS.MS`. Each time field\n * can be a single, double, or triple (in the case of milliseconds) digits. 
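To make the accepted formats concrete: \"2020-03-17 10:45:30.500\" splits into the date\n * \"2020-03-17\" (year first, `-` separator) and the time of day \"10:45:30.500\", i.e. 10h 45m\n * 30s 500ms, while \"17\/03\/2020\" with `dayfirst` set decodes to the same date.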
12-hr and 24-hr time\n * format is detected via the absence or presence of AM\/PM characters at the end.\n *\n * @tparam timestamp_type Type of output timestamp\n * @param begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @param dayfirst Flag to indicate day\/month or month\/day order\n * @return Timestamp converted to `timestamp_type`\n *\/\ntemplate <typename timestamp_type>\n__inline__ __device__ timestamp_type to_timestamp(char const* begin, char const* end, bool dayfirst)\n{\n using duration_type = typename timestamp_type::duration;\n\n auto sep_pos = end;\n\n \/\/ Find end of the date portion\n int count = 0;\n bool digits_only = true;\n for (auto i = begin; i < end; ++i) {\n digits_only = digits_only and is_digit(*i);\n if (*i == 'T') {\n sep_pos = i;\n break;\n } else if (count == 3 && *i == ' ') {\n sep_pos = i;\n break;\n } else if ((*i == '\/' || *i == '-') || (count == 2 && *i != ' ')) {\n count++;\n }\n }\n\n \/\/ Exit if the input string is digit-only\n if (digits_only) {\n return timestamp_type{\n duration_type{to_non_negative_integer(begin, end)}};\n }\n\n auto ymd = extract_date(begin, sep_pos, dayfirst);\n timestamp_type answer{cuda::std::chrono::sys_days{ymd}};\n\n \/\/ Extract time only if separator is present\n if (sep_pos != end) {\n auto t = extract_time_of_day(sep_pos + 1, end);\n answer += cuda::std::chrono::duration_cast<duration_type>(t.to_duration());\n }\n\n return answer;\n}\n\n\/**\n * @brief Parses the input string into an integral value of the given type.\n *\n * Moves the `begin` iterator past the parsed value.\n *\n * @param[in, out] begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @return The parsed and converted value\n *\/\ntemplate <typename T>\n__inline__ __device__ T parse_integer(char const** begin, char const* end)\n{\n bool const is_negative = (**begin == '-');\n T value = 0;\n\n auto cur = *begin + is_negative;\n while (cur < end) {\n if (*cur >= '0' && *cur <= '9') {\n value *= 10;\n value += *cur - '0';\n } else\n break;\n ++cur;\n }\n *begin = cur;\n\n return is_negative ? -value : value;\n}\n\n\/**\n * @brief Parses the input string into an integral value of the given type if the delimiter is\n * present.\n *\n * Moves the `begin` iterator past the parsed value.\n *\n * @param[in, out] begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @param delimiter delimiter character\n * @return The parsed and converted value, zero if the delimiter is not present\n *\/\ntemplate <typename T>\n__inline__ __device__ T parse_optional_integer(char const** begin, char const* end, char delimiter)\n{\n if (**begin != delimiter) { return 0; }\n\n ++(*begin);\n return parse_integer<T>(begin, end);\n}\n\n\/**\n * @brief Parses the input string into a duration of `duration_type`.\n *\n * The expected format can be one of the following: `DD days`, `DD days +HH:MM:SS.NS`, `DD days\n * HH:MM:SS.NS`, `HH:MM:SS.NS` and digits-only string. 
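As a worked\n * example of these rules: \"2 days 03:04:05.5\" parsed as `duration_ns` contributes\n * 2 days + 3h + 4m + 5s + 500000000ns (subsecond digits are rescaled to nanoseconds from\n * the number of digits actually parsed, capped at 9), and a digits-only string such as\n * \"42\" is returned directly as 42 ticks of `duration_type`. 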
Note `DD` and the optional `NS` field can\n * contain an arbitrary number of digits while `HH`, `MM` and `SS` can be single or double digits.\n *\n * @tparam duration_type Type of the parsed duration\n * @param begin Pointer to the first element of the string\n * @param end Pointer to the first element after the string\n * @return The parsed duration in `duration_type`\n *\/\ntemplate <typename duration_type>\n__inline__ __device__ duration_type to_duration(char const* begin, char const* end)\n{\n using cuda::std::chrono::duration_cast;\n\n \/\/ %d days [+]%H:%M:%S.n => %d days, %d days [+]%H:%M:%S, %H:%M:%S.n, %H:%M:%S, %value.\n constexpr char sep = ':';\n\n \/\/ single pass to parse days, hour, minute, seconds, nanosecond\n auto cur = begin;\n auto const value = parse_integer(&cur, end);\n cur = skip_spaces(cur, end);\n if (std::is_same_v<duration_type, cudf::duration_D> || cur >= end) {\n return duration_type{static_cast<typename duration_type::rep>(value)};\n }\n\n \/\/ \" days [+]\"\n auto const after_days_sep = skip_if_starts_with(cur, end, \"days\");\n auto const has_days_separator = (after_days_sep != cur);\n cur = skip_spaces(after_days_sep, end);\n cur += (*cur == '+');\n\n duration_D d_d{0};\n duration_h d_h{0};\n if (has_days_separator) {\n d_d = duration_D{value};\n d_h = duration_h{parse_integer(&cur, end)};\n } else {\n d_h = duration_h{value};\n }\n\n duration_m d_m{parse_optional_integer(&cur, end, sep)};\n duration_s d_s{parse_optional_integer(&cur, end, sep)};\n\n \/\/ Convert all durations to the given type\n auto output_d = duration_cast<duration_type>(d_d + d_h + d_m + d_s);\n\n if constexpr (std::is_same_v<duration_type, cudf::duration_s>) { return output_d; }\n\n auto const d_ns = (*cur != '.') ? duration_ns{0} : [&]() {\n auto const start_subsecond = ++cur;\n auto const unscaled_subseconds = parse_integer(&cur, end);\n auto const scale = min(9L, cur - start_subsecond) - 9;\n auto const rescaled = numeric::decimal64{unscaled_subseconds, numeric::scale_type{scale}};\n return duration_ns{rescaled.value()};\n }();\n\n return output_d + duration_cast<duration_type>(d_ns);\n}\n\n} \/\/ namespace io\n} \/\/ namespace cudf\n","avg_line_length":33.3864864865,"max_line_length":100,"alphanum_fraction":0.6552254513} {"size":1714,"ext":"cuh","lang":"Cuda","max_stars_count":1.0,"content":"\/\/ Copyright (c) 2009-2019 The Regents of the University of Michigan\n\/\/ This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.\n\n\n\/\/ Maintainer: joaander\n\n\/*! \\file BondTablePotentialGPU.cuh\n \\brief Declares GPU kernel code for calculating the table bond forces. Used by BondTablePotentialGPU.\n*\/\n\n#include \"hoomd\/ParticleData.cuh\"\n#include \"hoomd\/Index1D.h\"\n#include \"hoomd\/HOOMDMath.h\"\n#include \"hoomd\/BondedGroupData.cuh\"\n\n#ifndef __BONDTABLEPOTENTIALGPU_CUH__\n#define __BONDTABLEPOTENTIALGPU_CUH__\n\n\/\/! 
Kernel driver that computes table forces on the GPU for TablePotentialGPU\ncudaError_t gpu_compute_bondtable_forces(Scalar4* d_force,\n Scalar* d_virial,\n const unsigned int virial_pitch,\n const unsigned int N,\n const Scalar4 *d_pos,\n const BoxDim &box,\n const group_storage<2> *blist,\n const unsigned int pitch,\n const unsigned int *n_bonds_list,\n const unsigned int n_bond_type,\n const Scalar2 *d_tables,\n const Scalar4 *d_params,\n const unsigned int table_width,\n const Index2D &table_value,\n unsigned int *d_flags,\n const unsigned int block_size,\n const unsigned int compute_capability);\n\n#endif\n","avg_line_length":43.9487179487,"max_line_length":105,"alphanum_fraction":0.5250875146} {"size":55962,"ext":"cuh","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright (c) 2021, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/\/ Andrei Schaffer, aschaffer@nvidia.com\n\/\/\n#pragma once\n\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \/\/ FIXME: requirement for temporary std::getenv()\n#include \n#include \n\/\/\n#include \n#include \n#include \n\n#include \"rw_traversals.hpp\"\n\nnamespace cugraph {\n\nnamespace detail {\n\n\/\/ raft random generator:\n\/\/ (using upper-bound cached \"map\"\n\/\/ giving out_deg(v) for each v in [0, |V|);\n\/\/ and a pre-generated vector of float random values\n\/\/ in [0,1] to be brought into [0, d_ub[v]))\n\/\/\ntemplate \nstruct rrandom_gen_t {\n using seed_type = seed_t;\n using real_type = real_t;\n\n \/\/ cnstr. version that provides step-wise in-place\n \/\/ rnd generation:\n \/\/\n rrandom_gen_t(raft::handle_t const& handle,\n index_t num_paths,\n device_vec_t& d_random, \/\/ scratch-pad, non-coalesced\n seed_t seed = seed_t{})\n : handle_(handle), seed_(seed), num_paths_(num_paths), d_ptr_random_(raw_ptr(d_random))\n {\n auto rnd_sz = d_random.size();\n\n CUGRAPH_EXPECTS(rnd_sz >= static_cast(num_paths),\n \"Un-allocated random buffer.\");\n\n \/\/ done in constructor;\n \/\/ this must be done at each step,\n \/\/ but this object is constructed at each step;\n \/\/\n generate_random(handle, d_ptr_random_, num_paths, seed_);\n }\n\n \/\/ cnstr. 
version for the case when the\n \/\/ random vector is provided by the caller:\n \/\/\n rrandom_gen_t(raft::handle_t const& handle,\n index_t num_paths,\n real_t* ptr_d_rnd, \/\/ supplied\n seed_t seed = seed_t{})\n : handle_(handle), seed_(seed), num_paths_(num_paths), d_ptr_random_(ptr_d_rnd)\n {\n }\n\n \/\/ in place:\n \/\/ for each v in [0, num_paths) {\n \/\/ if out_deg(v) > 0\n \/\/ d_col_indx[v] = random index in [0, out_deg(v))\n \/\/}\n \/\/ d_crt_out_deg is non-coalesced;\n \/\/\n void generate_col_indices(device_vec_t const& d_crt_out_deg,\n device_vec_t& d_col_indx) const\n {\n auto const* d_ptr_out_degs = d_crt_out_deg.data();\n thrust::transform_if(\n handle_.get_thrust_policy(),\n d_ptr_random_,\n d_ptr_random_ + num_paths_, \/\/ input1\n d_ptr_out_degs, \/\/ input2\n d_ptr_out_degs, \/\/ also stencil\n d_col_indx.begin(),\n [] __device__(real_t rnd_vindx, edge_t crt_out_deg) {\n real_t max_ub = static_cast(crt_out_deg - 1);\n auto interp_vindx = rnd_vindx * max_ub;\n vertex_t v_indx = static_cast(interp_vindx);\n return (v_indx >= crt_out_deg ? crt_out_deg - 1 : v_indx);\n },\n [] __device__(auto crt_out_deg) { return crt_out_deg > 0; });\n }\n\n \/\/ abstracts away the random values generation:\n \/\/\n static void generate_random(raft::handle_t const& handle, real_t* p_d_rnd, size_t sz, seed_t seed)\n {\n cugraph::detail::uniform_random_fill(\n handle.get_stream(), p_d_rnd, sz, real_t{0.0}, real_t{1.0}, seed);\n }\n\n private:\n raft::handle_t const& handle_;\n index_t num_paths_;\n real_t* d_ptr_random_; \/\/ device buffer with real random values; size = num_paths_\n seed_t seed_; \/\/ seed to be used for current batch\n};\n\n\/\/ seeding policy: time (clock) dependent,\n\/\/ to avoid RW calls repeating same random data:\n\/\/\ntemplate \nstruct clock_seeding_t {\n clock_seeding_t(void) = default;\n\n seed_t operator()(void) { return static_cast(std::time(nullptr)); }\n};\n\n\/\/ seeding policy: fixed for debug\/testing repro\n\/\/\ntemplate \nstruct fixed_seeding_t {\n \/\/ purposely no default cnstr.\n\n fixed_seeding_t(seed_t seed) : seed_(seed) {}\n seed_t operator()(void) { return seed_; }\n\n private:\n seed_t seed_;\n};\n\n\/\/ classes abstracting the next vertex extraction mechanism:\n\/\/\n\/\/ primary template, purposely undefined\ntemplate \nstruct col_indx_extract_t;\n\n\/\/ specialization for single-gpu functionality:\n\/\/\ntemplate \nstruct col_indx_extract_t> {\n using vertex_t = typename graph_t::vertex_type;\n using edge_t = typename graph_t::edge_type;\n using weight_t = typename graph_t::weight_type;\n\n col_indx_extract_t(raft::handle_t const& handle,\n graph_t const& graph,\n edge_t* p_d_crt_out_degs,\n index_t* p_d_sizes,\n index_t num_paths,\n index_t max_depth)\n : handle_(handle),\n col_indices_(graph.get_matrix_partition_view().get_indices()),\n row_offsets_(graph.get_matrix_partition_view().get_offsets()),\n values_(graph.get_matrix_partition_view().get_weights()),\n out_degs_(p_d_crt_out_degs),\n sizes_(p_d_sizes),\n num_paths_(num_paths),\n max_depth_(max_depth)\n {\n }\n\n \/\/ in-place extractor of next set of vertices and weights,\n \/\/ (d_v_next_vertices, d_v_next_weights),\n \/\/ given start set of vertices. 
d_v_src_vertices,\n \/\/ and corresponding column index set, d_v_col_indx:\n \/\/\n \/\/ for each indx in [0, num_paths){\n \/\/ v_indx = d_v_src_vertices[indx*max_depth + d_sizes[indx] - 1];\n \/\/ if( out_degs_[v_indx] > 0 ) {\n \/\/ start_row = row_offsets_[v_indx];\n \/\/ delta = d_v_col_indx[indx];\n \/\/ d_v_next_vertices[indx] = col_indices_[start_row + delta];\n \/\/ }\n \/\/ (use tranform_if() with transform iterator)\n \/\/\n void operator()(\n device_vec_t const& d_coalesced_src_v, \/\/ in: coalesced vector of vertices\n device_vec_t const&\n d_v_col_indx, \/\/ in: column indices, given by stepper's random engine\n device_vec_t& d_v_next_vertices, \/\/ out: set of destination vertices, for next step\n device_vec_t&\n d_v_next_weights) \/\/ out: set of weights between src and destination vertices, for next step\n const\n {\n thrust::transform_if(\n handle_.get_thrust_policy(),\n thrust::make_counting_iterator(0),\n thrust::make_counting_iterator(num_paths_), \/\/ input1\n d_v_col_indx.begin(), \/\/ input2\n out_degs_, \/\/ stencil\n thrust::make_zip_iterator(\n thrust::make_tuple(d_v_next_vertices.begin(), d_v_next_weights.begin())), \/\/ output\n [max_depth = max_depth_,\n ptr_d_sizes = sizes_,\n ptr_d_coalesced_v = raw_const_ptr(d_coalesced_src_v),\n row_offsets = row_offsets_,\n col_indices = col_indices_,\n values = values_ ? thrust::optional{*values_}\n : thrust::nullopt] __device__(auto indx, auto col_indx) {\n auto delta = ptr_d_sizes[indx] - 1;\n auto v_indx = ptr_d_coalesced_v[indx * max_depth + delta];\n auto start_row = row_offsets[v_indx];\n\n auto weight_value = (values ? (*values)[start_row + col_indx]\n : weight_t{1}); \/\/ account for un-weighted graphs\n return thrust::make_tuple(col_indices[start_row + col_indx], weight_value);\n },\n [] __device__(auto crt_out_deg) { return crt_out_deg > 0; });\n }\n\n \/\/ Version with selector (sampling strategy):\n \/\/\n template \n void operator()(selector_t const& selector,\n device_vec_t const& d_rnd_val, \/\/ in: random values, one per path\n device_vec_t& d_coalesced_v, \/\/ out: set of coalesced vertices\n device_vec_t& d_coalesced_w, \/\/ out: set of coalesced weights\n real_t tag) \/\/ otherwise. 
ambiguity with the other operator()\n {\n thrust::for_each(handle_.get_thrust_policy(),\n thrust::make_counting_iterator(0),\n thrust::make_counting_iterator(num_paths_), \/\/ input1\n [max_depth = max_depth_,\n row_offsets = row_offsets_,\n ptr_coalesced_v = raw_ptr(d_coalesced_v),\n ptr_coalesced_w = raw_ptr(d_coalesced_w),\n ptr_d_random = raw_const_ptr(d_rnd_val),\n ptr_d_sizes = sizes_,\n ptr_crt_out_degs = out_degs_,\n sampler = selector.get_strategy()] __device__(index_t path_indx) mutable {\n auto chunk_offset = path_indx * max_depth;\n auto delta = ptr_d_sizes[path_indx] - 1;\n auto start_v_pos = chunk_offset + delta;\n auto start_w_pos = chunk_offset - path_indx + delta;\n\n auto src_v = ptr_coalesced_v[start_v_pos];\n auto rnd_val = ptr_d_random[path_indx];\n\n \/\/ `node2vec` info:\n \/\/\n bool start_path = true;\n auto prev_v = src_v;\n if (delta > 0) {\n start_path = false;\n prev_v = ptr_coalesced_v[start_v_pos - 1];\n }\n\n auto opt_tpl_vn_wn = sampler(src_v, rnd_val, prev_v, path_indx, start_path);\n\n if (opt_tpl_vn_wn.has_value()) {\n auto src_vertex = thrust::get<0>(*opt_tpl_vn_wn);\n auto crt_weight = thrust::get<1>(*opt_tpl_vn_wn);\n\n ptr_coalesced_v[start_v_pos + 1] = src_vertex;\n ptr_coalesced_w[start_w_pos] = crt_weight;\n\n ptr_d_sizes[path_indx]++;\n ptr_crt_out_degs[path_indx] =\n row_offsets[src_vertex + 1] - row_offsets[src_vertex];\n } else {\n ptr_crt_out_degs[path_indx] = 0;\n }\n });\n }\n\n private:\n raft::handle_t const& handle_;\n vertex_t const* col_indices_;\n edge_t const* row_offsets_;\n std::optional values_;\n\n edge_t* out_degs_;\n index_t* sizes_;\n index_t num_paths_;\n index_t max_depth_;\n};\n\n\/**\n * @brief Class abstracting the RW initialization, stepping, and stopping functionality\n * The outline of the algorithm is as follows:\n *\n * (1) vertex sets are coalesced into d_coalesced_v,\n * weight sets are coalesced into d_coalesced_w;\n * i.e., the 2 coalesced vectors are allocated to\n * num_paths * max_depth, and num_paths * (max_depth -1), respectively\n * (since each path has a number of edges equal one\n * less than the number of vertices);\n * d_coalesced_v is initialized for each i*max_depth entry\n * (i=0,,,,num_paths-1) to the corresponding starting vertices;\n * (2) d_sizes maintains the current size is for each path;\n * Note that a path may end prematurely if it reaches a sink vertex;\n * (3) d_crt_out_degs maintains the out-degree of each of the latest\n * vertices in the path; i.e., if N(v) := set of destination\n * vertices from v, then this vector stores |N(v)|\n * for last v in each path; i.e.,\n * d_crt_out_degs[i] =\n * out-degree( d_coalesced_v[i*max_depth + d_sizes[i]-1] ),\n * for i in {0,..., num_paths-1};\n * (4) a set of num_paths floating point numbers between [0,1]\n * are generated at each step; then they get translated into\n * _indices_ k in {0,...d_crt_out_degs[i]-1};\n * (5) the next vertex v is then picked as the k-th out-neighbor:\n * next(v) = N(v)[k];\n * (6) d_sizes are incremented accordingly; i.e., for those paths whose\n * corresponding last vertex has out-degree > 0;\n * (7) then next(v) and corresponding weight of (v, next(v)) are stored\n * at appropriate location in their corresponding coalesced vectors;\n * (8) the client of this class (the random_walks() function) then repeats\n * this process max_depth times or until all paths\n * have reached sinks; i.e., d_crt_out_degs = {0, 0,...,0},\n * whichever comes first;\n * (9) in the end some post-processing is done (stop()) to remove\n * 
unused entries from the 2 coalesced vectors;\n * (10) the triplet made of the 2 coalesced vectors and d_sizes is then returned;\n *\n *\/\ntemplate ,\n typename index_t = typename graph_t::edge_type>\nstruct random_walker_t {\n using vertex_t = typename graph_t::vertex_type;\n using edge_t = typename graph_t::edge_type;\n using weight_t = typename graph_t::weight_type;\n using seed_t = typename random_engine_t::seed_type;\n using real_t = typename random_engine_t::real_type;\n using rnd_engine_t = random_engine_t;\n\n random_walker_t(raft::handle_t const& handle,\n vertex_t num_vertices,\n index_t num_paths,\n index_t max_depth,\n vertex_t v_padding_val = 0,\n weight_t w_padding_val = 0)\n : handle_(handle),\n num_paths_(num_paths),\n max_depth_(max_depth),\n vertex_padding_value_(v_padding_val != 0 ? v_padding_val : num_vertices),\n weight_padding_value_(w_padding_val)\n {\n }\n\n \/\/ for each i in [0..num_paths_) {\n \/\/ d_paths_v_set[i*max_depth] = d_src_init_v[i];\n \/\/\n void start(device_const_vector_view& d_src_init_v, \/\/ in: start set\n device_vec_t& d_paths_v_set, \/\/ out: coalesced v\n device_vec_t& d_sizes) const \/\/ out: init sizes to {1,...}\n {\n \/\/ initialize path sizes to 1, as they contain at least one vertex each:\n \/\/ the initial set: d_src_init_v;\n \/\/\n thrust::copy_n(handle_.get_thrust_policy(),\n thrust::make_constant_iterator(1),\n num_paths_,\n d_sizes.begin());\n\n \/\/ scatter d_src_init_v to coalesced vertex vector:\n \/\/\n auto dlambda = [stride = max_depth_] __device__(auto indx) { return indx * stride; };\n\n \/\/ use the transform iterator as map:\n \/\/\n auto map_it_begin =\n thrust::make_transform_iterator(thrust::make_counting_iterator(0), dlambda);\n\n thrust::scatter(handle_.get_thrust_policy(),\n d_src_init_v.begin(),\n d_src_init_v.end(),\n map_it_begin,\n d_paths_v_set.begin());\n }\n\n \/\/ overload for start() with device_uvector d_v_start\n \/\/ (handy for testing)\n \/\/\n void start(device_vec_t const& d_start, \/\/ in: start set\n device_vec_t& d_paths_v_set, \/\/ out: coalesced v\n device_vec_t& d_sizes) const \/\/ out: init sizes to {1,...}\n {\n device_const_vector_view d_start_cview{d_start.data(),\n static_cast(d_start.size())};\n\n start(d_start_cview, d_paths_v_set, d_sizes);\n }\n\n \/\/ in-place updates its arguments from one step to next\n \/\/ (to avoid copying); all \"crt\" arguments are updated at each step()\n \/\/ and passed as scratchpad space to avoid copying them\n \/\/ from one step to another\n \/\/\n \/\/ take one step in sync for all paths that have not reached sinks:\n \/\/\n template \n void step(\n graph_t const& graph,\n selector_t const& selector,\n seed_t seed,\n device_vec_t& d_coalesced_v, \/\/ crt coalesced vertex set\n device_vec_t& d_coalesced_w, \/\/ crt coalesced weight set\n device_vec_t& d_paths_sz, \/\/ crt paths sizes\n device_vec_t& d_crt_out_degs, \/\/ crt out-degs for current set of vertices\n device_vec_t& d_random, \/\/ crt set of random real values\n device_vec_t& d_col_indx) \/\/ crt col indices to be used for retrieving next step\n const\n {\n \/\/ generate random destination indices:\n \/\/\n random_engine_t rgen(handle_, num_paths_, d_random, seed);\n\n \/\/ dst extraction from dst indices:\n \/\/ (d_crt_out_degs to be maintained internally by col_extractor)\n \/\/\n col_indx_extract_t col_extractor(\n handle_, graph, raw_ptr(d_crt_out_degs), raw_ptr(d_paths_sz), num_paths_, max_depth_);\n\n \/\/ The following steps update the next entry in each path,\n \/\/ except the paths 
that reached sinks;\n \/\/\n \/\/ for each indx in [0..num_paths) {\n \/\/ v_indx = d_v_rnd_n_indx[indx];\n \/\/\n \/\/ -- get the `v_indx`-th out-vertex of d_v_paths_v_set[indx] vertex:\n \/\/ -- also, note the size deltas increased by 1 in dst (d_sizes[]):\n \/\/\n \/\/ d_coalesced_v[indx*max_depth + d_sizes[indx]] =\n \/\/ get_out_vertex(graph, d_coalesced_v[indx*max_depth + d_sizes[indx]-1)], v_indx);\n \/\/ d_coalesced_w[indx*(max_depth-1) + d_sizes[indx] - 1] =\n \/\/ get_out_edge_weight(graph, d_coalesced_v[indx*max_depth + d_sizes[indx]-1], v_indx);\n \/\/\n \/\/ (1) generate actual vertex destinations;\n \/\/ (2) update path sizes;\n \/\/ (3) actual coalesced updates;\n \/\/\n \/\/ performs steps (1) + (2) + (3) in one pass;\n \/\/\n col_extractor(selector, d_random, d_coalesced_v, d_coalesced_w, real_t{0});\n }\n\n \/\/ returns true if all paths reached sinks:\n \/\/\n bool all_paths_stopped(device_vec_t const& d_crt_out_degs) const\n {\n auto how_many_stopped =\n thrust::count_if(handle_.get_thrust_policy(),\n d_crt_out_degs.begin(),\n d_crt_out_degs.end(),\n [] __device__(auto crt_out_deg) { return crt_out_deg == 0; });\n return (static_cast(how_many_stopped) == d_crt_out_degs.size());\n }\n\n \/\/ wrap-up, post-process:\n \/\/ truncate v_set, w_set to actual space used\n \/\/\n void stop(device_vec_t& d_coalesced_v, \/\/ coalesced vertex set\n device_vec_t& d_coalesced_w, \/\/ coalesced weight set\n device_vec_t const& d_sizes) const \/\/ paths sizes\n {\n assert(max_depth_ > 1); \/\/ else, no need to step; and no edges\n\n index_t const* ptr_d_sizes = d_sizes.data();\n\n auto predicate_v = [max_depth = max_depth_, ptr_d_sizes] __device__(auto indx) {\n auto row_indx = indx \/ max_depth;\n auto col_indx = indx % max_depth;\n\n return (col_indx >= ptr_d_sizes[row_indx]);\n };\n\n auto predicate_w = [max_depth = max_depth_, ptr_d_sizes] __device__(auto indx) {\n auto row_indx = indx \/ (max_depth - 1);\n auto col_indx = indx % (max_depth - 1);\n\n return (col_indx >= ptr_d_sizes[row_indx] - 1);\n };\n\n auto new_end_v = thrust::remove_if(handle_.get_thrust_policy(),\n d_coalesced_v.begin(),\n d_coalesced_v.end(),\n thrust::make_counting_iterator(0),\n predicate_v);\n\n auto new_end_w = thrust::remove_if(handle_.get_thrust_policy(),\n d_coalesced_w.begin(),\n d_coalesced_w.end(),\n thrust::make_counting_iterator(0),\n predicate_w);\n\n handle_.sync_stream();\n\n d_coalesced_v.resize(thrust::distance(d_coalesced_v.begin(), new_end_v), handle_.get_stream());\n d_coalesced_w.resize(thrust::distance(d_coalesced_w.begin(), new_end_w), handle_.get_stream());\n }\n\n \/\/ in-place non-static (needs handle_):\n \/\/ for indx in [0, nelems):\n \/\/ gather d_result[indx] = d_src[d_coalesced[indx*stride + d_sizes[indx] -1]]\n \/\/\n template \n void gather_from_coalesced(\n device_vec_t const& d_coalesced, \/\/ |gather map| = stride*nelems\n device_vec_t const& d_src, \/\/ |gather input| = nelems\n device_vec_t const& d_sizes, \/\/ |paths sizes| = nelems, elems in [1, stride]\n device_vec_t& d_result, \/\/ |output| = nelems\n index_t stride, \/\/ stride = coalesce block size (typically max_depth)\n index_t nelems) const \/\/ nelems = number of elements to gather (typically num_paths_)\n {\n vertex_t const* ptr_d_coalesced = raw_const_ptr(d_coalesced);\n index_t const* ptr_d_sizes = raw_const_ptr(d_sizes);\n\n \/\/ delta = ptr_d_sizes[indx] - 1\n \/\/\n auto dlambda = [stride, ptr_d_sizes, ptr_d_coalesced] __device__(auto indx) {\n auto delta = ptr_d_sizes[indx] - 1;\n return 
ptr_d_coalesced[indx * stride + delta];\n };\n\n \/\/ use the transform iterator as map:\n \/\/\n auto map_it_begin =\n thrust::make_transform_iterator(thrust::make_counting_iterator(0), dlambda);\n\n thrust::gather(handle_.get_thrust_policy(),\n map_it_begin,\n map_it_begin + nelems,\n d_src.begin(),\n d_result.begin());\n }\n\n \/\/ in-place non-static (needs handle_);\n \/\/ pre-condition: path sizes are assumed updated\n \/\/ to reflect new vertex additions;\n \/\/\n \/\/ for indx in [0, nelems):\n \/\/ if ( d_crt_out_degs[indx] > 0 )\n \/\/ d_coalesced[indx*stride + (d_sizes[indx] - adjust)- 1] = d_src[indx]\n \/\/\n \/\/ adjust := 0 for coalesced vertices; 1 for weights\n \/\/ (because |edges| = |vertices| - 1, in each path);\n \/\/\n template \n void scatter_to_coalesced(\n device_vec_t const& d_src, \/\/ |scatter input| = nelems\n device_vec_t& d_coalesced, \/\/ |scatter input| = stride*nelems\n device_vec_t const& d_crt_out_degs, \/\/ |current set of vertex out degrees| = nelems,\n \/\/ to be used as stencil (don't scatter if 0)\n device_vec_t const&\n d_sizes, \/\/ paths sizes used to provide delta in coalesced paths;\n \/\/ pre-condition: assumed as updated to reflect new vertex additions;\n \/\/ also, this is the number of _vertices_ in each path;\n \/\/ hence for scattering weights this needs to be adjusted; hence the `adjust` parameter\n index_t\n stride, \/\/ stride = coalesce block size (max_depth for vertices; max_depth-1 for weights)\n index_t nelems, \/\/ nelems = number of elements to gather (typically num_paths_)\n index_t adjust = 0)\n const \/\/ adjusting parameter for scattering vertices (0) or weights (1); see above for more;\n {\n index_t const* ptr_d_sizes = raw_const_ptr(d_sizes);\n\n auto dlambda = [stride, adjust, ptr_d_sizes] __device__(auto indx) {\n auto delta = ptr_d_sizes[indx] - adjust - 1;\n return indx * stride + delta;\n };\n\n \/\/ use the transform iterator as map:\n \/\/\n auto map_it_begin =\n thrust::make_transform_iterator(thrust::make_counting_iterator(0), dlambda);\n\n thrust::scatter_if(handle_.get_thrust_policy(),\n d_src.begin(),\n d_src.end(),\n map_it_begin,\n d_crt_out_degs.begin(),\n d_coalesced.begin(),\n [] __device__(auto crt_out_deg) {\n return crt_out_deg > 0; \/\/ predicate\n });\n }\n\n \/\/ updates the entries in the corresponding coalesced vector,\n \/\/ for which out_deg > 0\n \/\/\n void scatter_vertices(device_vec_t const& d_src,\n device_vec_t& d_coalesced,\n device_vec_t const& d_crt_out_degs,\n device_vec_t const& d_sizes) const\n {\n scatter_to_coalesced(d_src, d_coalesced, d_crt_out_degs, d_sizes, max_depth_, num_paths_);\n }\n \/\/\n void scatter_weights(device_vec_t const& d_src,\n device_vec_t& d_coalesced,\n device_vec_t const& d_crt_out_degs,\n device_vec_t const& d_sizes) const\n {\n scatter_to_coalesced(\n d_src, d_coalesced, d_crt_out_degs, d_sizes, max_depth_ - 1, num_paths_, 1);\n }\n\n \/\/ in-place update (increment) path sizes for paths\n \/\/ that have not reached a sink; i.e., for which\n \/\/ d_crt_out_degs[indx]>0:\n \/\/\n void update_path_sizes(device_vec_t const& d_crt_out_degs,\n device_vec_t& d_sizes) const\n {\n thrust::transform_if(\n handle_.get_thrust_policy(),\n d_sizes.begin(),\n d_sizes.end(), \/\/ input\n d_crt_out_degs.begin(), \/\/ stencil\n d_sizes.begin(), \/\/ output: in-place\n [] __device__(auto crt_sz) { return crt_sz + 1; },\n [] __device__(auto crt_out_deg) { return crt_out_deg > 0; });\n }\n\n device_vec_t get_out_degs(graph_t const& graph) const\n {\n return 
graph.compute_out_degrees(handle_);\n }\n\n vertex_t get_vertex_padding_value(void) const { return vertex_padding_value_; }\n\n weight_t get_weight_padding_value(void) const { return weight_padding_value_; }\n\n void init_padding(device_vec_t& d_coalesced_v,\n device_vec_t& d_coalesced_w) const\n {\n thrust::fill(handle_.get_thrust_policy(),\n d_coalesced_v.begin(),\n d_coalesced_v.end(),\n vertex_padding_value_);\n\n thrust::fill(handle_.get_thrust_policy(),\n d_coalesced_w.begin(),\n d_coalesced_w.end(),\n weight_padding_value_);\n }\n\n decltype(auto) get_handle(void) const { return handle_; }\n\n private:\n raft::handle_t const& handle_;\n index_t num_paths_;\n index_t max_depth_;\n vertex_t const vertex_padding_value_;\n weight_t const weight_padding_value_;\n};\n\n\/**\n * @brief returns random walks (RW) from starting sources, where each path is of given maximum\n * length. Single-GPU specialization.\n *\n * @tparam graph_t Type of graph (view).\n * @tparam traversal_t Traversal policy. Either horizontal (faster but requires more memory) or\n * vertical. Defaults to horizontal.\n * @tparam random_engine_t Type of random engine used to generate RW.\n * @tparam seeding_policy_t Random engine seeding policy: variable or fixed (for reproducibility).\n * Defaults to variable, clock dependent.\n * @tparam index_t Indexing type. Defaults to edge_type.\n * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and\n * handles to various CUDA libraries) to run graph algorithms.\n * @param graph Graph object to generate RW on.\n * @param d_v_start Device (view) set of starting vertex indices for the RW.\n * number(paths) == d_v_start.size().\n * @param max_depth maximum length of RWs.\n * @param use_padding (optional) specifies if return uses padded format (true), or coalesced\n * (compressed) format; when padding is used the output is a matrix of vertex paths and a matrix of\n * edges paths (weights); in this case the matrices are stored in row major order; the vertex path\n * matrix is padded with `num_vertices` values and the weight matrix is padded with `0` values;\n * @param seeder (optional) is object providing the random seeding mechanism. Defaults to local\n * clock time as initial seed.\n * @return std::tuple, device_vec_t,\n * device_vec_t> Triplet of either padded or coalesced RW paths; in the coalesced case\n * (default), the return consists of corresponding vertex and edge weights for each, and\n * corresponding path sizes. This is meant to minimize the number of DF's to be passed to the Python\n * layer. The meaning of \"coalesced\" here is that a 2D array of paths of different sizes is\n * represented as a 1D contiguous array. In the padded case the return is a matrix of num_paths x\n * max_depth vertex paths; and num_paths x (max_depth-1) edge (weight) paths, with an empty array of\n * sizes. 
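As a\n * concrete illustration of the coalesced layout: with num_paths = 2 and max_depth = 4, the\n * two paths {10, 11} and {20, 21, 22} come back as vertices {10, 11, 20, 21, 22}, weights\n * {w(10,11), w(20,21), w(21,22)} and sizes {2, 3}; with padding, the first vertex row would\n * instead be {10, 11, pad, pad} with pad == num_vertices and no sizes array. 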
Note: if the graph is un-weighted the edge (weight) paths consists of `weight_t{1}`\n * entries;\n *\/\ntemplate ,\n typename seeding_policy_t = clock_seeding_t,\n typename index_t = typename graph_t::edge_type>\nstd::enable_if_t,\n device_vec_t,\n device_vec_t,\n typename random_engine_t::seed_type>>\nrandom_walks_impl(raft::handle_t const& handle,\n graph_t const& graph,\n device_const_vector_view& d_v_start,\n index_t max_depth,\n selector_t const& selector,\n bool use_padding = false,\n seeding_policy_t seeder = clock_seeding_t{})\n{\n using vertex_t = typename graph_t::vertex_type;\n using edge_t = typename graph_t::edge_type;\n using weight_t = typename graph_t::weight_type;\n using seed_t = typename random_engine_t::seed_type;\n using real_t = typename random_engine_t::real_type;\n\n vertex_t num_vertices = graph.get_number_of_vertices();\n\n auto how_many_valid = thrust::count_if(handle.get_thrust_policy(),\n d_v_start.begin(),\n d_v_start.end(),\n [num_vertices] __device__(auto crt_vertex) {\n return (crt_vertex >= 0) && (crt_vertex < num_vertices);\n });\n\n CUGRAPH_EXPECTS(static_cast(how_many_valid) == d_v_start.size(),\n \"Invalid set of starting vertices.\");\n\n auto num_paths = d_v_start.size();\n auto stream = handle.get_stream();\n\n random_walker_t rand_walker{handle,\n graph.get_number_of_vertices(),\n static_cast(num_paths),\n static_cast(max_depth)};\n\n \/\/ pre-allocate num_paths * max_depth;\n \/\/\n auto coalesced_sz = num_paths * max_depth;\n device_vec_t d_coalesced_v(coalesced_sz, stream); \/\/ coalesced vertex set\n device_vec_t d_coalesced_w(coalesced_sz, stream); \/\/ coalesced weight set\n device_vec_t d_paths_sz(num_paths, stream); \/\/ paths sizes\n\n \/\/ traversal policy:\n \/\/\n traversal_t traversor(num_paths, max_depth);\n\n auto tmp_buff_sz = traversor.get_tmp_buff_sz();\n\n device_vec_t d_crt_out_degs(tmp_buff_sz, stream); \/\/ crt vertex set out-degs\n device_vec_t d_col_indx(tmp_buff_sz, stream); \/\/ \\in {0,..,out-deg(v)}\n\n \/\/ random data handling:\n \/\/\n auto rnd_data_sz = traversor.get_random_buff_sz();\n device_vec_t d_random(rnd_data_sz, stream);\n \/\/ abstracted out seed initialization:\n \/\/\n seed_t seed0 = static_cast(seeder());\n\n \/\/ if padding used, initialize padding values:\n \/\/\n if (use_padding) rand_walker.init_padding(d_coalesced_v, d_coalesced_w);\n\n \/\/ very first vertex, for each path:\n \/\/\n rand_walker.start(d_v_start, d_coalesced_v, d_paths_sz);\n\n \/\/ traverse paths:\n \/\/\n traversor(graph,\n rand_walker,\n selector,\n seed0,\n d_coalesced_v,\n d_coalesced_w,\n d_paths_sz,\n d_crt_out_degs,\n d_random,\n d_col_indx);\n\n \/\/ wrap-up, post-process:\n \/\/ truncate v_set, w_set to actual space used\n \/\/ unless padding is used\n \/\/\n if (!use_padding) { rand_walker.stop(d_coalesced_v, d_coalesced_w, d_paths_sz); }\n\n \/\/ because device_uvector is not copy-cnstr-able:\n \/\/\n if (!use_padding) {\n return std::make_tuple(std::move(d_coalesced_v),\n std::move(d_coalesced_w),\n std::move(d_paths_sz),\n seed0); \/\/ also return seed for repro\n } else {\n return std::make_tuple(\n std::move(d_coalesced_v),\n std::move(d_coalesced_w),\n device_vec_t(0, stream), \/\/ purposely empty size array for the padded case, to avoid\n \/\/ unnecessary allocations\n seed0); \/\/ also return seed for repro\n }\n}\n\n\/**\n * @brief returns random walks (RW) from starting sources, where each path is of given maximum\n * length. 
Multi-GPU specialization.\n *\n * @tparam graph_t Type of graph (view).\n * @tparam traversal_t Traversal policy. Either horizontal (faster but requires more memory) or\n * vertical. Defaults to horizontal.\n * @tparam random_engine_t Type of random engine used to generate RW.\n * @tparam seeding_policy_t Random engine seeding policy: variable or fixed (for reproducibility).\n * Defaults to variable, clock dependent.\n * @tparam index_t Indexing type. Defaults to edge_type.\n * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and\n * handles to various CUDA libraries) to run graph algorithms.\n * @param graph Graph object to generate RW on.\n * @param d_v_start Device (view) set of starting vertex indices for the RW. number(RW) ==\n * d_v_start.size().\n * @param max_depth maximum length of RWs.\n * @param use_padding (optional) specifies if return uses padded format (true), or coalesced\n * (compressed) format; when padding is used the output is a matrix of vertex paths and a matrix of\n * edges paths (weights); in this case the matrices are stored in row major order; the vertex path\n * matrix is padded with `num_vertices` values and the weight matrix is padded with `0` values;\n * @param seeder (optional) is object providing the random seeding mechanism. Defaults to local\n * clock time as initial seed.\n * @return std::tuple, device_vec_t,\n * device_vec_t> Triplet of either padded or coalesced RW paths; in the coalesced case\n * (default), the return consists of corresponding vertex and edge weights for each, and\n * corresponding path sizes. This is meant to minimize the number of DF's to be passed to the Python\n * layer. The meaning of \"coalesced\" here is that a 2D array of paths of different sizes is\n * represented as a 1D contiguous array. In the padded case the return is a matrix of num_paths x\n * max_depth vertex paths; and num_paths x (max_depth-1) edge (weight) paths, with an empty array of\n * sizes. 
Note: if the graph is un-weighted the edge (weight) paths consist of `weight_t{1}`\n * entries;\n *\/\ntemplate ,\n typename seeding_policy_t = clock_seeding_t,\n typename index_t = typename graph_t::edge_type>\nstd::enable_if_t,\n device_vec_t,\n device_vec_t,\n typename random_engine_t::seed_type>>\nrandom_walks_impl(raft::handle_t const& handle,\n graph_t const& graph,\n device_const_vector_view& d_v_start,\n index_t max_depth,\n selector_t const& selector,\n bool use_padding = false,\n seeding_policy_t seeder = clock_seeding_t{})\n{\n CUGRAPH_FAIL(\"Not implemented yet.\");\n}\n\n\/\/ provides conversion of (coalesced) paths to COO format:\n\/\/ (which in turn provides an API consistent with egonet)\n\/\/\ntemplate \nstruct coo_convertor_t {\n coo_convertor_t(raft::handle_t const& handle, index_t num_paths)\n : handle_(handle), num_paths_(num_paths)\n {\n }\n\n std::tuple, device_vec_t, device_vec_t> operator()(\n device_const_vector_view& d_coalesced_v,\n device_const_vector_view& d_sizes) const\n {\n CUGRAPH_EXPECTS(static_cast(d_sizes.size()) == num_paths_, \"Invalid size vector.\");\n\n auto tupl_fill = fill_stencil(d_sizes);\n auto&& d_stencil = std::move(std::get<0>(tupl_fill));\n auto total_sz_v = std::get<1>(tupl_fill);\n auto&& d_sz_incl_scan = std::move(std::get<2>(tupl_fill));\n\n CUGRAPH_EXPECTS(static_cast(d_coalesced_v.size()) == total_sz_v,\n \"Inconsistent vertex coalesced size data.\");\n\n auto src_dst_tpl = gather_pairs(d_coalesced_v, d_stencil, total_sz_v);\n\n auto&& d_src = std::move(std::get<0>(src_dst_tpl));\n auto&& d_dst = std::move(std::get<1>(src_dst_tpl));\n\n device_vec_t d_sz_w_scan(num_paths_, handle_.get_stream());\n\n \/\/ copy vertex path sizes that are > 1:\n \/\/ (because vertex_path_sz translates\n \/\/ into edge_path_sz = vertex_path_sz - 1,\n \/\/ and edge_path_sz == 0 don't contribute\n \/\/ anything):\n \/\/\n auto new_end_it = thrust::copy_if(handle_.get_thrust_policy(),\n d_sizes.begin(),\n d_sizes.end(),\n d_sz_w_scan.begin(),\n [] __device__(auto sz_value) { return sz_value > 1; });\n\n \/\/ resize to new_end:\n \/\/\n d_sz_w_scan.resize(thrust::distance(d_sz_w_scan.begin(), new_end_it), handle_.get_stream());\n\n \/\/ get paths' edge number exclusive scan\n \/\/ by transforming paths' vertex numbers that\n \/\/ are > 1, via transformation:\n \/\/ edge_path_sz = (vertex_path_sz-1):\n \/\/\n thrust::transform_exclusive_scan(\n handle_.get_thrust_policy(),\n d_sz_w_scan.begin(),\n d_sz_w_scan.end(),\n d_sz_w_scan.begin(),\n [] __device__(auto sz) { return sz - 1; },\n index_t{0},\n thrust::plus{});\n\n return std::make_tuple(std::move(d_src), std::move(d_dst), std::move(d_sz_w_scan));\n }\n\n std::tuple, index_t, device_vec_t> fill_stencil(\n device_const_vector_view& d_sizes) const\n {\n device_vec_t d_scan(num_paths_, handle_.get_stream());\n thrust::inclusive_scan(\n handle_.get_thrust_policy(), d_sizes.begin(), d_sizes.end(), d_scan.begin());\n\n index_t total_sz{0};\n CUDA_TRY(cudaMemcpy(\n &total_sz, raw_ptr(d_scan) + num_paths_ - 1, sizeof(index_t), cudaMemcpyDeviceToHost));\n\n device_vec_t d_stencil(total_sz, handle_.get_stream());\n\n \/\/ initialize stencil to all 1's:\n \/\/\n thrust::copy_n(handle_.get_thrust_policy(),\n thrust::make_constant_iterator(1),\n d_stencil.size(),\n d_stencil.begin());\n\n \/\/ set to 0 entries positioned at inclusive_scan(sizes[]),\n \/\/ because those are path \"breakpoints\", where a path ends\n \/\/ and the next one starts, hence there cannot be an edge\n \/\/ between a path ending vertex and the next 
path starting vertex;\n \/\/\n thrust::scatter(handle_.get_thrust_policy(),\n thrust::make_constant_iterator(0),\n thrust::make_constant_iterator(0) + num_paths_,\n d_scan.begin(),\n d_stencil.begin());\n\n return std::make_tuple(std::move(d_stencil), total_sz, std::move(d_scan));\n }\n\n std::tuple, device_vec_t> gather_pairs(\n device_const_vector_view& d_coalesced_v,\n device_vec_t const& d_stencil,\n index_t total_sz_v) const\n {\n auto total_sz_w = total_sz_v - num_paths_;\n device_vec_t valid_src_indx(total_sz_w, handle_.get_stream());\n\n \/\/ generate valid vertex src indices,\n \/\/ which is any index in {0,...,total_sz_v - 2}\n \/\/ provided the next index position; i.e., (index+1),\n \/\/ in stencil is not 0; (if it is, there's no \"next\"\n \/\/ or dst index, because the path has ended);\n \/\/\n thrust::copy_if(handle_.get_thrust_policy(),\n thrust::make_counting_iterator(0),\n thrust::make_counting_iterator(total_sz_v - 1),\n valid_src_indx.begin(),\n [ptr_d_stencil = raw_const_ptr(d_stencil)] __device__(auto indx) {\n auto dst_indx = indx + 1;\n return ptr_d_stencil[dst_indx] == 1;\n });\n\n device_vec_t d_src_v(total_sz_w, handle_.get_stream());\n device_vec_t d_dst_v(total_sz_w, handle_.get_stream());\n\n \/\/ construct pair of src[], dst[] by gathering\n \/\/ from d_coalesced_v all pairs\n \/\/ at entries (valid_src_indx, valid_src_indx+1),\n \/\/ where the set of valid_src_indx was\n \/\/ generated at the previous step;\n \/\/\n thrust::transform(\n handle_.get_thrust_policy(),\n valid_src_indx.begin(),\n valid_src_indx.end(),\n thrust::make_zip_iterator(thrust::make_tuple(d_src_v.begin(), d_dst_v.begin())), \/\/ start_zip\n [ptr_d_vertex = raw_const_ptr(d_coalesced_v)] __device__(auto indx) {\n return thrust::make_tuple(ptr_d_vertex[indx], ptr_d_vertex[indx + 1]);\n });\n\n return std::make_tuple(std::move(d_src_v), std::move(d_dst_v));\n }\n\n private:\n raft::handle_t const& handle_;\n index_t num_paths_;\n};\n\n} \/\/ namespace detail\n\n\/**\n * @brief returns random walks (RW) from starting sources, where each path is of given maximum\n * length. Uniform distribution is assumed for the random engine.\n *\n * @tparam graph_t Type of graph\/view (typically, graph_view_t).\n * @tparam index_t Type used to store indexing and sizes.\n * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and\n * handles to various CUDA libraries) to run graph algorithms.\n * @param graph Graph (view) object to generate RW on.\n * @param ptr_d_start Device pointer to set of starting vertex indices for the RW.\n * @param num_paths Number of paths.\n * @param max_depth maximum length of RWs.\n * @param use_padding (optional) specifies if return uses padded format (true), or coalesced\n * (compressed) format; when padding is used the output is a matrix of vertex paths and a matrix of\n * edge paths (weights); in this case the matrices are stored in row major order; the vertex path\n * matrix is padded with `num_vertices` values and the weight matrix is padded with `0` values;\n * @param selector_type identifier for sampling strategy: uniform, biased, etc.; defaults to\n * uniform = 0;\n * @return std::tuple, rmm::device_uvector,\n * rmm::device_uvector> Triplet of either padded or coalesced RW paths; in the coalesced\n * case (default), the return consists of corresponding vertex and edge weights for each, and\n * corresponding path sizes. This is meant to minimize the number of DF's to be passed to the Python\n * layer. 
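A minimal\n * calling sketch (illustrative only; assumes a previously constructed `handle` and graph\n * view, and relies on the dispatch below treating a null strategy pointer as the default\n * uniform sampler):\n * auto [vertices, weights, sizes] =\n * cugraph::random_walks(handle, graph_view, d_start.data(), num_paths, max_depth,\n * false, nullptr);\n * 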
The meaning of \"coalesced\" here is that a 2D array of paths of different sizes is\n * represented as a 1D contiguous array. In the padded case the return is a matrix of num_paths x\n * max_depth vertex paths; and num_paths x (max_depth-1) edge (weight) paths, with an empty array of\n * sizes. Note: if the graph is un-weighted the edge (weight) paths consists of `weight_t{1}`\n * entries;\n *\/\ntemplate \nstd::tuple,\n rmm::device_uvector,\n rmm::device_uvector>\nrandom_walks(raft::handle_t const& handle,\n graph_t const& graph,\n typename graph_t::vertex_type const* ptr_d_start,\n index_t num_paths,\n index_t max_depth,\n bool use_padding,\n std::unique_ptr sampling_strategy)\n{\n using vertex_t = typename graph_t::vertex_type;\n using edge_t = typename graph_t::edge_type;\n using weight_t = typename graph_t::weight_type;\n using real_t = float; \/\/ random engine type;\n \/\/ FIXME: this should not be hardcoded; at least tag-dispatched\n\n \/\/ 0-copy const device view:\n \/\/\n detail::device_const_vector_view d_v_start{ptr_d_start, num_paths};\n\n \/\/ GPU memory availability:\n \/\/\n size_t free_mem_sp_bytes{0};\n size_t total_mem_sp_bytes{0};\n cudaMemGetInfo(&free_mem_sp_bytes, &total_mem_sp_bytes);\n\n \/\/ GPU memory requirements:\n \/\/\n size_t coalesced_v_count = num_paths * max_depth;\n auto coalesced_e_count = coalesced_v_count - num_paths;\n size_t req_mem_common = sizeof(vertex_t) * coalesced_v_count +\n sizeof(weight_t) * coalesced_e_count + \/\/ coalesced_v + coalesced_w\n (sizeof(vertex_t) + sizeof(index_t)) * num_paths; \/\/ start_v + sizes\n\n size_t req_mem_horizontal = req_mem_common + sizeof(real_t) * coalesced_e_count; \/\/ + rnd_buff\n size_t req_mem_vertical =\n req_mem_common + (sizeof(edge_t) + 2 * sizeof(vertex_t) + sizeof(weight_t) + sizeof(real_t)) *\n num_paths; \/\/ + smaller_rnd_buff + tmp_buffs\n\n bool use_vertical_strategy{false};\n if (req_mem_horizontal > req_mem_vertical && req_mem_horizontal > free_mem_sp_bytes) {\n use_vertical_strategy = true;\n std::cerr\n << \"WARNING: Due to GPU memory availability, slower vertical traversal will be used.\\n\";\n }\n\n int selector_type{0};\n if (sampling_strategy) selector_type = static_cast(sampling_strategy->sampling_type_);\n\n \/\/ node2vec is only possible for weight_t being a floating-point type:\n \/\/\n if constexpr (!std::is_floating_point_v) {\n CUGRAPH_EXPECTS(selector_type != static_cast(sampling_strategy_t::NODE2VEC),\n \"node2vec requires floating point type for weights.\");\n }\n\n if (use_vertical_strategy) {\n if (selector_type == static_cast(sampling_strategy_t::BIASED)) {\n detail::biased_selector_t selector{handle, graph, real_t{0}};\n\n auto quad_tuple =\n detail::random_walks_impl(\n handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n } else if (selector_type == static_cast(sampling_strategy_t::NODE2VEC)) {\n weight_t p(sampling_strategy->p_);\n weight_t q(sampling_strategy->q_);\n\n edge_t alpha_num_paths = sampling_strategy->use_alpha_cache_ ? 
num_paths : 0;\n\n weight_t roundoff = std::numeric_limits::epsilon();\n CUGRAPH_EXPECTS(p > roundoff, \"node2vec p parameter is too small.\");\n\n CUGRAPH_EXPECTS(q > roundoff, \"node2vec q parameter is too small.\");\n\n detail::node2vec_selector_t selector{\n handle, graph, real_t{0}, p, q, alpha_num_paths};\n\n auto quad_tuple =\n detail::random_walks_impl(\n handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n } else {\n detail::uniform_selector_t selector{handle, graph, real_t{0}};\n\n auto quad_tuple =\n detail::random_walks_impl(\n handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n }\n } else { \/\/ horizontal traversal strategy\n if (selector_type == static_cast(sampling_strategy_t::BIASED)) {\n detail::biased_selector_t selector{handle, graph, real_t{0}};\n\n auto quad_tuple =\n detail::random_walks_impl(handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n } else if (selector_type == static_cast(sampling_strategy_t::NODE2VEC)) {\n weight_t p(sampling_strategy->p_);\n weight_t q(sampling_strategy->q_);\n\n edge_t alpha_num_paths = sampling_strategy->use_alpha_cache_ ? num_paths : 0;\n\n weight_t roundoff = std::numeric_limits::epsilon();\n CUGRAPH_EXPECTS(p > roundoff, \"node2vec p parameter is too small.\");\n\n CUGRAPH_EXPECTS(q > roundoff, \"node2vec q parameter is too small.\");\n\n detail::node2vec_selector_t selector{\n handle, graph, real_t{0}, p, q, alpha_num_paths};\n\n auto quad_tuple =\n detail::random_walks_impl(handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n } else {\n detail::uniform_selector_t selector{handle, graph, real_t{0}};\n\n auto quad_tuple =\n detail::random_walks_impl(handle, graph, d_v_start, max_depth, selector, use_padding);\n \/\/ ignore last element of the quad, seed,\n \/\/ since it's meant for testing \/ debugging, only:\n \/\/\n return std::make_tuple(std::move(std::get<0>(quad_tuple)),\n std::move(std::get<1>(quad_tuple)),\n std::move(std::get<2>(quad_tuple)));\n }\n }\n}\n\n\/**\n * @brief returns the COO format (src_vector, dst_vector) from the random walks (RW)\n * paths.\n *\n * @tparam vertex_t Type of vertex indices.\n * @tparam index_t Type used to store indexing and sizes.\n * @param handle RAFT handle object to encapsulate resources (e.g. 
CUDA stream, communicator, and\n * handles to various CUDA libraries) to run graph algorithms.\n * @param coalesced_sz_v coalesced vertex vector size.\n * @param num_paths number of paths.\n * @param d_coalesced_v coalesced vertex buffer.\n * @param d_sizes path sizes buffer.\n * @return tuple of (src_vertex_vector, dst_vertex_vector, path_offsets), where\n * path_offsets are the offsets where the COO set of each path starts.\n *\/\ntemplate \nstd::\n tuple, rmm::device_uvector, rmm::device_uvector>\n convert_paths_to_coo(raft::handle_t const& handle,\n index_t coalesced_sz_v,\n index_t num_paths,\n rmm::device_buffer&& d_coalesced_v,\n rmm::device_buffer&& d_sizes)\n{\n detail::coo_convertor_t to_coo(handle, num_paths);\n\n detail::device_const_vector_view d_v_view(\n static_cast(d_coalesced_v.data()), coalesced_sz_v);\n\n detail::device_const_vector_view d_sz_view(static_cast(d_sizes.data()),\n num_paths);\n\n return to_coo(d_v_view, d_sz_view);\n}\n\n\/**\n * @brief returns additional RW information on vertex paths offsets and weight path sizes and\n * offsets, for the coalesced case (the padded case does not need or provide this information)\n *\n * @tparam index_t Type used to store indexing and sizes.\n * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and\n * handles to various CUDA libraries) to run graph algorithms.\n * @param num_paths number of paths.\n * @param ptr_d_sizes sizes of vertex paths.\n * @return tuple of (vertex_path_offsets, weight_path_sizes, weight_path_offsets), where offsets are\n * exclusive scan of corresponding sizes.\n *\/\ntemplate \nstd::tuple, rmm::device_uvector, rmm::device_uvector>\nquery_rw_sizes_offsets(raft::handle_t const& handle, index_t num_paths, index_t const* ptr_d_sizes)\n{\n rmm::device_uvector d_vertex_offsets(num_paths, handle.get_stream());\n rmm::device_uvector d_weight_sizes(num_paths, handle.get_stream());\n rmm::device_uvector d_weight_offsets(num_paths, handle.get_stream());\n\n thrust::exclusive_scan(\n handle.get_thrust_policy(), ptr_d_sizes, ptr_d_sizes + num_paths, d_vertex_offsets.begin());\n\n thrust::transform(handle.get_thrust_policy(),\n ptr_d_sizes,\n ptr_d_sizes + num_paths,\n d_weight_sizes.begin(),\n [] __device__(auto vertex_path_sz) { return vertex_path_sz - 1; });\n\n handle.sync_stream();\n\n thrust::exclusive_scan(handle.get_thrust_policy(),\n d_weight_sizes.begin(),\n d_weight_sizes.end(),\n d_weight_offsets.begin());\n\n return std::make_tuple(\n std::move(d_vertex_offsets), std::move(d_weight_sizes), std::move(d_weight_offsets));\n}\n\n} \/\/ namespace cugraph\n","avg_line_length":42.7190839695,"max_line_length":100,"alphanum_fraction":0.6470104714} {"size":4494,"ext":"cu","lang":"Cuda","max_stars_count":4.0,"content":"\/*************************************************************************\n * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.\n * Modifications Copyright (c) 2019 Advanced Micro Devices, Inc. 
All rights reserved.\n *\n * See LICENSE.txt for license information\n ************************************************************************\/\n\n#include \n#include \"common.h\"\n\nvoid print_header() {\n PRINT(\"# %10s %12s %8s %6s out-of-place in-place \\n\", \"\", \"\", \"\", \"\");\n PRINT(\"# %10s %12s %8s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\\n\", \"size\", \"count\", \"type\", \"redop\", \"root\",\n \"time\", \"algbw\", \"busbw\", \"error\", \"time\", \"algbw\", \"busbw\", \"error\");\n PRINT(\"# %10s %12s %8s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\\n\", \"(B)\", \"(elements)\", \"\", \"\", \"\",\n \"(us)\", \"(GB\/s)\", \"(GB\/s)\", \"\", \"(us)\", \"(GB\/s)\", \"(GB\/s)\", \"\");\n}\n\nvoid print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {\n PRINT(\"%12li %12li %8s %6s %6i\", size, count, typeName, opName, root);\n}\n\nvoid ReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {\n *sendcount = count;\n *recvcount = count;\n *sendInplaceOffset = 0;\n *recvInplaceOffset = 0;\n *paramcount = *sendcount;\n}\n\ntestResult_t ReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {\n size_t sendcount = args->sendBytes \/ wordSize(type);\n size_t recvcount = args->expectedBytes \/ wordSize(type);\n int nranks = args->nProcs*args->nThreads*args->nGpus;\n\n for (int i=0; i<args->nGpus; i++) {\n int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;\n HIPCHECK(hipSetDevice(gpuid));\n int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);\n HIPCHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));\n void* data = in_place ? 
args->recvbuffs[i] : args->sendbuffs[i];\n TESTCHECK(InitData(data, sendcount, type, rep, rank));\n HIPCHECK(hipMemcpy(args->expected[i], args->recvbuffs[i], args->expectedBytes, hipMemcpyDefault));\n if (rank == root) TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks));\n HIPCHECK(hipDeviceSynchronize());\n }\n return testSuccess;\n}\n\nvoid ReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {\n double baseBw = (double)(count * typesize) \/ 1.0E9 \/ sec;\n *algBw = baseBw;\n *busBw = baseBw;\n}\n\ntestResult_t ReduceRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {\n NCCLCHECK(ncclReduce(sendbuff, recvbuff, count, type, op, root, comm, stream));\n return testSuccess;\n}\n\nstruct testColl reduceTest = {\n \"Reduce\",\n ReduceGetCollByteCount,\n ReduceInitData,\n ReduceGetBw,\n ReduceRunColl\n};\n\nvoid ReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {\n size_t paramcount, sendInplaceOffset, recvInplaceOffset;\n ReduceGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);\n}\n\ntestResult_t ReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {\n args->collTest = &reduceTest;\n ncclDataType_t *run_types;\n ncclRedOp_t *run_ops;\n const char **run_typenames, **run_opnames;\n int type_count, op_count;\n int begin_root, end_root;\n\n if ((int)type != -1) {\n type_count = 1;\n run_types = &type;\n run_typenames = &typeName;\n } else {\n type_count = test_typenum;\n run_types = test_types;\n run_typenames = test_typenames;\n }\n\n if ((int)op != -1) {\n op_count = 1;\n run_ops = &op;\n run_opnames = &opName;\n } else {\n op_count = test_opnum;\n run_ops = test_ops;\n run_opnames = test_opnames;\n }\n\n if (root != -1) {\n begin_root = end_root = root;\n } else {\n begin_root = 0;\n end_root = args->nProcs*args->nThreads*args->nGpus-1;\n }\n\n for (int i=0; i\n\n#define CHECK_CUDA(x) \\\n AT_ASSERTM(x.device().is_cuda(), #x \" must be CUDA tensor\")\n#define CHECK_INPUT(x) AT_ASSERTM(x, \"Input mismatch\")\n\n__device__ __inline__ at::Half __shfl_up_sync(const unsigned mask,\n const at::Half var,\n const unsigned int delta) {\n return __shfl_up_sync(mask, (__half)var, delta);\n}\n\n__device__ __inline__ at::Half __shfl_down_sync(const unsigned mask,\n const at::Half var,\n const unsigned int delta) {\n return __shfl_down_sync(mask, (__half)var, delta);\n}\n","avg_line_length":38.6,"max_line_length":80,"alphanum_fraction":0.5129533679} {"size":17936,"ext":"cu","lang":"Cuda","max_stars_count":491.0,"content":"\/**\n * Copyright 2020 Xiaomi Corporation (authors: Haowen Qiu)\n *\n * See LICENSE for clarification regarding multiple authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \n\n#include \n#include \n#include \n#include \n#include 
\n\n#include \"k2\/csrc\/fsa_utils.h\"\n#include \"k2\/csrc\/host_shim.h\"\n#include \"k2\/csrc\/math.h\"\n#include \"k2\/csrc\/rm_epsilon.h\"\n#include \"k2\/csrc\/test_utils.h\"\n\nnamespace k2 {\n\nvoid CheckComputeSubset(FsaVec &src, FsaVec &dst, Array1 &state_map,\n Array1 &arc_map, bool epsilon_subset) {\n ContextPtr cpu = GetCpuContext();\n src = src.To(cpu);\n dst = dst.To(cpu);\n state_map = state_map.To(cpu);\n arc_map = arc_map.To(cpu);\n const int32_t *src_row_splits1_data = src.RowSplits(1).Data(),\n *src_row_ids1_data = src.RowIds(1).Data(),\n *src_row_ids2_data = src.RowIds(2).Data(),\n *dst_row_splits1_data = dst.RowSplits(1).Data(),\n *dst_row_ids1_data = dst.RowIds(1).Data(),\n *dst_row_ids2_data = dst.RowIds(2).Data();\n const Arc *src_arcs_data = src.values.Data(),\n *dst_arcs_data = dst.values.Data();\n int32_t src_num_arcs = src.NumElements(), dst_num_arcs = dst.NumElements();\n int32_t expected_dst_num_arcs = 0;\n std::set kept_states;\n \/\/ get those kept states except start state and final state\n for (int32_t arc_idx012 = 0; arc_idx012 != src_num_arcs; ++arc_idx012) {\n int32_t fsa_idx0 = src_row_ids1_data[src_row_ids2_data[arc_idx012]];\n int32_t start_state_this_fsa = src_row_splits1_data[fsa_idx0],\n start_state_next_fsa = src_row_splits1_data[fsa_idx0 + 1];\n \/\/ push start state and final state of each fsa to kept_states\n if (start_state_next_fsa > start_state_this_fsa) {\n kept_states.insert(start_state_this_fsa);\n kept_states.insert(start_state_next_fsa - 1);\n }\n const Arc &src_arc = src_arcs_data[arc_idx012];\n bool keep = (epsilon_subset ? (src_arc.label == 0) : (src_arc.label != 0));\n if (keep) {\n ++expected_dst_num_arcs;\n \/\/ convert state_idx1 to state_idx01 and insert to kept_state\n kept_states.insert(start_state_this_fsa + src_arc.src_state);\n kept_states.insert(start_state_this_fsa + src_arc.dest_state);\n }\n }\n \/\/ check kept states, noted we use std::set as the type of kept_states, so\n \/\/ there's no need to sort\n std::vector expected_state_map(kept_states.begin(),\n kept_states.end());\n CheckArrayData(state_map, expected_state_map);\n\n \/\/ check arcs\n EXPECT_EQ(expected_dst_num_arcs, dst_num_arcs);\n for (int32_t dst_arc_idx012 = 0; dst_arc_idx012 != dst_num_arcs;\n ++dst_arc_idx012) {\n int32_t src_arc_idx012 = arc_map[dst_arc_idx012];\n Arc src_arc = src_arcs_data[src_arc_idx012],\n dst_arc = dst_arcs_data[dst_arc_idx012];\n int32_t fsa_idx0 = src_row_ids1_data[src_row_ids2_data[src_arc_idx012]];\n EXPECT_EQ(fsa_idx0, dst_row_ids1_data[dst_row_ids2_data[dst_arc_idx012]]);\n int32_t src_start_state_this_fsa = src_row_splits1_data[fsa_idx0],\n dst_start_state_this_fsa = dst_row_splits1_data[fsa_idx0];\n src_arc.src_state = src_arc.src_state + src_start_state_this_fsa;\n src_arc.dest_state = src_arc.dest_state + src_start_state_this_fsa;\n dst_arc.src_state = state_map[dst_arc.src_state + dst_start_state_this_fsa];\n dst_arc.dest_state =\n state_map[dst_arc.dest_state + dst_start_state_this_fsa];\n EXPECT_EQ(src_arc, dst_arc);\n }\n}\n\nTEST(RmEpsilon, ComputeEpsilonAndNonEpsilonSubsetSimple) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n std::string s1 = R\"(0 1 1 1\n 1 2 0 1\n 1 3 2 1\n 2 3 3 1\n 3 4 4 1\n 3 5 5 1\n 4 5 6 1\n 4 6 7 1\n 5 6 0 1\n 5 7 -1 0\n 6 7 -1 0\n 7\n )\";\n std::string s2 = R\"(0 1 0 1\n 1 2 0 1\n 2 3 0 1\n 3 4 4 1\n 3 5 -1 1\n 4 5 -1 1\n 5\n )\";\n Fsa fsa1 = FsaFromString(s1);\n Fsa fsa2 = FsaFromString(s2);\n Fsa *fsa_array[] = {&fsa1, &fsa2};\n FsaVec fsa_vec = CreateFsaVec(2, 
&fsa_array[0]);\n fsa_vec = fsa_vec.To(context);\n\n \/\/ get epsilon subset\n FsaVec eps_subset;\n Array1 eps_state_map, eps_arc_map;\n ComputeEpsilonSubset(fsa_vec, &eps_subset, &eps_state_map, &eps_arc_map);\n EXPECT_EQ(eps_subset.Dim0(), fsa_vec.Dim0());\n {\n std::vector expected_state_map = {0, 1, 2, 5, 6, 7,\n 8, 9, 10, 11, 13};\n std::vector expected_arc_map = {1, 8, 11, 12, 13};\n CheckArrayData(eps_state_map, expected_state_map);\n CheckArrayData(eps_arc_map, expected_arc_map);\n }\n\n \/\/ get non-epsilon subset\n FsaVec non_eps_subset;\n Renumbering non_eps_state_map_renumbering;\n Array1 non_eps_arc_map;\n ComputeNonEpsilonSubset(fsa_vec, &non_eps_subset,\n &non_eps_state_map_renumbering, &non_eps_arc_map);\n EXPECT_EQ(non_eps_subset.Dim0(), fsa_vec.Dim0());\n Array1 non_eps_state_map = non_eps_state_map_renumbering.New2Old();\n {\n std::vector expected_state_map = {0, 1, 2, 3, 4, 5,\n 6, 7, 8, 11, 12, 13};\n std::vector expected_arc_map = {0, 2, 3, 4, 5, 6,\n 7, 9, 10, 14, 15, 16};\n CheckArrayData(non_eps_state_map, expected_state_map);\n CheckArrayData(non_eps_arc_map, expected_arc_map);\n }\n CheckComputeSubset(fsa_vec, eps_subset, eps_state_map, eps_arc_map, true);\n CheckComputeSubset(fsa_vec, non_eps_subset, non_eps_state_map,\n non_eps_arc_map, false);\n EXPECT_LE(eps_subset.TotSize(1), fsa_vec.TotSize(1));\n EXPECT_LE(non_eps_subset.TotSize(1), fsa_vec.TotSize(1));\n \/\/ eps_subset and non_eps_subset may have duplicate states\n EXPECT_GE(eps_subset.TotSize(1) + non_eps_subset.TotSize(1),\n fsa_vec.TotSize(1));\n \/\/ check num_arcs\n EXPECT_EQ(eps_subset.NumElements() + non_eps_subset.NumElements(),\n fsa_vec.NumElements());\n }\n}\nTEST(RmEpsilon, ComputeEpsilonAndNonEpsilonSubsetRandom) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n for (int32_t i = 0; i != 2; ++i) {\n FsaVec fsa_vec = RandomFsaVec(1, 100, false, 50, 0, 1000);\n \/\/ get epsilon subset\n FsaVec eps_subset;\n Array1 eps_state_map, eps_arc_map;\n ComputeEpsilonSubset(fsa_vec, &eps_subset, &eps_state_map, &eps_arc_map);\n EXPECT_EQ(eps_subset.Dim0(), fsa_vec.Dim0());\n \/\/ get non-epsilon subset\n FsaVec non_eps_subset;\n Renumbering non_eps_state_map_renumbering;\n Array1 non_eps_arc_map;\n ComputeNonEpsilonSubset(fsa_vec, &non_eps_subset,\n &non_eps_state_map_renumbering, &non_eps_arc_map);\n EXPECT_EQ(non_eps_subset.Dim0(), fsa_vec.Dim0());\n Array1 non_eps_state_map =\n non_eps_state_map_renumbering.New2Old();\n CheckComputeSubset(fsa_vec, eps_subset, eps_state_map, eps_arc_map, true);\n CheckComputeSubset(fsa_vec, non_eps_subset, non_eps_state_map,\n non_eps_arc_map, false);\n EXPECT_LE(eps_subset.TotSize(1), fsa_vec.TotSize(1));\n EXPECT_LE(non_eps_subset.TotSize(1), fsa_vec.TotSize(1));\n \/\/ we cannot do the CHECK below for random cases, as there may be some states\n \/\/ in `fsa_vec` which have no entering or leaving arcs; those states\n \/\/ would not occur in either eps_subset or non_eps_subset\n \/\/ EXPECT_GE(eps_subset.TotSize(1) + non_eps_subset.TotSize(1),\n \/\/ fsa_vec.TotSize(1));\n \/\/ check num_arcs\n EXPECT_EQ(eps_subset.NumElements() + non_eps_subset.NumElements(),\n fsa_vec.NumElements());\n }\n }\n}\n\nTEST(RmEpsilon, MapFsaVecStatesSimple) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n std::string s1 = R\"(0 1 1 1\n 1 2 0 1\n 1 3 2 1\n 2 3 3 1\n 3 4 4 1\n 3 5 5 1\n 4 5 6 1\n 4 6 -1 1\n 5 6 -1 1\n 6\n )\";\n std::string s2 = R\"(0 1 0 1\n 1 2 0 1\n 2 3 0 1\n 3 4 4 1\n 3 5 -1 1\n 4 5 -1 1\n 5\n )\";\n Fsa fsa1 = FsaFromString(s1);\n Fsa fsa2 = 
FsaFromString(s2);\n Fsa *fsa_array[] = {&fsa1, &fsa2};\n FsaVec fsa_vec = CreateFsaVec(2, &fsa_array[0]);\n fsa_vec = fsa_vec.To(context);\n\n \/\/ dest has 3 fsas\n const std::vector dest_row_splits1_values = {0, 8, 12, 16};\n Array1 dest_row_splits1(context, dest_row_splits1_values);\n int32_t dest_num_states = dest_row_splits1_values.back();\n Array1 dest_row_ids1(context, dest_num_states);\n RowSplitsToRowIds(dest_row_splits1, &dest_row_ids1);\n \/\/ keep state 0, 1, 2, 4, 6 in fsa1, map to 0, 2, 4, 6, 7 in dest;\n \/\/ keep state 0, 1, 3 in fsa2, map to 13, 14, 15 in dest\n const std::vector state_map_values = {0, 2, 4, -1, 6, -1, 7,\n 13, 14, -1, 15, -1, -1};\n Array1 state_map(context, state_map_values);\n\n FsaVec dest;\n Array1 arc_map;\n MapFsaVecStates(fsa_vec, dest_row_splits1, dest_row_ids1, state_map, &dest,\n &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_TRUE(Equal(dest.RowSplits(1), dest_row_splits1));\n std::vector expected_dest_row_ids2 = {0, 2, 6, 13};\n CheckArrayData(dest.RowIds(2), expected_dest_row_ids2);\n\n EXPECT_EQ(dest.NumElements(), arc_map.Dim());\n std::vector expected_arc_map = {0, 1, 7, 9};\n CheckArrayData(arc_map, expected_arc_map);\n K2_LOG(INFO) << dest;\n K2_LOG(INFO) << arc_map;\n }\n \/\/ TODO(haowen): add random tests\n}\nTEST(RmEpsilon, ComputeEpsilonClosureOneIterSimple) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n std::string s1 = R\"(0 1 0 1\n 1 2 0 1\n 1 3 0 1\n 2 3 0 1\n 3 4 0 1\n 3 5 0 1\n 4 5 0 1\n 6\n )\";\n std::string s2 = R\"(0 1 0 1\n 1 2 0 1\n 2 3 0 1\n 3 4 0 1\n 5\n )\";\n Fsa fsa1 = FsaFromString(s1);\n Fsa fsa2 = FsaFromString(s2);\n Fsa *fsa_array[] = {&fsa1, &fsa2};\n FsaVec fsa_vec = CreateFsaVec(2, &fsa_array[0]);\n fsa_vec = fsa_vec.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n ComputeEpsilonClosureOneIter(fsa_vec, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n K2_LOG(INFO) << dest;\n K2_LOG(INFO) << arc_map;\n EXPECT_EQ(dest.NumElements(), 20);\n EXPECT_EQ(arc_map.Dim0(), 20);\n }\n \/\/ TODO(haowen): add random tests\n}\n\nTEST(RmEpsilon, ComputeEpsilonClosureSimple) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n std::string s1 = R\"(0 1 0 1\n 1 2 0 1\n 1 3 0 1\n 2 3 0 1\n 3 4 0 1\n 3 5 0 1\n 4 5 0 1\n 6\n )\";\n std::string s2 = R\"(0 1 0 1\n 1 2 0 1\n 2 3 0 1\n 3 4 0 1\n 5\n )\";\n Fsa fsa1 = FsaFromString(s1);\n Fsa fsa2 = FsaFromString(s2);\n Fsa *fsa_array[] = {&fsa1, &fsa2};\n FsaVec fsa_vec = CreateFsaVec(2, &fsa_array[0]);\n fsa_vec = fsa_vec.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n ComputeEpsilonClosure(fsa_vec, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n K2_LOG(INFO) << dest;\n K2_LOG(INFO) << arc_map;\n EXPECT_EQ(dest.NumElements(), 25);\n EXPECT_EQ(arc_map.Dim0(), 25);\n }\n \/\/ TODO(haowen): add random tests\n}\n\n\/\/ arc_map is arc_map from dest to src\nvoid CheckArcMap(FsaVec &src, FsaVec &dest, Ragged &arc_map) {\n ContextPtr cpu = GetCpuContext();\n src = src.To(cpu);\n dest = dest.To(cpu);\n arc_map = arc_map.To(cpu);\n int32_t dest_num_arcs = dest.NumElements();\n ASSERT_EQ(arc_map.NumAxes(), 2);\n ASSERT_EQ(dest_num_arcs, arc_map.Dim0());\n const Arc *src_arcs = src.values.Data();\n const Arc *dest_arcs = dest.values.Data();\n const int32_t *arc_map_row_splits = arc_map.RowSplits(1).Data(),\n *arc_map_values = arc_map.values.Data();\n for (int32_t i = 0; i != dest_num_arcs; ++i) {\n float score = 0.0;\n for (int32_t j = arc_map_row_splits[i]; j != 
arc_map_row_splits[i + 1];\n ++j) {\n score += src_arcs[arc_map_values[j]].score;\n }\n float dest_arc_score = dest_arcs[i].score;\n EXPECT_NEAR(dest_arc_score, score, 0.01);\n }\n}\n\nTEST(RmEpsilon, RemoveEpsilonDeviceSimple) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n std::string s1 = R\"(0 1 1 1\n 1 2 0 1\n 1 3 2 1\n 2 3 3 1\n 3 4 4 1\n 3 5 5 1\n 4 5 6 1\n 4 6 7 1\n 5 6 0 1\n 5 7 -1 0\n 6 7 -1 0\n 7\n )\";\n std::string s2 = R\"(0 1 0 1\n 1 2 0 1\n 2 3 0 1\n 3 4 4 1\n 3 5 -1 1\n 4 5 -1 1\n 5\n )\";\n {\n \/\/ test with single Fsa\n Fsa fsa = FsaFromString(s2);\n fsa = fsa.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n RemoveEpsilonDevice(fsa, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 2);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n K2_LOG(INFO) << dest;\n K2_LOG(INFO) << arc_map;\n int32_t p = GetFsaBasicProperties(dest);\n EXPECT_EQ(p & kFsaPropertiesEpsilonFree, kFsaPropertiesEpsilonFree);\n bool log_semiring = false;\n float beam = std::numeric_limits::infinity();\n fsa = fsa.To(GetCpuContext());\n dest = dest.To(GetCpuContext());\n EXPECT_TRUE(IsRandEquivalent(fsa, dest, log_semiring, beam, true, 0.001));\n CheckArcMap(fsa, dest, arc_map);\n }\n {\n \/\/ test with FsaVec\n Fsa fsa1 = FsaFromString(s1);\n Fsa fsa2 = FsaFromString(s2);\n Fsa *fsa_array[] = {&fsa1, &fsa2};\n FsaVec fsa_vec = CreateFsaVec(2, &fsa_array[0]);\n fsa_vec = fsa_vec.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n RemoveEpsilonDevice(fsa_vec, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n K2_LOG(INFO) << dest;\n K2_LOG(INFO) << arc_map;\n Array1 properties;\n int32_t p;\n GetFsaVecBasicProperties(dest, &properties, &p);\n EXPECT_EQ(p & kFsaPropertiesEpsilonFree, kFsaPropertiesEpsilonFree);\n bool log_semiring = false;\n float beam = std::numeric_limits::infinity();\n fsa_vec = fsa_vec.To(GetCpuContext());\n dest = dest.To(GetCpuContext());\n EXPECT_TRUE(\n IsRandEquivalent(fsa_vec, dest, log_semiring, beam, true, 0.001));\n CheckArcMap(fsa_vec, dest, arc_map);\n }\n }\n}\n\nTEST(RmEpsilon, TestRemoveEpsilonDeviceWithRandomTopSortedFsa) {\n for (int32_t i = 0; i != 1; ++i) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n int32_t min_num_fsas = 1;\n int32_t max_num_fsas = 1000;\n bool acyclic = true;\n \/\/ set max_symbol=10 so that we have a high probability\n \/\/ to create Fsas with epsilon arcs.\n int32_t max_symbol = 10;\n int32_t min_num_arcs = 0;\n int32_t max_num_arcs = 10000;\n FsaVec fsa_vec = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic,\n max_symbol, min_num_arcs, max_num_arcs);\n fsa_vec = fsa_vec.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n RemoveEpsilonDevice(fsa_vec, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n Array1 properties;\n int32_t p;\n GetFsaVecBasicProperties(dest, &properties, &p);\n EXPECT_EQ(p & kFsaPropertiesEpsilonFree, kFsaPropertiesEpsilonFree);\n bool log_semiring = false;\n float beam = std::numeric_limits::infinity();\n fsa_vec = fsa_vec.To(GetCpuContext());\n dest = dest.To(GetCpuContext());\n EXPECT_TRUE(\n IsRandEquivalent(fsa_vec, dest, log_semiring, beam, true, 0.1));\n CheckArcMap(fsa_vec, dest, arc_map);\n }\n }\n}\n\nTEST(RmEpsilon, TestRemoveEpsilonDeviceWithRandomNonTopSortedFsa) {\n for (int32_t i = 0; i != 1; ++i) {\n for (auto &context : {GetCpuContext(), GetCudaContext()}) {\n int32_t min_num_fsas = 1;\n int32_t max_num_fsas = 1000;\n bool acyclic = false;\n \/\/ set max_symbol=10 so that we have a high probability\n \/\/ to create 
Fsas with epsilon arcs.\n int32_t max_symbol = 10;\n int32_t min_num_arcs = 0;\n int32_t max_num_arcs = 10000;\n FsaVec fsa_vec = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic,\n max_symbol, min_num_arcs, max_num_arcs);\n \/\/ convert arcs' scores to negative as we don't allow positive epsilon\n \/\/ cycles in `RemoveEpsilonHostIterativeTropical`.\n Arc *fsa_vec_arcs_data = fsa_vec.values.Data();\n for (int32_t n = 0; n != fsa_vec.NumElements(); ++n) {\n Arc &cur_arc = fsa_vec_arcs_data[n];\n if (cur_arc.score > 0) cur_arc.score = -cur_arc.score;\n }\n fsa_vec = fsa_vec.To(context);\n\n FsaVec dest;\n Ragged arc_map;\n RemoveEpsilonDevice(fsa_vec, &dest, &arc_map);\n EXPECT_EQ(dest.NumAxes(), 3);\n EXPECT_EQ(arc_map.NumAxes(), 2);\n Array1 properties;\n int32_t p;\n GetFsaVecBasicProperties(dest, &properties, &p);\n EXPECT_EQ(p & kFsaPropertiesEpsilonFree, kFsaPropertiesEpsilonFree);\n fsa_vec = fsa_vec.To(GetCpuContext());\n dest = dest.To(GetCpuContext());\n EXPECT_TRUE(IsRandEquivalentUnweighted(fsa_vec, dest, true));\n CheckArcMap(fsa_vec, dest, arc_map);\n \/\/ TODO(haowen): how to check if the weights of sequences are correct?\n }\n }\n}\n\n} \/\/ namespace k2\n","avg_line_length":35.5168316832,"max_line_length":80,"alphanum_fraction":0.6433429973} {"size":15105,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"\/*\n This file is part of ParTI!.\n\n ParTI! is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Lesser General Public License as\n published by the Free Software Foundation, either version 3 of\n the License, or (at your option) any later version.\n\n ParTI! is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU Lesser General Public\n License along with ParTI!.\n If not, see .\n*\/\n\n#include \n#include \"sptensor.h\"\n#include \"..\/cudawrap.h\"\n#include \"mttkrp_cuda_kernels.h\"\n\n\n\n\/**\n * CUDA parallelized Matricized sparse tensor times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode\n * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size\n * ndims[mode] * R\n * @param[in] X the sparse tensor input X\n * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary\n * @param[in] mats_order the order of the Khatri-Rao products\n * @param[in] mode the mode on which the MTTKRP is performed\n * @param[in] scratch a temporary array to store intermediate results, space assigned before this function\n *\n * This function supports arbitrary-order sparse tensors with Khatri-Rao\n * products of dense factor matrices; the output is the updated dense matrix for the \"mode\".\n * In this version, atomic operations are used to serialize the global reduction and a large\n * scratch array is used to maximize parallelism. 
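Partial results computed for each nonzero are accumulated into the\n * output factor matrix through these atomic updates. 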
(To be optimized)\n *\/\nint ptiCudaMTTKRPOneKernel(\n ptiSparseTensor const * const X,\n ptiMatrix ** const mats, \/\/ mats[nmodes] as temporary space.\n ptiIndex * const mats_order, \/\/ Corresponds to the mode order of X.\n ptiIndex const mode,\n ptiIndex const impl_num)\n{\n ptiIndex const nmodes = X->nmodes;\n ptiNnzIndex const nnz = X->nnz;\n ptiIndex const * const ndims = X->ndims;\n ptiIndex const R = mats[mode]->ncols;\n ptiIndex const stride = mats[mode]->stride;\n int result;\n\n double time_h2d, time_exe, time_d2h;\n double gbw_h2d, gflops_exe, gbytes_exe, gbw_d2h;\n ptiTimer timer;\n ptiNewTimer(&timer, 0);\n\n \/* Check the mats. *\/\n for(ptiIndex i=0; i < nmodes; ++i) {\n if(mats[i]->ncols != mats[nmodes]->ncols) {\n pti_CheckError(PTIERR_SHAPE_MISMATCH, \"CUDA SpTns MTTKRP\", \"mats[i]->cols != mats[nmodes]->ncols\");\n }\n if(mats[i]->nrows != ndims[i]) {\n pti_CheckError(PTIERR_SHAPE_MISMATCH, \"CUDA SpTns MTTKRP\", \"mats[i]->nrows != ndims[i]\");\n }\n }\n\n\n \/* Transfer tensor and matrices *\/\n \/* dev_mats_order: 1st gpu. *\/\n ptiIndex * dev_mats_order;\n \/* dev_Xndims: 1st gpu. *\/\n ptiIndex * dev_Xndims;\n \/* dev_Xvals: 1st gpu. *\/\n ptiValue * dev_Xvals;\n \/* Xinds_header: 1st cpu, 2nd cpu (ghost pointers) *\/\n ptiIndex ** Xinds_header = new ptiIndex *[nmodes];\n \/* dev_Xinds: 1st gpu, 2nd gpu. *\/\n ptiIndex ** dev_Xinds;\n \/* mats_header: 1st cpu, 2nd cpu (ghost pointers) *\/\n ptiValue ** mats_header = new ptiValue *[nmodes+1];\n \/* lengths: 1st cpu, store the lengths of mats *\/\n ptiNnzIndex * const lengths = new ptiNnzIndex[nmodes+1];\n \/* dev_mats: 1st gpu, 2nd gpu. *\/\n ptiValue ** dev_mats;\n \/* dev_scratch: 1st gpu. *\/\n ptiValue * dev_scratch;\n \/* the pointer to dev_mats[nmodes] *\/\n ptiValue *dev_part_prod;\n ptiNnzIndex dev_mem_size = 0;\n uint64_t dev_flops = 2 * nnz * R + (nmodes - 1) * R;\n uint64_t dev_bytes = ( nmodes * sizeof(ptiIndex) + sizeof(ptiValue) ) * nnz;\n for (ptiIndex m=0; mvalues.data, nnz * sizeof (ptiValue), cudaMemcpyHostToDevice);\n pti_CheckCudaError(result != 0, \"CUDA SpTns SpltMTTKRP\");\n dev_mem_size += nnz * sizeof (ptiValue);\n\n \/* Xinds_header *\/\n for(ptiIndex m = 0; m < nmodes; ++m) {\n Xinds_header[m] = X->inds[m].data;\n }\n \/* dev_Xinds *\/\n result = ptiCudaDuplicateMemoryIndirect(&dev_Xinds, Xinds_header, nmodes, nnz, cudaMemcpyHostToDevice);\n pti_CheckCudaError(result != 0, \"CUDA SpTns SpltMTTKRP\");\n dev_mem_size += nmodes * nnz * sizeof(ptiIndex);\n\n \/* mats_header and lengths *\/\n memset(mats[nmodes]->values, 0, mats[mode]->nrows * stride * sizeof(ptiValue));\n ptiNnzIndex sum_mat_length = 0;\n for(ptiIndex m = 0; m < nmodes; ++m) {\n mats_header[m] = mats[m]->values;\n lengths[m] = mats[m]->nrows * stride;\n sum_mat_length += mats[m]->nrows * stride;\n }\n mats_header[nmodes] = mats[nmodes]->values;\n lengths[nmodes] = mats[mode]->nrows * stride;\n sum_mat_length += mats[mode]->nrows * stride;\n \/* dev_mats *\/\n result = ptiCudaDuplicateMemoryIndirect(&dev_mats, mats_header, nmodes+1, lengths, cudaMemcpyHostToDevice);\n pti_CheckCudaError(result != 0, \"CUDA SpTns SpltMTTKRP\");\n dev_mem_size += sum_mat_length * sizeof(ptiValue);\n\n if(nmodes > 4) {\n \/* dev_scratch *\/\n result = cudaMalloc((void **) &dev_scratch, nnz * stride * sizeof (ptiValue));\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n result = cudaMemset(dev_scratch, 0, nnz * stride * sizeof (ptiValue));\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n dev_mem_size += nnz * stride * sizeof (ptiValue);\n }\n\n 
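\/\/ All device buffers are populated at this point; the timer stop below closes\n \/\/ the H2D window that is reported as transfer bandwidth.\n 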
ptiStopTimer(timer);\n time_h2d = ptiElapsedTime(timer);\n gbw_h2d = dev_mem_size \/ time_h2d \/1e9;\n ptiPrintElapsedTime(timer, \"CUDA SpTns MTTKRP H2D\");\n printf(\"[Bandwidth H2D]: %lf GBytes\/sec\\n\", gbw_h2d);\n\n\n \/\/ ptiNnzIndex max_nthreads_per_block = 512; \/\/ old run\n ptiNnzIndex max_nthreads_per_block = 256;\n ptiNnzIndex max_nblocks = 32768;\n ptiNnzIndex max_nthreadsy = 16;\n\n ptiNnzIndex nthreadsx = 0;\n ptiNnzIndex nthreadsy = 0;\n ptiNnzIndex all_nblocks = 0;\n ptiNnzIndex nblocks = 0;\n switch(impl_num) {\n \/\/ case 1:\n case 11: \/\/ Naive, 1D\n if(nnz < max_nthreads_per_block) {\n nthreadsx = nnz;\n nblocks = 1;\n } else {\n nthreadsx = max_nthreads_per_block;\n all_nblocks = (nnz + nthreadsx -1) \/ nthreadsx;\n if(all_nblocks < max_nblocks) {\n nblocks = all_nblocks;\n } else {\n nblocks = max_nblocks;\n }\n }\n break;\n \/\/ case 2: \/\/ 2D\n case 12:\n if(R <= max_nthreadsy)\n nthreadsy = R;\n else\n nthreadsy = max_nthreadsy;\n nthreadsx = max_nthreads_per_block \/ nthreadsy;\n\n if(nnz < nthreadsx) {\n nthreadsx = nnz;\n nblocks = 1;\n } else {\n all_nblocks = (nnz + nthreadsx -1) \/ nthreadsx;\n if(all_nblocks < max_nblocks) {\n nblocks = all_nblocks;\n } else {\n nblocks = max_nblocks;\n } \n }\n break;\n \/\/ case 3: \/\/ 2D, rank split\n \/\/ if(R <= max_nthreadsy)\n \/\/ nthreadsy = R;\n \/\/ else\n \/\/ nthreadsy = max_nthreadsy;\n \/\/ nthreadsx = max_nthreads_per_block \/ nthreadsy;\n \/\/ all_nblocks = (nnz + nthreadsx -1) \/ nthreadsx;\n \/\/ break;\n \/\/ case 4: \/\/ 2D, exchange x and y\n \/\/ nthreadsx = R;\n \/\/ nthreadsy = max_nthreads_per_block \/ nthreadsx;\n \/\/ all_nblocks = (nnz + nthreadsy -1) \/ nthreadsy;\n \/\/ break;\n \/\/ case 5:\n case 15: \/\/ 2D, exchange x and y, rank split. Best performance\n case 16:\n if(R <= max_nthreadsy)\n nthreadsx = R;\n else\n nthreadsx = max_nthreadsy;\n nthreadsy = max_nthreads_per_block \/ nthreadsx;\n\n if(nnz < nthreadsy) {\n nthreadsy = nnz;\n nblocks = 1;\n } else {\n all_nblocks = (nnz + nthreadsy -1) \/ nthreadsy;\n if(all_nblocks < max_nblocks) {\n nblocks = all_nblocks;\n } else {\n nblocks = max_nblocks;\n } \n }\n break;\n }\n dim3 dimBlock(nthreadsx, nthreadsy);\n printf(\"all_nblocks: %lu, nthreadsx: %lu, nthreadsy: %lu\\n\", all_nblocks, nthreadsx, nthreadsy);\n\n\n ptiStartTimer(timer);\n\n switch(nmodes) {\n case 3:\n switch(impl_num) {\n \/\/ case 1:\n case 11: \/\/ Naive\n printf(\"Execute pti_MTTKRPKernelNnz3DOneKernel (%lu, %lu)\\n\", nblocks, nthreadsx);\n pti_MTTKRPKernelNnz3DOneKernel<<>>(\n mode,\n nmodes,\n nnz,\n R,\n stride,\n dev_Xndims,\n dev_Xinds,\n dev_Xvals,\n dev_mats_order,\n dev_mats);\n break;\n \/\/ case 2:\n case 12:\n printf(\"Execute pti_MTTKRPKernelRankNnz3DOneKernel (%lu, (%u, %u))\\n\", nblocks, dimBlock.x, dimBlock.y);\n pti_MTTKRPKernelRankNnz3DOneKernel<<>>(\n mode,\n nmodes,\n nnz,\n R,\n stride,\n dev_Xndims,\n dev_Xinds,\n dev_Xvals,\n dev_mats_order,\n dev_mats);\n break;\n case 3:\n printf(\"Execute pti_MTTKRPKernelNnzRankSplit3D (%lu, (%u, %u))\\n\", nblocks, dimBlock.x, dimBlock.y);\n \/\/ pti_MTTKRPKernelNnzRankSplit3D<<>>(\n \/\/ mode,\n \/\/ nmodes,\n \/\/ nnz,\n \/\/ R,\n \/\/ stride,\n \/\/ dev_Xndims,\n \/\/ dev_Xinds,\n \/\/ dev_Xvals,\n \/\/ dev_mats_order,\n \/\/ dev_mats,\n \/\/ block_offset);\n break;\n case 4:\n printf(\"Execute pti_MTTKRPKernelRankNnz3D (%lu, (%u, %u))\\n\", nblocks, dimBlock.x, dimBlock.y);\n \/\/ pti_MTTKRPKernelRankNnz3D<<>>(\n \/\/ mode,\n \/\/ nmodes,\n \/\/ nnz,\n \/\/ R,\n \/\/ stride,\n \/\/ dev_Xndims,\n 
\/\/ dev_Xinds,\n \/\/ dev_Xvals,\n \/\/ dev_mats_order,\n \/\/ dev_mats,\n \/\/ block_offset);\n break;\n \/\/ case 5:\n case 15:\n printf(\"Execute pti_MTTKRPKernelRankSplitNnz3DOneKernel (%lu, (%u, %u))\\n\", nblocks, dimBlock.x, dimBlock.y);\n pti_MTTKRPKernelRankSplitNnz3DOneKernel<<>>(\n mode,\n nmodes,\n nnz,\n R,\n stride,\n dev_Xndims,\n dev_Xinds,\n dev_Xvals,\n dev_mats_order,\n dev_mats);\n break;\n case 16:\n printf(\"Execute pti_MTTKRPKernelRankSplitNnzRB3DOneKernel (%lu, (%u, %u))\\n\", nblocks, dimBlock.x, dimBlock.y);\n pti_MTTKRPKernelRankSplitNnzRB3DOneKernel<<>>(\n mode,\n nmodes,\n nnz,\n R,\n stride,\n dev_Xndims,\n dev_Xinds,\n dev_Xvals,\n dev_mats_order,\n dev_mats);\n break;\n } \/\/ End switch impl_num\n break;\n\n case 4: \n switch(impl_num) {\n default:\n printf(\"Not support: Execute pti_MTTKRPKernelScratch (%lu, %lu)\\n\", nblocks, nthreadsx);\n \/\/ pti_MTTKRPKernelScratch<<>>(\n \/\/ mode,\n \/\/ nmodes,\n \/\/ nnz,\n \/\/ R,\n \/\/ stride,\n \/\/ dev_Xndims,\n \/\/ dev_Xinds,\n \/\/ dev_Xvals,\n \/\/ dev_mats_order,\n \/\/ dev_mats,\n \/\/ dev_scratch,\n \/\/ block_offset);\n } \/\/ End switch impl_num\n break;\n\n default:\n printf(\"Execute pti_MTTKRPKernelScratch (%lu, %lu)\\n\", nblocks, nthreadsx);\n \/\/ pti_MTTKRPKernelScratch<<>>(\n \/\/ mode,\n \/\/ nmodes,\n \/\/ nnz,\n \/\/ R,\n \/\/ stride,\n \/\/ dev_Xndims,\n \/\/ dev_Xinds,\n \/\/ dev_Xvals,\n \/\/ dev_mats_order,\n \/\/ dev_mats,\n \/\/ dev_scratch,\n \/\/ block_offset);\n } \/\/ End switch nmodes\n result = cudaThreadSynchronize();\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n\n\n\n ptiStopTimer(timer);\n time_exe = ptiElapsedTime(timer);\n gflops_exe = (double)dev_flops \/ time_exe \/ 1e9;\n gbytes_exe = (double)dev_bytes \/ time_exe \/ 1e9;\n ptiPrintElapsedTime(timer, \"CUDA SpTns MTTKRP\");\n printf(\"[GFLOPS]: %.2lf GFlops, [Bandwidth]: %.2lf GB\/s\\n\", gflops_exe, gbytes_exe);\n\n ptiStartTimer(timer);\n\n dev_mem_size = 0;\n \/* Copy back the pointer to dev_mats[nmodes] to the result *\/\n result = cudaMemcpy(&dev_part_prod, dev_mats + nmodes, sizeof dev_part_prod, cudaMemcpyDeviceToHost);\n pti_CheckCudaError(result != 0, \"CUDA SpTns SpltMTTKRP\");\n dev_mem_size += sizeof dev_part_prod;\n\n result = cudaMemcpy(mats[nmodes]->values, dev_part_prod, mats[mode]->nrows * stride * sizeof (ptiValue), cudaMemcpyDeviceToHost);\n pti_CheckCudaError(result != 0, \"CUDA SpTns SpltMTTKRP\");\n dev_mem_size += mats[mode]->nrows * stride * sizeof (ptiValue);\n\n ptiStopTimer(timer);\n time_d2h = ptiElapsedTime(timer);\n gbw_d2h = dev_mem_size \/ time_d2h \/1e9;\n ptiPrintElapsedTime(timer, \"CUDA SpTns MTTKRP D2H\");\n printf(\"[Bandwidth D2H]: %lf GBytes\/sec\\n\", gbw_d2h);\n printf(\"\\n\");\n ptiFreeTimer(timer);\n\n result = cudaFree(dev_mats_order);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n result = cudaFree(dev_Xndims);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n result = cudaFree(dev_Xvals);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n result = cudaFree(dev_Xinds);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n result = cudaFree(dev_mats);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n if(nmodes > 4) {\n result = cudaFree(dev_scratch);\n pti_CheckCudaError(result != 0, \"CUDA SpTns MTTKRP\");\n }\n delete[] Xinds_header;\n delete[] mats_header;\n delete[] lengths;\n\n return 0;\n}\n\n\n","avg_line_length":35.2097902098,"max_line_length":133,"alphanum_fraction":0.5730552797} 
{"size":1619,"ext":"cuh","lang":"Cuda","max_stars_count":null,"content":"#ifndef CUDAAGGREGATION_CUH_\n#define CUDAAGGREGATION_CUH_\n\n#include \"..\/Store\/StoreElement.cuh\"\n#include \"..\/Query\/QueryAggregationResults.cuh\"\n#include \"..\/Query\/QueryAggregationData.h\"\n#include \"..\/Query\/Query.h\"\n#include \"..\/Store\/StoreTypedefs.h\"\n#include \n\n\/\/ TODO: Move this define to config\n#define CUDA_THREADS_PER_BLOCK 256\n\nusing namespace ddj::store;\nusing namespace ddj::query;\n\nextern \"C\" {\n\n\t\/\/ AGGREGATION OF VALUES\n\n\tsize_t gpu_sum(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_max(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_min(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_average(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_variance(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_skewness(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_kurtosis(storeElement* elements, size_t dataSize, void** result);\n\tsize_t gpu_histogram_value(storeElement* elements, size_t dataSize, void** result,\n\t\t\tfloat2* buckets, int bucketCount);\n\tsize_t gpu_histogram_time(storeElement* elements, size_t dataSize, void** result,\n\t\t\tullint2* buckets, int bucketCount);\n\tsize_t gpu_trunk_integral(storeElement* elements, size_t dataSize, void** result,\n\t\t\tddj::ullintPair* dataLocationInfo, int locationInfoCount);\n\n\t\/\/ AGGREGATION OF SERIES\n\n\tsize_t gpu_sum_series(storeElement* elements, size_t dataSize, void** result, ullint* timePoints,\n\t\t\tint timePointCount, metric_type* metrics, int metricCount, int* tags, int tagCount);\n}\n\n#endif \/* CUDAAGGREGATION_CUH_ *\/\n","avg_line_length":38.5476190476,"max_line_length":98,"alphanum_fraction":0.7776405188} {"size":30943,"ext":"cu","lang":"Cuda","max_stars_count":206.0,"content":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#define START_IND(a,b,c) (int)std::floor((float)(a * c) \/ b)\n#define END_IND(a,b,c) (int)std::ceil((float)((a + 1) * c) \/ b)\n\n#define START_IND_INT(a,b,c) ((a * c) \/ b)\n#define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) \/ b)\n\/\/ #define START_IND(a,b,c) a * c \/ b\n\/\/ #define END_IND(a,b,c) (a + 1) * c \/ b + ((a + 1) * c % b > 0)?1:0\n\n#define CUDA_MAX_THREADS 1024 \/\/ this is safe, in reality 256 is our limit\n#define BLOCK_STRIDE 2 \/\/ increasing block_stride to lower # of blocks launched\n\nnamespace at {\nnamespace native {\n\nnamespace {\n\n \/\/ 4d tensor B x D x H x W\n \/\/ All kernels view batch dim B and feature dim D as collapsed.\n\n \/*\n * Description:\n * this function adaptively average pools an input 4D tensor along dimensions 2 and 3\n * 4D input, 4D output\n *\/\n template \n __global__ void adaptive_average_pool(T *input, T *output,\n int isizeH, int isizeW,\n int osizeH, int osizeW,\n int64_t istrideD, int64_t istrideH, int64_t istrideW)\n {\n \/\/ iterators on output pixels\n int oh, ow;\n\n \/\/ select input\/output plane based on thread\/block ID\n int o_plane = blockIdx.x;\n int i_plane = o_plane;\n\n output = output + o_plane*osizeH*osizeW;\n input = input + i_plane*istrideD;\n\n int ostartH = blockDim.y*blockIdx.y + threadIdx.y;\n int oendH = osizeH;\n const int ostepH = blockDim.y*gridDim.y;\n\n int ostartW = threadIdx.x;\n int oendW = osizeW;\n const int ostepW = blockDim.x;\n\n \/\/ 
For all output pixels...\n for(oh = ostartH; oh < oendH; oh += ostepH) {\n\n int istartH = START_IND(oh, osizeH, isizeH);\n int iendH = END_IND(oh, osizeH, isizeH);\n int kH = iendH - istartH;\n\n for(ow = ostartW; ow < oendW; ow += ostepW) {\n\n int istartW = START_IND(ow, osizeW, isizeW);\n int iendW = END_IND(ow, osizeW, isizeW);\n int kW = iendW - istartW;\n\n \/\/ Compute the average pooling over corresponding input pixels\n T *ptr_input = input + istartH*istrideH + istartW*istrideW;\n T *ptr_output = output + oh*osizeW + ow;\n T sum = ScalarConvert::to(0);\n int ih, iw;\n for(ih = 0; ih < kH; ++ih) {\n for(iw = 0; iw < kW; ++iw) {\n T val = ptr_input[iw*istrideW];\n sum += val;\n }\n ptr_input += istrideH; \/\/ next input line\n }\n \/\/ Update output\n *ptr_output = sum \/ kH \/ kW;\n }\n }\n }\n\n \/*\n * Description:\n * this function computes the gradInput from gradOutput\n *\/\n template \n __global__ void adaptive_average_gradinput(\n T *gradInput, T *gradOutput,\n int isizeH, int isizeW, int osizeH, int osizeW\n )\n {\n \/\/ iterators on input pixels\n int ih, iw;\n\n \/\/ select input\/output plane based on thread\/block ID\n int i_plane = blockIdx.x;\n int o_plane = i_plane;\n\n gradOutput = gradOutput + o_plane*osizeH*osizeW;\n gradInput = gradInput + i_plane*isizeH*isizeW;\n\n int istartH = blockDim.y*blockIdx.y + threadIdx.y;\n int iendH = isizeH;\n int istepH = blockDim.y*gridDim.y;\n\n int istartW = threadIdx.x;\n int iendW = isizeW;\n int istepW = blockDim.x;\n\n \/\/ compute gradInput\n for(ih = istartH; ih < iendH; ih += istepH) {\n\n int ostartH = START_IND(ih, isizeH, osizeH);\n int oendH = END_IND(ih, isizeH, osizeH);\n\n for(iw = istartW; iw < iendW; iw += istepW) {\n\n int ostartW = START_IND(iw, isizeW, osizeW);\n int oendW = END_IND(iw, isizeW, osizeW);\n\n \/\/ Compute the gradients over corresponding output pixels\n T *ptr_gradInput = gradInput + ih*isizeW + iw;\n\n int oh, ow;\n for(oh = ostartH; oh < oendH; ++oh) {\n int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH);\n for(ow = ostartW; ow < oendW; ++ow) {\n int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW);\n T grad_delta = gradOutput[ow + oh*osizeW] \/ kH \/ kW;\n *ptr_gradInput += grad_delta;\n }\n }\n }\n }\n }\n\n \/*\n * Description:\n * this function computes the gradInput from gradOutput\n * (uses atomic add)\n *\/\n template \n __global__ void atomic_adaptive_average_gradinput(\n T *gradInput, T *gradOutput,\n int isizeH, int isizeW, int osizeH, int osizeW\n )\n {\n \/\/ iterators on output indices\n int oh, ow;\n\n \/\/ select input\/output plane based on thread\/block ID\n int o_plane = blockIdx.x;\n int i_plane = o_plane;\n\n gradOutput = gradOutput + o_plane*osizeW*osizeH;\n gradInput = gradInput + i_plane*isizeW*isizeH;\n\n int ostartH = blockDim.y*blockIdx.y + threadIdx.y;\n int oendH = osizeH;\n int ostepH = blockDim.y*gridDim.y;\n\n int ostartW = threadIdx.x;\n int oendW = osizeW;\n int ostepW = blockDim.x;\n\n \/\/ For all output pixels...\n for(oh = ostartH; oh < oendH; oh += ostepH) {\n\n int istartH = START_IND(oh, osizeH, isizeH);\n int iendH = END_IND(oh, osizeH, isizeH);\n int kH = iendH - istartH;\n\n for(ow = ostartW; ow < oendW; ow += ostepW) {\n\n int istartW = START_IND(ow, osizeW, isizeW);\n int iendW = END_IND(ow, osizeW, isizeW);\n int kW = iendW - istartW;\n\n \/\/ Compute the gradients over corresponding input pixels\n T *ptr_gradInput = gradInput + istartH*isizeW + istartW;\n T *ptr_gradOutput = gradOutput + oh*osizeW + ow;\n T 
grad_delta = *ptr_gradOutput \/ kW \/ kH;\n\n int ih, iw;\n for(ih = 0; ih < kH; ++ih) {\n for(iw = 0; iw < kW; ++iw) {\n \/\/ atomic add since different threads could update same variable\n gpuAtomicAdd(&(ptr_gradInput[iw]), grad_delta);\n }\n ptr_gradInput += isizeW; \/\/ next input line\n }\n }\n }\n }\n\n \/*\n * Description:\n * this function adaptively average pools an input 4D tensor along dimensions 2 and 3\n * NHWC layout for both input and output tensor\n * 4D input, 4D output\n *\/\n template \n C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)\n __global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output,\n int sizeB, int sizeC,\n int isizeH, int isizeW,\n int osizeH, int osizeW,\n int kernel_stride_C, int kernel_size_C,\n index_t istrideB, index_t istrideC,\n index_t istrideH, index_t istrideW)\n {\n extern __shared__ int smem[];\n scalar_t *out_cached = reinterpret_cast(smem);\n\n \/\/ flattening cta for pre-computation & smem initialization;\n int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n int block_size = blockDim.x * blockDim.y * blockDim.z;\n\n \/\/ use shared memory to store temporary output value. This is simply to\n \/\/ reduce register usage.\n for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {\n out_cached[i] = scalar_t(0.0);\n }\n\n __syncthreads();\n\n \/\/ each CTA handles a portion of a single slice on batch dimension;\n int batch_id = blockIdx.x % sizeB;\n int channel_id = blockIdx.x \/ sizeB;\n int channel_offset = threadIdx.x + channel_id * blockDim.x;\n\n \/\/ each CTA handles a single slice on batch dimension;\n \/\/ We use gridDim.x to handle striding on C as well.\n output = output + batch_id * osizeH * osizeW * sizeC;\n input = input + batch_id * istrideB;\n\n \/\/ split out_cached and assign it exclusively to each thread;\n out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x];\n\n \/\/ iterate on output H & W.\n \/\/ Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on\n \/\/ tile so there's a better chance to hit L1 cache.\n index_t oH = (osizeH + gridDim.z-1) \/ gridDim.z;\n index_t oW = (osizeW + gridDim.y-1) \/ gridDim.y;\n index_t ostartH = threadIdx.z + blockIdx.z*oH;\n index_t oendH = ::min(ostartH+oH, osizeH);\n index_t ostartW = threadIdx.y + blockIdx.y*oW;\n index_t oendW = ::min(ostartW+oW, osizeW);\n\n \/\/ Stride for threads, each warp can reuse L1 as they go. 
So theoretically\n \/\/ better chance to survive cache eviction.\n for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {\n int istartH = START_IND_INT(oh, osizeH, isizeH);\n int iendH = END_IND_INT(oh, osizeH, isizeH);\n for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {\n int istartW = START_IND_INT(ow, osizeW, isizeW);\n int iendW = END_IND_INT(ow, osizeW, isizeW);\n scalar_t factor = scalar_t(1.0) \/ ((iendH-istartH) * (iendW-istartW));\n\n \/\/ loop on input: hierarchy h->w->c, use shared memory here, which hopefully\n \/\/ would not stall global memory reads;\n for (index_t ih = istartH; ih < iendH; ih++) {\n for (index_t iw = istartW; iw < iendW; iw++) {\n int cached_index = threadIdx.x;\n const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW;\n for (index_t c = channel_offset;\n c < sizeC;\n c += blockDim.x*kernel_stride_C) {\n out_cached[cached_index] += ptr_input[c*istrideC];\n cached_index += blockDim.x;\n }\n }\n }\n scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC;\n\n int cached_index = threadIdx.x;\n \/\/ write accumulated output to global memory;\n for (index_t c = channel_offset;\n c < sizeC;\n c += blockDim.x*kernel_stride_C) {\n \/\/ Multiplying by the precomputed factor gives slightly different results\n \/\/ than the NCHW kernel in unit tests; switching to the division form below\n \/\/ can be used to verify correctness:\n \/\/ output[c] = out_cached[c] \/ (iendH-istartH) \/ (iendW-istartW);\n ptr_output[c] = out_cached[cached_index] * factor;\n out_cached[cached_index] = scalar_t(0.0);\n cached_index += blockDim.x;\n }\n \/\/ no need to __syncthreads() since out_cached is not shared.\n }\n }\n }\n\n \/*\n * Description:\n * this function computes the gradInput from gradOutput\n * NHWC layout for both input and output tensor\n * 4D input, 4D output\n *\/\n template \n C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)\n __global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput,\n int sizeB, int sizeC,\n int isizeH, int isizeW,\n int osizeH, int osizeW,\n int kernel_stride_C, int kernel_size_C,\n index_t ostrideB, index_t ostrideC,\n index_t ostrideH, index_t ostrideW)\n {\n extern __shared__ int smem[];\n index_t *ostartW_cached = smem;\n index_t *oendW_cached = &ostartW_cached[isizeW];\n\n \/\/ be careful with alignment, in case scalar_t is fp16, we want to assign\n \/\/ int pointers first.\n scalar_t *r_kW_cached = reinterpret_cast(&oendW_cached[isizeW]);\n scalar_t *r_kH_cached = &r_kW_cached[osizeW];\n scalar_t *out_cached = &r_kH_cached[osizeH];\n\n \/\/ flattening cta for pre-computation & smem initialization;\n int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n int block_size = blockDim.x * blockDim.y * blockDim.z;\n\n \/\/ Precompute output start\/end index per input index on width dimension;\n \/\/ Not doing this for height dimension, as that's our outermost loop.\n for (index_t i = thread_id; i < isizeW; i+= block_size) {\n ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW);\n oendW_cached[i] = END_IND_INT(i, isizeW, osizeW);\n }\n\n \/\/ Precompute pooling height\/width factors for each output element;\n \/\/ This is used to weight the output gradient when accumulating it onto the\n \/\/ input gradient.\n \/\/ Technically we don't have to compute it for the whole `osizeH`, since\n \/\/ each cta only covers a consecutive portion of the entire output. 
But it's\n \/\/ not going to save us from code divergence, and the shared memory saving is\n \/\/ not an issue either, so just leave it as is for now.\n for (index_t i = thread_id; i < osizeH; i+= block_size) {\n r_kH_cached[i] = scalar_t(1.0) \/ (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH));\n }\n for (index_t i = thread_id; i < osizeW; i+= block_size) {\n r_kW_cached[i] = scalar_t(1.0) \/ (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW));\n }\n\n \/\/ each CTA handles a portion of a single slice on batch dimension;\n int batch_id = blockIdx.x % sizeB;\n int channel_id = blockIdx.x \/ sizeB;\n int channel_offset = threadIdx.x + channel_id * blockDim.x;\n\n \/\/ use shared memory to store temporary output value. This is simply to\n \/\/ reduce register usage.\n for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {\n out_cached[i] = scalar_t(0.0);\n }\n\n __syncthreads();\n\n \/\/ each CTA handles a portion of a single slice on batch dimension;\n \/\/ We use gridDim.x to handle striding on C as well.\n gradInput = gradInput + batch_id * isizeH * isizeW * sizeC;\n gradOutput = gradOutput + batch_id * ostrideB;\n\n \/\/ split out_cached and assign it exclusively to each thread;\n out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C];\n\n \/\/ iterate on input H & W.\n \/\/ Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on\n \/\/ tile so there's a better chance to hit L1 cache.\n index_t iH = (isizeH + gridDim.z-1) \/ gridDim.z;\n index_t iW = (isizeW + gridDim.y-1) \/ gridDim.y;\n index_t istartH = threadIdx.z + blockIdx.z*iH;\n index_t iendH = ::min(istartH+iH, isizeH);\n index_t istartW = threadIdx.y + blockIdx.y*iW;\n index_t iendW = ::min(istartW+iW, isizeW);\n\n \/\/ Stride for threads, each warp can reuse L1 as they go. 
So theoretically\n \/\/ better chance to survive cache eviction.\n for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) {\n index_t ostartH = START_IND_INT(ih, isizeH, osizeH);\n index_t oendH = END_IND_INT(ih, isizeH, osizeH);\n for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) {\n \/\/ loop on output: hierarchy h->w->c, so we could reuse weight factor f\n \/\/ because it remains the same for given oh & ow\n for(index_t oh = ostartH; oh < oendH; ++oh) {\n for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) {\n scalar_t f = r_kW_cached[ow] * r_kH_cached[oh];\n const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW;\n int cached_index = threadIdx.x;\n for (index_t c = channel_offset;\n c < sizeC;\n c += blockDim.x*kernel_stride_C) {\n out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f;\n cached_index += blockDim.x;\n }\n }\n }\n scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC;\n int cached_index = threadIdx.x;\n \/\/ write accumulated gradInput to global memory;\n for (index_t c = channel_offset;\n c < sizeC;\n c += blockDim.x*kernel_stride_C) {\n ptr_gradInput[c] = out_cached[cached_index];\n out_cached[cached_index] = scalar_t(0.0);\n cached_index += blockDim.x;\n }\n \/\/ no need to __syncthreads() since out_cached is not shared.\n }\n }\n }\n\n \/\/ 4d tensor B x D x H x W\n\n void adaptive_avg_pool2d_out_cuda_template(\n Tensor& output,\n const Tensor& input,\n IntArrayRef output_size)\n {\n TensorArg input_arg{ input, \"input\", 1 },\n output_arg{ output, \"output\", 2 };\n checkAllSameGPU(\"cudnn_adaptive_avg_pooling2d\", {input_arg, output_arg});\n\n for (int64_t i = 0; i < input.ndimension(); i++) {\n TORCH_CHECK(input.size(i) > 0,\n \"adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, \"\n \"but input has sizes \", input.sizes(), \" with dimension \", i, \" being \"\n \"empty\");\n }\n\n Tensor input_ = input;\n switch (input.suggest_memory_format()) {\n case at::MemoryFormat::ChannelsLast: {\n \/\/ special case for tensor memory format in channels_last\n TORCH_CHECK(input.ndimension() == 4,\n \"non-empty 4D (batch mode) tensor expected for input with channels_last layout\");\n\n int sizeB = input_.size(0);\n int sizeC = input_.size(1);\n int isizeH = input_.size(2);\n int isizeW = input_.size(3);\n\n int64_t istrideB = input_.stride(0);\n int64_t istrideC = input_.stride(1);\n int64_t istrideH = input_.stride(2);\n int64_t istrideW = input_.stride(3);\n\n int osizeH = output_size[0];\n int osizeW = output_size[1];\n\n \/\/ preserve channels_last stride on output tensor;\n if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) {\n \/\/ TODO: modify this after resize_ added `memory_format` tag\n output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC});\n }\n\n const int max_threads = std::min(\n at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);\n int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;\n int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;\n size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;\n\n \/\/ Launch kernel on output tensor elements. 
Logic behind launch config:\n \/\/ output tensor size NCHW, strides NHWC;\n \/\/ Launch on:\n \/\/ N -> grid.x\n \/\/ H -> grid.z * block.z\n \/\/ W -> grid.y * block.y\n \/\/ C -> block.x\n \/\/ encourage larger block_y & block_z for better cache hit while maintaining\n \/\/ reasonable block_x for coalesced memory access;\n int block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(sizeC), at::cuda::warp_size()));\n int block_y = std::min(\n maxThreadsDim[1], std::min(lastPow2(osizeW), max_threads \/ block_x));\n int block_z = std::min(\n maxThreadsDim[2], std::min(lastPow2(osizeH), max_threads \/ block_x \/ block_y));\n block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(sizeC), max_threads \/ block_y \/ block_z));\n const dim3 block(block_x, block_y, block_z);\n int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);\n int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);\n\n \/\/ Do NOT clip grid_x, striding on Batch dimension is not in the kernel,\n \/\/ although it could be easily implemented given current kernel.\n int grid_x = sizeB*kernel_stride_C;\n \/\/ it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;\n int grid_y = std::min(\n maxGridSize[1], cuda::ATenCeilDiv(osizeW, block_y*BLOCK_STRIDE));\n int grid_z = std::min(\n maxGridSize[2], cuda::ATenCeilDiv(osizeH, block_z*BLOCK_STRIDE));\n const dim3 grid(grid_x, grid_y, grid_z);\n\n\n \/\/ we are dealing with packed tensor here. max index is the same as numel.\n \/\/ TODO: to really support input tensor large enough to go beyond int32,\n \/\/ we will need to restrict our shared memory usage and adjust the launch\n \/\/ config;\n AT_ASSERT(input_.numel() < std::numeric_limits::max());\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,\n input_.scalar_type(), \"adaptive_avg_pool2d_nhwc_cuda\", [&] {\n size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t);\n AT_ASSERT(shmem_size <= sharedMemPerBlock);\n adaptive_average_pool_nhwc<<>> (\n input_.data_ptr(),\n output.data_ptr(),\n sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,\n kernel_stride_C, kernel_size_C,\n istrideB, istrideC, istrideH, istrideW);\n }\n );\n break;\n }\n case at::MemoryFormat::Contiguous: {\n TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4),\n \"non-empty 3D or 4D (batch mode) tensor expected for input\");\n int64_t grid_x = input.size(-3);\n if (input.ndimension() == 4) {\n input_ = input.contiguous();\n grid_x *= input_.size(-4);\n }\n int64_t sizeD = input_.size(-3);\n int64_t isizeH = input_.size(-2);\n int64_t isizeW = input_.size(-1);\n\n int64_t istrideD = input_.stride(-3);\n int64_t istrideH = input_.stride(-2);\n int64_t istrideW = input_.stride(-1);\n\n int64_t osizeH = output_size[0];\n int64_t osizeW = output_size[1];\n if (input.ndimension() == 4) {\n output.resize_({input_.size(-4), sizeD, osizeH, osizeW});\n } else {\n output.resize_({sizeD, osizeH, osizeW});\n }\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,\n input_.scalar_type(), \"adaptive_avg_pool2d_cuda\", [&] {\n scalar_t *input_data = input_.data_ptr();\n scalar_t *output_data = output.data_ptr();\n\n \/\/ cuda blocks & threads:\n int blocksH = std::max((int)(16L \/ sizeD), 1);\n dim3 blocks(grid_x, blocksH);\n dim3 threads(32, 8);\n\n \/\/ run averagepool kernel\n adaptive_average_pool <<>> (\n input_data, output_data,\n isizeH, isizeW, osizeH, osizeW,\n istrideD, istrideH, istrideW);\n }\n );\n break;\n }\n default:\n TORCH_CHECK(\n false,\n \"Unsupported memory format. 
Supports only ChannelsLast, Contiguous\");\n }\n AT_CUDA_CHECK(cudaGetLastError());\n }\n\n void adaptive_avg_pool2d_backward_out_cuda_template(\n Tensor& gradInput,\n const Tensor& gradOutput_,\n const Tensor& input)\n {\n TensorArg grad_input_arg{ gradInput, \"gradInput\", 1 },\n grad_output_arg{ gradOutput_, \"gradOutput_\", 2 },\n input_arg{ input, \"input\", 3 };\n checkAllSameGPU(\"cudnn_adaptive_avg_pooling2d_out\",\n {grad_input_arg, grad_output_arg, input_arg});\n\n switch (input.suggest_memory_format()) {\n case at::MemoryFormat::ChannelsLast: {\n \/\/ special case for tensor memory format in channels_last\n TORCH_CHECK(input.ndimension() == 4,\n \"non-empty 4D (batch mode) tensor expected for input with channels_last layout\");\n\n int sizeB = input.size(0);\n int sizeC = input.size(1);\n int isizeH = input.size(2);\n int isizeW = input.size(3);\n\n Tensor gradOutput = gradOutput_;\n\n int64_t ostrideB = gradOutput.stride(0);\n int64_t ostrideC = gradOutput.stride(1);\n int64_t ostrideH = gradOutput.stride(2);\n int64_t ostrideW = gradOutput.stride(3);\n\n int osizeH = gradOutput.size(-2);\n int osizeW = gradOutput.size(-1);\n\n \/\/ preserve channels_last stride on input tensor;\n if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) {\n gradInput.as_strided_(\n {sizeB, sizeC, isizeH, isizeW},\n {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC});\n }\n\n const int max_threads = std::min(\n at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);\n int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;\n int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;\n size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;\n\n \/\/ Launch kernel on input tensor elements. Logic behind launch config:\n \/\/ input tensor size NCHW, strides NHWC;\n \/\/ Launch on:\n \/\/ N(C) -> grid.x (striding on C to reduce sh_mem usage)\n \/\/ H -> grid.z * block.z\n \/\/ W -> grid.y * block.y\n \/\/ C -> block.x\n \/\/ encourage larger block_y & block_z for better cache hit while maintain\n \/\/ reasonable block_x for coalesced memory access;\n int block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(sizeC), at::cuda::warp_size()));\n int block_y = std::min(\n maxThreadsDim[1], std::min(lastPow2(isizeW), max_threads \/ block_x));\n int block_z = std::min(\n maxThreadsDim[2], std::min(lastPow2(isizeH), max_threads \/ block_x \/ block_y));\n block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(sizeC), max_threads \/ block_y \/ block_z));\n const dim3 block(block_x, block_y, block_z);\n int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);\n int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);\n\n \/\/ Do NOT clip grid_x, striding on Batch dimension is not in the kernel,\n \/\/ although it could be easily implemented given current kernel.\n int grid_x = sizeB*kernel_stride_C;\n \/\/ it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;\n int grid_y = std::min(\n maxGridSize[1], cuda::ATenCeilDiv(isizeW, block_y*BLOCK_STRIDE));\n int grid_z = std::min(\n maxGridSize[2], cuda::ATenCeilDiv(isizeH, block_z*BLOCK_STRIDE));\n const dim3 grid(grid_x, grid_y, grid_z);\n\n \/\/ we are dealing with packed tensor here. 
max index is the same as numel.\n \/\/ TODO: to really support input tensor large enough to go beyond int32,\n \/\/ we will need to restrict our shared memory usage and adjust the launch\n \/\/ config;\n AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max());\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,\n input.scalar_type(), \"adaptive_avg_pool2d_backward_nhwc_cuda\", [&] {\n size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t);\n AT_ASSERT(shmem_size <= sharedMemPerBlock);\n adaptive_average_gradinput_nhwc<<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> (\n gradInput.data_ptr<scalar_t>(),\n gradOutput.data_ptr<scalar_t>(),\n sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,\n kernel_stride_C, kernel_size_C,\n ostrideB, ostrideC, ostrideH, ostrideW);\n }\n );\n break;\n }\n case at::MemoryFormat::Contiguous: {\n bool atomic = true; \/\/ suboptimal, but without atomic it doesn't pass the tests\n\n Tensor gradOutput = gradOutput_.contiguous();\n\n int64_t sizeD = input.size(-3);\n int64_t isizeH = input.size(-2);\n int64_t isizeW = input.size(-1);\n\n int64_t osizeH = gradOutput.size(-2);\n int64_t osizeW = gradOutput.size(-1);\n\n int64_t grid_x = sizeD;\n if (input.ndimension() == 4) grid_x *= input.size(-4);\n\n \/\/bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,\n input.scalar_type(), \"adaptive_avg_pool2d_backward_cuda\", [&] {\n scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();\n scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();\n\n \/\/ cuda blocks & threads:\n int blocksH = std::max((int)(16L \/ sizeD), 1);\n dim3 blocks(grid_x, blocksH);\n dim3 threads(32, 8);\n
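 \/\/ When osize does not evenly divide isize, neighbouring output cells\n \/\/ scatter into overlapping gradInput windows, so plain stores would\n \/\/ race; the atomic kernel accumulates those contributions safely.\n if(atomic)\n {\n \/\/ run updateGradInput kernel, accumulate gradients atomically\n atomic_adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (\n gradInput_data, gradOutput_data,\n isizeH, isizeW, osizeH, osizeW);\n }\n else\n {\n \/\/ run updateGradInput kernel\n adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (\n gradInput_data, gradOutput_data,\n isizeH, isizeW, osizeH, osizeW);\n }\n }\n );\n break;\n }\n default:\n TORCH_CHECK(\n false,\n \"Unsupported memory format. 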
Supports only ChannelsLast, Contiguous\");\n\n }\n AT_CUDA_CHECK(cudaGetLastError());\n }\n\n} \/\/ namespace\n\n Tensor& adaptive_avg_pool2d_out_cuda(\n Tensor& output,\n const Tensor& input,\n IntArrayRef output_size)\n {\n adaptive_avg_pool2d_out_cuda_template(\n output, input, output_size);\n return output;\n }\n\n Tensor adaptive_avg_pool2d_cuda(\n at::Tensor const& input,\n IntArrayRef output_size)\n {\n auto output = at::empty({0}, input.options());\n adaptive_avg_pool2d_out_cuda_template(\n output, input, output_size);\n return output;\n }\n\n Tensor& adaptive_avg_pool2d_backward_out_cuda(\n Tensor& gradInput,\n const Tensor& gradOutput,\n const Tensor& input)\n {\n \/\/ See Note [Writing Nondeterministic Operations]\n \/\/ Nondeterministic because of atomicAdd usage\n globalContext().alertNotDeterministic(\"adaptive_avg_pool2d_backward_out_cuda\");\n gradInput.resize_as_(input);\n adaptive_avg_pool2d_backward_out_cuda_template(\n gradInput, gradOutput, input);\n return gradInput;\n }\n\n Tensor adaptive_avg_pool2d_backward_cuda(\n const Tensor& gradOutput,\n const Tensor& input)\n {\n \/\/ See Note [Writing Nondeterministic Operations]\n \/\/ Nondeterministic because of atomicAdd usage\n globalContext().alertNotDeterministic(\"adaptive_avg_pool2d_backward_cuda\");\n auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);\n adaptive_avg_pool2d_backward_out_cuda_template(\n gradInput, gradOutput, input);\n return gradInput;\n }\n\n} \/\/ at::native\n} \/\/ at\n\n#undef BLOCK_STRIDE\n#undef CUDA_MAX_THREADS\n#undef START_IND\n#undef END_IND\n","avg_line_length":39.6197183099,"max_line_length":148,"alphanum_fraction":0.6192676857} {"size":1533,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"#include \"energyMinimizerNesterovAG.cuh\"\n\/*! 
\\file energyMinimizerNesterovAG.cu\n\n\\addtogroup updaterKernels\n@{\n*\/\n\n__global__ void gpu_nesterovAG_step_kernel(dVec *force,\n dVec *position,\n dVec *alternatePosition,\n scalar deltaT,\n scalar mu,\n int N)\n {\n \/\/ read in the index that belongs to this thread\n unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;\n int pidx = idx\/DIMENSION;\n if(pidx>=N) return;\n int didx = idx%DIMENSION;\n\n scalar f = force[pidx][didx];\n scalar oldAltPos = alternatePosition[pidx][didx];\n\n alternatePosition[pidx][didx] = position[pidx][didx] + deltaT*force[pidx][didx];\n position[pidx][didx] = alternatePosition[pidx][didx] + mu*(alternatePosition[pidx][didx] - oldAltPos);\n }\n\n\/*!\nA memory-efficiency optimization has each thread acting on one dimension of one degree of freedom...\n *\/\nbool gpu_nesterovAG_step(dVec *force,\n dVec *position,\n dVec *alternatePosition,\n scalar deltaT,\n scalar mu,\n int N,\n int blockSize)\n {\n int block_size=blockSize;\n if (N < 128) block_size = 32;\n unsigned int nblocks = DIMENSION*N\/block_size + 1;\n gpu_nesterovAG_step_kernel<<<nblocks,block_size>>>(force,position,alternatePosition,\n deltaT,mu,N);\n HANDLE_ERROR(cudaGetLastError());\n return cudaSuccess;\n };\n\n\/** @} *\/ \/\/end of group declaration\n","avg_line_length":31.2857142857,"max_line_length":106,"alphanum_fraction":0.6086105675} {"size":4767,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"#include \n#include \n#include \n#include \n\n#include \n\n#include \n#include \n#include \n#include \n\n#include \"rmm\/rmm.h\"\n\n\/\/TODO: I pulled this from Andrei's sql ops code, we should consider putting this somewhere\n\/\/others can access this api\nnamespace{ \/\/anonymous\n\n \/\/helper function:\n \/\/flatten AOS info from gdf_columns into SOA (2 arrays):\n \/\/(1) column array pointers and (2) types;\n \/\/\n gdf_error soa_col_info(gdf_column** cols, size_t ncols, void** d_cols, int* d_types, cudaStream_t stream)\n {\n std::vector<void*> v_cols(ncols,nullptr);\n std::vector<int> v_types(ncols, 0);\n for(int i=0;i<ncols;++i)\n {\n\tv_cols[i] = cols[i]->data;\n\tv_types[i] = cols[i]->dtype;\n }\n\n void** h_cols = &v_cols[0];\n int* h_types = &v_types[0];\n CUDA_TRY( cudaMemcpyAsync(d_cols, h_cols, \n ncols*sizeof(void*), \n cudaMemcpyHostToDevice,stream) ); \/\/TODO: add streams\n CUDA_TRY( cudaMemcpyAsync(d_types, h_types, \n ncols*sizeof(int), \n cudaMemcpyHostToDevice,stream) ); \/\/TODO: add streams\n return GDF_SUCCESS;\n }\n}\n\n\n\ngdf_error gpu_window_function(gdf_column ** window_order_columns, int num_window_order_columns, window_function_type window_type,\n\t\tgdf_column ** window_reduction_columns, int num_window_reduction_columns, window_reduction_type * reductions,\n\t\tgdf_column ** window_partition_columns, int num_window_partition_columns,\n\t\torder_by_type * order_by_types, char * workspace,gdf_size_type & workspace_size){\n\n\t\/\/will always have at least one reduction\n\tgdf_size_type num_rows = window_reduction_columns[0]->size;\n\n\tif(num_window_partition_columns > 0){\n\n\t\t\/\/can be used to get the amount of space that should be preallocated for this\n\t\tif(workspace == nullptr){\n\t\t\tworkspace_size = 0;\n\t\t\tif(num_window_partition_columns > 1){\n\t\t\t\t\/\/more than one partition column so we will hash the data together\n
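\t\t\t\t\/\/ Sizing note: one 64-bit hash per row, plus a validity bitmask\n\t\t\t\t\/\/ rounded up with the usual ceil-division idiom\n\t\t\t\t\/\/ (size + GDF_VALID_BITSIZE - 1) \/ GDF_VALID_BITSIZE.\n\t\t\t\tworkspace_size += sizeof(unsigned long long) * window_partition_columns[0]->size + ((window_partition_columns[0]->size + (GDF_VALID_BITSIZE -1 )) \/GDF_VALID_BITSIZE);\n\t\t\t}\n\t\t\treturn GDF_SUCCESS;\n\t\t}\n\t}\n\n\t\/\/a gdf column\n\n\tcudaStream_t 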
stream;\n\tcudaStreamCreate(&stream);\n\n\n\t\/\/hash the partition columns\n\tgdf_column hashed_data;\n\n\tif(num_window_partition_columns > 0){\n\t\tgdf_hash_columns(window_partition_columns, num_window_partition_columns,\n\t\t\t\t&hashed_data, &stream);\n\t\t\/\/ assumption: the hash output lives at the front of the caller-provided\n\t\t\/\/ workspace, matching the sizing math above\n\t\tvoid * data = (void *) workspace;\n\t\tgdf_column_view(&hashed_data, (void *) data,\n\t\t\t\t(gdf_valid_type *) ( ((char *) data ) + (sizeof(unsigned long long) * num_rows))\n\t\t\t\t,window_partition_columns[0]->size,GDF_INT64);\n\n\t}else{\n\t\thashed_data.size = 0;\n\t}\n\n\n\n\t\/\/stable sort backwards starting with the least significant order by\n\t\/*template <typename IndexT>\n__host__ __device__\nvoid multi_col_order_by(size_t nrows,\n\t\t\tsize_t ncols,\n\t\t\tvoid* const* d_cols,\n\t\t\tint* const d_gdf_t,\n\t\t\tIndexT* d_indx,\n\t\t\tcudaStream_t stream = NULL)*\/\n\n\tvoid ** device_order_columns;\n\tRMM_TRY( RMM_ALLOC(&device_order_columns,sizeof(void *) * (num_window_order_columns + 1), stream) );\n\n\t\/\/copy device pointers\n\n\tint * device_column_types;\n\tRMM_TRY( RMM_ALLOC((void**)&device_column_types,sizeof(int) * (num_window_order_columns + 1), stream) );\n\n\tgdf_column** order_by_cols = new gdf_column*[num_window_order_columns + 1];\n\tfor(int i = 0; i < num_window_order_columns; i++){\n\t\torder_by_cols[i] = window_order_columns[i];\n\t}\n\torder_by_cols[num_window_order_columns] = &hashed_data;\n\n\tsoa_col_info(order_by_cols, num_window_order_columns + 1,\n\t\t\t\t device_order_columns, device_column_types, stream);\n\n\tgdf_size_type * device_index_outputs;\n\tRMM_TRY( RMM_ALLOC((void**)&device_index_outputs, sizeof(gdf_size_type) * num_rows, stream) );\n\n\tmulti_col_order_by(num_rows, num_window_order_columns + 1, device_order_columns,\n\t\t\t device_column_types, device_index_outputs, stream);\n\n\t\/\/now we have our ordered arrangement for the table\n\t\/\/process reduction\n\n\tdelete[] order_by_cols;\n\tRMM_TRY( RMM_FREE(device_order_columns, stream) );\n\tRMM_TRY( RMM_FREE(device_column_types, stream) );\n\n\t\/\/now stable sort by the hash of the partition column\n\n\t\/\/perform windowed functions here\n\n\tCUDA_TRY( cudaStreamSynchronize(stream) );\n\tCUDA_TRY( cudaStreamDestroy(stream) );\n\n\treturn GDF_SUCCESS;\n}\n\n\n\/\/so we have no segmented sorting in thrust, which means that one way to accomplish our goals is to stable sort in\n\/\/backwards order\n\/\/this will line up all of our columns nicely\n\n\n\/\/because our partitions are a group of columns, hashing the values together makes for fewer operations later on when determining\n\/\/partitions\n\n\/\/so primitives we need are\n\/\/the sorting\n\/\/hashing\n","avg_line_length":31.9932885906,"max_line_length":169,"alphanum_fraction":0.7302286553} {"size":1831,"ext":"cu","lang":"Cuda","max_stars_count":6.0,"content":"\/\/=======================================================================\n\/\/ Copyright (c) 2017 Baptiste Wicht\n\/\/ Distributed under the terms of the MIT License.\n\/\/ (See accompanying file LICENSE or copy at\n\/\/ http:\/\/opensource.org\/licenses\/MIT)\n\/\/=======================================================================\n\n#include \n#include \n\n#include \"egblas\/transpose_front.hpp\"\n#include \"egblas\/assert.hpp\"\n#include \"egblas\/utils.hpp\"\n#include \"egblas\/sum.hpp\"\n#include \"egblas\/cuda_check.hpp\"\n\ntemplate <typename T>\n__global__ void transpose_front_kernel(size_t M, size_t N, size_t K, const T* x, T* y) {\n const auto mk = threadIdx.x + blockIdx.x * blockDim.x;\n\n if (mk < M * K) {\n \/\/ Note: Ideally, we would use 2D Indexing. 
But I can't get it to be as fast as the 1D index\n const size_t m = mk \/ K;\n const size_t k = mk % K;\n\n for (size_t n = 0; n < N; ++n) {\n \/\/ y(n, m) = x(m, n)\n \/\/ x[M, N, K]\n \/\/ y[N, M, K]\n\n y[n * (M * K) + m * K + k] = x[m * (N * K) + n * K + k];\n }\n }\n}\n\nvoid egblas_stranspose_front(size_t m, size_t n, size_t k, float* x, float* y) {\n int blockSize;\n int minGridSize;\n\n cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, &transpose_front_kernel<float>, 0, 0);\n
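    \/\/ One thread per (m, k) pair: round up so all m * k elements are covered,\n    \/\/ e.g. m * k = 1000 with blockSize = 256 launches 4 blocks.\n    int gridSize = (m * k + blockSize - 1) \/ blockSize;\n\n    transpose_front_kernel<float><<<gridSize, blockSize>>>(m, n, k, x, y);\n}\n\nvoid egblas_dtranspose_front(size_t m, size_t n, size_t k, double* x, double* y) {\n    int blockSize;\n    int minGridSize;\n\n    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, &transpose_front_kernel<double>, 0, 0);\n\n    int gridSize = (m * k + blockSize - 1) \/ blockSize;\n\n    transpose_front_kernel<double><<<gridSize, blockSize>>>(m, n, k, x, y);\n}\n","avg_line_length":32.1228070175,"max_line_length":104,"alphanum_fraction":0.5767340251} {"size":43,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \"MeshTest.h\"\n#include \"..\/main.h\"\n","avg_line_length":14.3333333333,"max_line_length":21,"alphanum_fraction":0.6511627907} {"size":5921,"ext":"cu","lang":"Cuda","max_stars_count":21.0,"content":"\/*\n    -- MAGMA (version 1.6.1) --\n       Univ. of Tennessee, Knoxville\n       Univ. of California, Berkeley\n       Univ. of Colorado, Denver\n       @date January 2015\n\n       @precisions normal z -> s d c\n*\/\n#include \"common_magma.h\"\n\n#define PRECISION_z\n\n#define zswap_bs 64\n\n\/\/#if (GPUSHMEM < 200)\n#define zgeru_bs 512  \/\/ 512 is max threads for 1.x cards\n\/\/#else\n\/\/#define zgeru_bs 1024\n\/\/#endif\n\nvoid magma_zgetf2_swap(\n    magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx);\n\nvoid magma_zscal_zgeru(\n    magma_int_t m, magma_int_t n, magmaDoubleComplex *A, magma_int_t lda);\n\n\n\/**\n    ZGETF2 computes an LU factorization of a general m-by-n matrix A\n    using partial pivoting with row interchanges.\n\n    The factorization has the form\n        A = P * L * U\n    where P is a permutation matrix, L is lower triangular with unit\n    diagonal elements (lower trapezoidal if m > n), and U is upper\n    triangular (upper trapezoidal if m < n).\n\n    This is the right-looking Level 2 BLAS version of the algorithm.\n\n    Arguments\n    ---------\n\n    @param[in]\n    m       INTEGER\n            The number of rows of the matrix A.  M >= 0.\n\n    @param[in]\n    n       INTEGER\n            The number of columns of the matrix A.  N >= 0 and N <= 1024.\n            On CUDA architecture 1.x cards, N <= 512.\n\n    @param[in,out]\n    A       COMPLEX_16 array, dimension (LDA,N)\n            On entry, the m by n matrix to be factored.\n            On exit, the factors L and U from the factorization\n            A = P*L*U; the unit diagonal elements of L are not stored.\n\n    @param[in]\n    lda     INTEGER\n            The leading dimension of the array A.  LDA >= max(1,M).\n\n    @param[out]\n    ipiv    INTEGER array, dimension (min(M,N))\n            The pivot indices; for 1 <= i <= min(M,N), row i of the\n            matrix was interchanged with row IPIV(i).\n\n    @param[out]\n    info    INTEGER\n      -     = 0: successful exit\n      -     < 0: if INFO = -k, the k-th argument had an illegal value\n      -     > 0: if INFO = k, U(k,k) is exactly zero. 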
The factorization\n has been completed, but the factor U is exactly\n singular, and division by zero will occur if it is used\n to solve a system of equations.\n\n @ingroup magma_zgesv_aux\n ********************************************************************\/\nextern \"C\" magma_int_t\nmagma_zgetf2_gpu(\n magma_int_t m, magma_int_t n,\n magmaDoubleComplex_ptr dA, magma_int_t ldda,\n magma_int_t *ipiv,\n magma_int_t *info )\n{\n #define dA(i, j) (dA + (i) + (j)*ldda)\n\n *info = 0;\n if (m < 0) {\n *info = -1;\n } else if (n < 0 || n > zgeru_bs) {\n *info = -2;\n } else if (ldda < max(1,m)) {\n *info = -4;\n }\n\n if (*info != 0) {\n magma_xerbla( __func__, -(*info) );\n return *info;\n }\n\n \/\/ Quick return if possible\n if (m == 0 || n == 0) {\n return *info;\n }\n\n magma_int_t min_mn = min(m, n);\n magma_int_t j, jp;\n \n for( j=0; j < min_mn; j++ ) {\n cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );\n\n \/\/ Find pivot and test for singularity.\n jp = j - 1 + magma_izamax(m-j, dA(j,j), 1);\n ipiv[j] = jp + 1; \/\/ ipiv uses Fortran one-based index\n \/\/ Can't check value of dA since it is on GPU\n \/\/if ( dA(jp, j) != 0.0) {\n cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );\n \n \/\/ Apply the interchange to columns 1:N.\n if (jp != j) {\n magma_zgetf2_swap(n, dA, j, jp, ldda);\n }\n \n \/\/ Compute elements J+1:M of J-th column.\n if (j < m) {\n magma_zscal_zgeru(m-j, n-j, dA(j, j), ldda);\n }\n \/\/}\n \/\/else if (*info == 0) {\n \/\/ *info = j;\n \/\/}\n }\n\n return *info;\n}\n\n\n__global__\nvoid kernel_zswap(int n, magmaDoubleComplex *x, int i, int j, int incx)\n{\n int id = blockIdx.x * zswap_bs + threadIdx.x;\n\n if (id < n) {\n magmaDoubleComplex tmp = x[i + incx*id];\n x[i + incx*id] = x[j + incx*id];\n x[j + incx*id] = tmp;\n }\n}\n\n\nvoid magma_zgetf2_swap(magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx)\n{\n\/*\n zswap two row vectors: ith and jth\n*\/\n dim3 threads(zswap_bs, 1, 1);\n int num_blocks = (n - 1)\/zswap_bs + 1;\n dim3 grid(num_blocks,1);\n kernel_zswap<<< grid, threads, 0, magma_stream >>>(n, x, i, j, incx);\n}\n\n\n\/\/ dynamically allocated shared memory, set to size n when the kernel is launched.\n\/\/ See CUDA Guide B.2.3\nextern __shared__ magmaDoubleComplex shared_data[];\n\n__global__\nvoid kernel_zscal_zgeru(int m, int n, magmaDoubleComplex *A, int lda)\n{\n magmaDoubleComplex *shared_y = shared_data;\n\n int tid = blockIdx.x * zgeru_bs + threadIdx.x;\n\n magmaDoubleComplex reg = MAGMA_Z_ZERO;\n\n if (threadIdx.x < n) {\n shared_y[threadIdx.x] = A[lda * threadIdx.x];\n }\n\n __syncthreads();\n\n if (tid < m && tid > 0) {\n reg = A[tid];\n\n reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]);\n\n A[tid] = reg;\n\n #pragma unroll\n for(int i=1; i < n; i++) {\n A[tid + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg;\n }\n }\n}\n\n\nvoid magma_zscal_zgeru(magma_int_t m, magma_int_t n, magmaDoubleComplex *A, magma_int_t lda)\n{\n\/*\n\n Specialized kernel which merges the two kernels zscal and zgeru:\n 1) zscal the first column vector A(1:M-1,0) with 1\/A(0,0);\n 2) Perform a zgeru operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where \n alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);\n\n*\/\n dim3 threads(zgeru_bs, 1, 1);\n int num_blocks = (m - 1)\/zgeru_bs + 1;\n dim3 grid(num_blocks,1);\n size_t shared_size = sizeof(magmaDoubleComplex)*(n);\n kernel_zscal_zgeru<<< grid, threads, shared_size, magma_stream>>>(m, n, A, 
lda);\n}\n","avg_line_length":27.6682242991,"max_line_length":108,"alphanum_fraction":0.5725384226} {"size":7968,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/\/ Copyright (c) 2009-2019 The Regents of the University of Michigan\n\/\/ This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.\n\n\n\/\/ Maintainer: joaander\n\n#include \"Integrator.cuh\"\n\n#include \n\n\/\/! make a scalar4 value - remind NVCC to find the Device version of make_scalar4\n__host__ __device__ Scalar4 make_scalar4(Scalar x, Scalar y, Scalar z, Scalar w);\n\n\/*! \\file Integrator.cu\n \\brief Defines methods and data structures used by the Integrator class on the GPU\n*\/\n\n\/\/! helper to add a given force\/virial pointer pair\ntemplate< unsigned int compute_virial >\n__device__ void add_force_total(Scalar4& net_force, Scalar *net_virial, Scalar4& net_torque, Scalar4* d_f, Scalar* d_v, const unsigned int virial_pitch, Scalar4* d_t, int idx)\n {\n if (d_f != NULL && d_v != NULL && d_t != NULL)\n {\n Scalar4 f = d_f[idx];\n Scalar4 t = d_t[idx];\n\n net_force.x += f.x;\n net_force.y += f.y;\n net_force.z += f.z;\n net_force.w += f.w;\n\n if (compute_virial)\n {\n for (int i=0; i < 6; i++)\n net_virial[i] += d_v[i*virial_pitch+idx];\n }\n\n net_torque.x += t.x;\n net_torque.y += t.y;\n net_torque.z += t.z;\n net_torque.w += t.w;\n }\n }\n\n\/\/! Kernel for summing forces on the GPU\n\/*! The specified forces and virials are summed for every particle into \\a d_net_force and \\a d_net_virial\n\n \\param d_net_force Output device array to hold the computed net force\n \\param d_net_virial Output device array to hold the computed net virial\n \\param net_virial_pitch The pitch of the 2D net_virial array\n \\param d_net_torque Output device array to hold the computed net torque\n \\param force_list List of pointers to force data to sum\n \\param nwork Number of particles this GPU processes\n \\param clear When true, initializes the sums to 0 before adding. 
When false, reads in the current \\a d_net_force\n and \\a d_net_virial and adds to that\n \\param offset of this GPU in ptls array\n\n \\tparam compute_virial When set to 0, the virial sum is not computed\n*\/\ntemplate< unsigned int compute_virial >\n__global__ void gpu_integrator_sum_net_force_kernel(Scalar4 *d_net_force,\n Scalar *d_net_virial,\n const unsigned int net_virial_pitch,\n Scalar4 *d_net_torque,\n const gpu_force_list force_list,\n unsigned int nwork,\n bool clear,\n unsigned int offset)\n {\n \/\/ calculate the index we will be handling\n int idx = blockDim.x * blockIdx.x + threadIdx.x;\n\n if (idx < nwork)\n {\n idx += offset;\n\n \/\/ set the initial net_force and net_virial to sum into\n Scalar4 net_force;\n Scalar net_virial[6];\n Scalar4 net_torque;\n if (clear)\n {\n net_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));\n if (compute_virial)\n {\n for (int i=0; i<6; i++)\n net_virial[i] = Scalar(0.0);\n }\n net_torque = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));\n }\n else\n {\n \/\/ if clear is false, initialize to the current d_net_force and d_net_virial\n net_force = d_net_force[idx];\n if (compute_virial)\n {\n for (int i=0; i<6; i++)\n net_virial[i] = d_net_virial[i*net_virial_pitch+idx];\n }\n net_torque = d_net_torque[idx];\n }\n\n \/\/ sum up the totals\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f0, force_list.v0, force_list.vpitch0, force_list.t0, idx);\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f1, force_list.v1, force_list.vpitch1, force_list.t1, idx);\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f2, force_list.v2, force_list.vpitch2, force_list.t2, idx);\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f3, force_list.v3, force_list.vpitch3, force_list.t3, idx);\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f4, force_list.v4, force_list.vpitch4, force_list.t4, idx);\n add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f5, force_list.v5, force_list.vpitch5, force_list.t5, idx);\n\n \/\/ write out the final result\n d_net_force[idx] = net_force;\n if (compute_virial)\n {\n for (int i=0; i < 6; i++)\n d_net_virial[i*net_virial_pitch+idx] = net_virial[i];\n }\n d_net_torque[idx] = net_torque;\n }\n }\n\ncudaError_t gpu_integrator_sum_net_force(Scalar4 *d_net_force,\n Scalar *d_net_virial,\n const unsigned int net_virial_pitch,\n Scalar4 *d_net_torque,\n const gpu_force_list& force_list,\n unsigned int nparticles,\n bool clear,\n bool compute_virial,\n const GPUPartition& gpu_partition)\n {\n \/\/ sanity check\n assert(d_net_force);\n assert(d_net_virial);\n assert(d_net_torque);\n\n const int block_size = 256;\n\n \/\/ iterate over active GPUs in reverse, to end up on first GPU when returning from this function\n for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)\n {\n auto range = gpu_partition.getRangeAndSetGPU(idev);\n\n unsigned int nwork = range.second - range.first;\n\n
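 \/\/ compute_virial is a compile-time template argument: instantiating the\n \/\/ kernel twice lets the six-element virial loops drop out entirely in\n \/\/ the common no-virial case instead of branching per particle.\n if (compute_virial)\n {\n gpu_integrator_sum_net_force_kernel<1><<< nwork\/block_size+1, block_size >>>(d_net_force,\n d_net_virial,\n net_virial_pitch,\n d_net_torque,\n force_list,\n nwork,\n clear,\n range.first);\n }\n else\n {\n gpu_integrator_sum_net_force_kernel<0><<< nwork\/block_size+1, block_size >>>(d_net_force,\n d_net_virial,\n net_virial_pitch,\n d_net_torque,\n force_list,\n nwork,\n clear,\n range.first);\n }\n }\n\n return cudaSuccess;\n 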
}\n","avg_line_length":46.3255813953,"max_line_length":175,"alphanum_fraction":0.4882028112} {"size":1786,"ext":"cu","lang":"Cuda","max_stars_count":3.0,"content":"\n\/* This is a automatically generated test. Do not modify *\/\n\n#include \n#include \n#include \n\n__global__\nvoid compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15) {\nfloat tmp_1 = var_3 + var_4;\nfloat tmp_2 = +1.9720E12f;\nfloat tmp_3 = +1.2828E-21f;\ncomp = tmp_3 + tmp_2 - tmp_1 \/ var_5 * var_6 \/ atan2f((var_7 \/ (-1.4202E-41f - -1.5509E-43f * var_8)), (var_9 \/ coshf((var_10 \/ (+1.4353E21f + (var_11 \/ +1.4272E22f \/ -1.2727E34f - +1.7556E-43f))))));\nfor (int i=0; i < var_1; ++i) {\n comp += (var_12 * expf(ceilf(var_13 + -1.6095E12f \/ ldexpf(+1.6936E-36f, 2))));\n}\nfor (int i=0; i < var_2; ++i) {\n comp += +1.8186E6f + logf((-1.8026E-42f - var_14 + (-0.0f * (+1.6522E-44f * var_15 + -1.6749E-35f))));\n}\n printf(\"%.17g\\n\", comp);\n\n}\n\nfloat* initPointer(float v) {\n float *ret = (float*) malloc(sizeof(float)*10);\n for(int i=0; i < 10; ++i)\n ret[i] = v;\n return ret;\n}\n\nint main(int argc, char** argv) {\n\/* Program variables *\/\n\n float tmp_1 = atof(argv[1]);\n int tmp_2 = atoi(argv[2]);\n int tmp_3 = atoi(argv[3]);\n float tmp_4 = atof(argv[4]);\n float tmp_5 = atof(argv[5]);\n float tmp_6 = atof(argv[6]);\n float tmp_7 = atof(argv[7]);\n float tmp_8 = atof(argv[8]);\n float tmp_9 = atof(argv[9]);\n float tmp_10 = atof(argv[10]);\n float tmp_11 = atof(argv[11]);\n float tmp_12 = atof(argv[12]);\n float tmp_13 = atof(argv[13]);\n float tmp_14 = atof(argv[14]);\n float tmp_15 = atof(argv[15]);\n float tmp_16 = atof(argv[16]);\n\n compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16);\n cudaDeviceSynchronize();\n\n return 0;\n}\n","avg_line_length":31.8928571429,"max_line_length":209,"alphanum_fraction":0.6450167973} {"size":6847,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n\n#define wbCheck(stmt) \\\n do { \\\n cudaError_t err = stmt; \\\n if (err != cudaSuccess) { \\\n wbLog(ERROR, \"CUDA error: \", cudaGetErrorString(err)); \\\n wbLog(ERROR, \"Failed to run stmt \", #stmt); \\\n return -1; \\\n } \\\n } while (0)\n\n\/\/@@ Define any useful program-wide constants here\n#define KERNEL_WIDTH 3\n#define KERNEL_RADIUS (KERNEL_WIDTH \/ 2)\n#define TILE_WIDTH 8\n#define PADDED_TILE_WIDTH (TILE_WIDTH + KERNEL_WIDTH - 1)\n\n\/\/@@ Define constant memory for device kernel here\n__constant__ float deviceKernel[KERNEL_WIDTH * KERNEL_WIDTH * KERNEL_WIDTH];\n\n__global__ void conv3d(\n float *input, float *output,\n const int z_size, const int y_size, const int x_size\n) {\n \/\/@@ Insert kernel code here\n __shared__ float tile[PADDED_TILE_WIDTH][PADDED_TILE_WIDTH][PADDED_TILE_WIDTH];\n\n int tmp;\n\n \/\/ some alias\n int bx = blockIdx.x, by = blockIdx.y, bz = blockIdx.z;\n int tx = threadIdx.x, ty = threadIdx.y, tz = threadIdx.z;\n\n \/\/ destination linear index\n int dst = (tz * TILE_WIDTH * TILE_WIDTH) + (ty * TILE_WIDTH) + tx;\n \/\/ 3D index inside a padded tiles\n tmp = dst;\n int dst_x = tmp % PADDED_TILE_WIDTH;\n tmp \/= PADDED_TILE_WIDTH;\n int dst_y = tmp % PADDED_TILE_WIDTH;\n tmp \/= PADDED_TILE_WIDTH;\n int dst_z = tmp;\n \/\/ 3D index in global array, simply subtract the pad size\n int src_x = (bx * TILE_WIDTH + dst_x) - KERNEL_RADIUS;\n int src_y = (by * 
TILE_WIDTH + dst_y) - KERNEL_RADIUS;\n int src_z = (bz * TILE_WIDTH + dst_z) - KERNEL_RADIUS;\n int src = (src_z * y_size * x_size) + (src_y * x_size) + src_x;\n\n \/\/ load 1, this include \"left halos\" and \"content\"\n if (\n ((src_x >= 0) && (src_x < x_size)) &&\n ((src_y >= 0) && (src_y < y_size)) &&\n ((src_z >= 0) && (src_z < z_size))\n ) {\n tile[dst_z][dst_y][dst_x] = input[src];\n } else {\n tile[dst_z][dst_y][dst_x] = 0.0f;\n }\n\n \/\/ load 2, \"right halos\",\n dst = (tz * TILE_WIDTH * TILE_WIDTH) + (ty * TILE_WIDTH) + tx\n + (TILE_WIDTH * TILE_WIDTH * TILE_WIDTH);\n tmp = dst;\n dst_x = tmp % PADDED_TILE_WIDTH;\n tmp \/= PADDED_TILE_WIDTH;\n dst_y = tmp % PADDED_TILE_WIDTH;\n tmp \/= PADDED_TILE_WIDTH;\n dst_z = tmp;\n src_x = (bx * TILE_WIDTH + dst_x) - KERNEL_RADIUS;\n src_y = (by * TILE_WIDTH + dst_y) - KERNEL_RADIUS;\n src_z = (bz * TILE_WIDTH + dst_z) - KERNEL_RADIUS;\n src = (src_z * y_size * x_size) + (src_y * x_size) + src_x;\n if (dst_z < PADDED_TILE_WIDTH) {\n if (\n ((src_x >= 0) && (src_x < x_size)) &&\n ((src_y >= 0) && (src_y < y_size)) &&\n ((src_z >= 0) && (src_z < z_size))\n ) {\n tile[dst_z][dst_y][dst_x] = input[src];\n } else {\n tile[dst_z][dst_y][dst_x] = 0.0f;\n }\n }\n\n __syncthreads();\n\n \/\/ the actual convolution\n float sum = 0;\n for (int k = 0; k < KERNEL_WIDTH; k++) {\n for (int j = 0; j < KERNEL_WIDTH; j++) {\n for (int i = 0; i < KERNEL_WIDTH; i++) {\n sum += tile[tz + k][ty + j][tx + i] *\n deviceKernel[\n (k * KERNEL_WIDTH * KERNEL_WIDTH) + (j * KERNEL_WIDTH) + i\n ];\n }\n }\n }\n \/\/ update the destination 3D index\n dst_x = bx * TILE_WIDTH + tx;\n dst_y = by * TILE_WIDTH + ty;\n dst_z = bz * TILE_WIDTH + tz;\n \/\/ restore the linear index in global scope\n dst = (dst_z * y_size * x_size) + (dst_y * x_size) + dst_x;\n if ((dst_x < x_size) && (dst_y < y_size) && (dst_z < z_size)) {\n output[dst] = sum;\n }\n\n __syncthreads();\n}\n\nint main(int argc, char *argv[]) {\n wbArg_t args;\n int z_size;\n int y_size;\n int x_size;\n int inputLength, kernelLength;\n float *hostInput;\n float *hostKernel;\n float *hostOutput;\n float *deviceInput;\n float *deviceOutput;\n\n args = wbArg_read(argc, argv);\n\n \/\/ Import data\n hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);\n hostKernel =\n (float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength);\n hostOutput = (float *)malloc(inputLength * sizeof(float));\n\n \/\/ First three elements are the input dimensions\n z_size = hostInput[0];\n y_size = hostInput[1];\n x_size = hostInput[2];\n wbLog(TRACE, \"The input size is \", z_size, \"x\", y_size, \"x\", x_size);\n assert(z_size * y_size * x_size == inputLength - 3);\n assert(kernelLength == KERNEL_WIDTH * KERNEL_WIDTH * KERNEL_WIDTH);\n\n wbTime_start(GPU, \"Doing GPU Computation (memory + compute)\");\n\n wbTime_start(GPU, \"Doing GPU memory allocation\");\n \/\/@@ Allocate GPU memory here\n size_t inputSize = x_size * y_size * z_size * sizeof(float);\n wbCheck(cudaMalloc(&deviceInput, inputSize));\n wbCheck(cudaMalloc(&deviceOutput, inputSize));\n \/\/ Recall that inputLength is 3 elements longer than the input data\n \/\/ because the first three elements were the dimensions\n wbTime_stop(GPU, \"Doing GPU memory allocation\");\n\n wbTime_start(Copy, \"Copying data to the GPU\");\n \/\/@@ Copy input and kernel to GPU here\n size_t kernelSize = kernelLength * sizeof(float);\n wbCheck(cudaMemcpyToSymbol(deviceKernel, hostKernel, kernelSize));\n \/\/ Recall that the first three elements of hostInput are dimensions 
and\n \/\/ do not need to be copied to the gpu\n wbCheck(cudaMemcpy(deviceInput, hostInput+3, inputSize, cudaMemcpyHostToDevice));\n wbTime_stop(Copy, \"Copying data to the GPU\");\n\n wbTime_start(Compute, \"Doing the computation on the GPU\");\n \/\/@@ Initialize grid and block dimensions here\n dim3 blockDim(TILE_WIDTH, TILE_WIDTH, TILE_WIDTH);\n dim3 gridDim(\n ceil((float)x_size\/TILE_WIDTH), ceil((float)y_size\/TILE_WIDTH), ceil((float)z_size\/TILE_WIDTH)\n );\n \/\/@@ Launch the GPU kernel here\n
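  \/\/ One block per 8x8x8 output tile: a 64x64x64 volume launches an 8x8x8\n  \/\/ grid, and each block stages its padded 10x10x10 halo tile in shared\n  \/\/ memory before computing.\n  conv3d<<<gridDim, blockDim>>>(\n      deviceInput, deviceOutput, z_size, y_size, x_size\n  );\n  wbCheck(cudaDeviceSynchronize());\n  wbTime_stop(Compute, \"Doing the computation on the GPU\");\n\n  wbTime_start(Copy, \"Copying data from the GPU\");\n  \/\/@@ Copy the device memory back to the host here\n  wbCheck(cudaMemcpy(hostOutput+3, deviceOutput, inputSize, cudaMemcpyDeviceToHost));\n  \/\/ Recall that the first three elements of the output are the dimensions\n  \/\/ and should not be set here (they are set below)\n  wbTime_stop(Copy, \"Copying data from the GPU\");\n\n  wbTime_stop(GPU, \"Doing GPU Computation (memory + compute)\");\n\n  \/\/ Set the output dimensions for correctness checking\n  hostOutput[0] = z_size;\n  hostOutput[1] = y_size;\n  hostOutput[2] = x_size;\n  wbSolution(args, hostOutput, inputLength);\n\n  \/\/ Free device memory\n  cudaFree(deviceInput);\n  cudaFree(deviceOutput);\n\n  \/\/ Free host memory\n  free(hostInput);\n  free(hostOutput);\n  return 0;\n}\n","avg_line_length":34.5808080808,"max_line_length":98,"alphanum_fraction":0.6214400467} {"size":2268,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"#include <algorithm>\n#include <vector>\n\n#include \"caffe\/layers\/relu_layer.hpp\"\n\nnamespace caffe {\n\ntemplate <typename Dtype>\n__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,\n    float negative_slope) {\n  CUDA_KERNEL_LOOP(index, n) {\n    out[index] = in[index] > 0 ? 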
in[index] : Dtype(in[index] * negative_slope);\n }\n}\n\ntemplate <typename Ftype, typename Btype>\nvoid ReLULayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,\n const vector<Blob*>& top) {\n this->Quantize_gpu(bottom, top);\n \n const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();\n Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();\n\n const int count = bottom[0]->count();\n float negative_slope = this->layer_param_.relu_param().negative_slope();\n \/\/ NOLINT_NEXT_LINE(whitespace\/operators)\n ReLUForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>(\n count, bottom_data, top_data, negative_slope);\n CUDA_POST_KERNEL_CHECK;\n CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));\n \n this->Quantize_gpu(bottom, top);\n}\n\ntemplate <typename Dtype>\n__global__ void ReLUBackward(const int n, const Dtype* in_diff,\n const Dtype* in_data, Dtype* out_diff, float negative_slope) {\n CUDA_KERNEL_LOOP(index, n) {\n out_diff[index] = in_diff[index] * ((in_data[index] > 0)\n + (in_data[index] <= 0) * negative_slope);\n }\n}\n\ntemplate <typename Ftype, typename Btype>\nvoid ReLULayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,\n const vector<bool>& propagate_down,\n const vector<Blob*>& bottom) {\n if (propagate_down[0]) {\n const Btype* bottom_data = bottom[0]->gpu_data<Btype>();\n const Btype* top_diff = top[0]->gpu_diff<Btype>();\n Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();\n const int count = bottom[0]->count();\n float negative_slope = this->layer_param_.relu_param().negative_slope();\n \/\/ NOLINT_NEXT_LINE(whitespace\/operators)\n ReLUBackward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>(\n count, top_diff, bottom_data, bottom_diff, negative_slope);\n CUDA_POST_KERNEL_CHECK;\n CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));\n }\n}\n\nINSTANTIATE_LAYER_GPU_FUNCS_FB(ReLULayer);\n\n} \/\/ namespace caffe\n","avg_line_length":34.8923076923,"max_line_length":97,"alphanum_fraction":0.7164902998} {"size":1705,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\n\/*!\n * Copyright (c) 2016 by Contributors\n * \\file broadcast_reduce_op_index.cu\n * \\brief GPU Implementation of broadcast and reduce functions based on index.\n *\/\n#include \".\/broadcast_reduce_op.h\"\n\nnamespace mxnet {\nnamespace op {\nNNVM_REGISTER_OP(argmax).set_attr<FCompute>(\"FCompute\",\n SearchAxisCompute<gpu, mshadow::red::maximum>);\n\nNNVM_REGISTER_OP(argmin).set_attr<FCompute>(\"FCompute\",\n SearchAxisCompute<gpu, mshadow::red::minimum>);\n\n\/\/ Legacy support\nNNVM_REGISTER_OP(argmax_channel)\n .set_attr<FCompute>(\"FCompute\", SearchAxisCompute<gpu, mshadow::red::maximum>);\n\nNNVM_REGISTER_OP(pick).set_attr<FCompute>(\"FCompute\", PickOpForward<gpu>);\n\nNNVM_REGISTER_OP(_backward_pick).set_attr<FCompute>(\"FCompute\", PickOpBackward<gpu>);\n\n} \/\/ namespace op\n} \/\/ namespace mxnet\n","avg_line_length":37.8888888889,"max_line_length":91,"alphanum_fraction":0.7167155425} {"size":5915,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include <cudnn.h>\n#include <iostream>\n#include \"src\/helper.h\"\n\nint main()\n{\n cudnnHandle_t cudnn;\n cudnnTensorDescriptor_t input_desc;\n cudnnTensorDescriptor_t output_desc;\n cudnnFilterDescriptor_t filter_desc;\n cudnnConvolutionDescriptor_t conv_desc;\n cudnnTensorDescriptor_t bias_desc;\n\n cudnnConvolutionFwdAlgoPerf_t falgo_perf;\n cudnnConvolutionBwdFilterAlgoPerf_t b_falgo_perf;\n cudnnConvolutionBwdDataAlgoPerf_t b_dalgo_perf;\n\n float *d_input = nullptr;\n float *d_output = nullptr;\n float *d_filter = nullptr;\n float *d_bias = nullptr;\n\n int input_n = 64;\n int input_c = 1;\n int input_h = 28;\n int input_w = 28;\n\n \/\/ output size\n int output_n = input_n;\n int output_c = 20;\n int output_h = 1;\n int output_w = 1;\n\n \/\/ kernel size\n int filter_h = 5;\n int filter_w = 5;\n\n \/\/ alpha, beta\n float one = 1.f;\n float zero = 0.f;\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n cudnnCreate(&cudnn);\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/* Create Resources *\/\n cudnnCreateTensorDescriptor(&input_desc);\n cudnnCreateTensorDescriptor(&output_desc);\n cudnnCreateFilterDescriptor(&filter_desc);\n cudnnCreateConvolutionDescriptor(&conv_desc);\n cudnnCreateTensorDescriptor(&bias_desc);\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/\/ Initialize resources\n cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w);\n cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c, filter_h, filter_w);\n cudnnSetConvolution2dDescriptor(conv_desc,\n 0, 0,\n 1, 1,\n 1, 1,\n CUDNN_CROSS_CORRELATION,\n CUDNN_DATA_FLOAT);\n cudnnGetConvolution2dForwardOutputDim(conv_desc, input_desc, filter_desc, &output_n, &output_c, &output_h, &output_w);\n cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w);\n cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1);\n\n int weight_size = output_c * input_c * filter_h * filter_w;\n int bias_size = output_c;\n\n std::cout << \"input size: \" << input_n << \" \" << input_c << \" \" << input_h << \" \" << input_w << std::endl;\n std::cout << \"output size: \" << output_n << \" \" << output_c << \" \" << output_h << \" \" << output_w << std::endl;\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/\/ convolution\n size_t workspace_size = 0;\n size_t temp_size = 0;\n float *d_workspace = nullptr;\n
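    \/\/ Ask each pass (fwd, bwd-filter, bwd-data) for its fastest algorithm and\n    \/\/ keep the running maximum workspace so one buffer can serve all three.\n    cudnnFindConvolutionForwardAlgorithm(cudnn, input_desc, 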
filter_desc, conv_desc, output_desc, 1, 0, &falgo_perf);\n cudnnGetConvolutionForwardWorkspaceSize(cudnn, input_desc, filter_desc, conv_desc, output_desc, falgo_perf.algo, &temp_size);\n workspace_size = max(workspace_size, temp_size);\n\n \/\/ convolution (bwd - filter)\n cudnnFindConvolutionBackwardFilterAlgorithm(cudnn, input_desc, output_desc, conv_desc, filter_desc, 1, 0, &b_falgo_perf);\n cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn, input_desc, output_desc, conv_desc, filter_desc, b_falgo_perf.algo, &temp_size);\n workspace_size = max(workspace_size, temp_size);\n\n \/\/ convolution (bwd - data)\n cudnnFindConvolutionBackwardDataAlgorithm(cudnn, filter_desc, output_desc, conv_desc, input_desc, 1, 0, &b_dalgo_perf);\n cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn, filter_desc, output_desc, conv_desc, input_desc, b_dalgo_perf.algo, &temp_size);\n workspace_size = max(workspace_size, temp_size);\n\n std::cout << \"workspace size: \" << workspace_size << std::endl;\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/\/ allocate memory space\n cudaMalloc((void**)&d_input, sizeof(float) * input_n * input_c * input_h * input_w);\n cudaMalloc((void**)&d_filter, sizeof(float) * weight_size);\n cudaMalloc((void**)&d_output, sizeof(float) * output_n * output_c * output_h * output_w);\n cudaMalloc((void**)&d_workspace, sizeof(float) * workspace_size);\n cudaMalloc((void**)&d_bias, sizeof(float) * bias_size);\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/\/ Forward\n checkCudnnErrors(cudnnConvolutionForward(cudnn, &one, input_desc, d_input, filter_desc, d_filter, conv_desc, falgo_perf.algo, d_workspace, workspace_size, &zero, output_desc, d_output));\n checkCudnnErrors(cudnnAddTensor(cudnn, &one, bias_desc, d_bias, &one, output_desc, d_output));\n checkCudaErrors(cudaGetLastError());\n \n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n \/\/ backward\n checkCudnnErrors(cudnnConvolutionBackwardBias(cudnn, &one, output_desc, d_output, &zero, bias_desc, d_bias));\n checkCudnnErrors(cudnnConvolutionBackwardFilter(cudnn, &one, input_desc, d_input, output_desc, d_output, conv_desc, b_falgo_perf.algo, d_workspace, workspace_size, &zero, filter_desc, d_filter));\n checkCudnnErrors(cudnnConvolutionBackwardData(cudnn, &one, filter_desc, d_filter, output_desc, d_output, conv_desc, b_dalgo_perf.algo, d_workspace, workspace_size, &zero, input_desc, d_input));\n checkCudaErrors(cudaGetLastError());\n \n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n cudnnDestroyTensorDescriptor(input_desc);\n cudnnDestroyTensorDescriptor(output_desc);\n cudnnDestroyFilterDescriptor(filter_desc);\n cudnnDestroyConvolutionDescriptor(conv_desc);\n cudnnDestroyTensorDescriptor(bias_desc);\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n\n cudaFree(d_input); \n cudaFree(d_filter);\n cudaFree(d_output);\n cudaFree(d_workspace);\n cudaFree(d_bias);\n\n cudnnDestroy(cudnn);\n\n std::cout << \"[\" << __LINE__ << \"]\" << std::endl;\n}\n","avg_line_length":41.9503546099,"max_line_length":199,"alphanum_fraction":0.6791208791} {"size":1848,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#if !MEGDNN_TEGRA_X1\n\/\/ generated by gen_cuda_conv_bias_kern_impls.py\n\/\/ ignore warning of cutlass\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n#include \"src\/cuda\/conv_bias\/int8\/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl\"\n\nusing LayoutSrc = 
cutlass::layout::TensorNCxHWx<4>;\nusing LayoutFilter = cutlass::layout::TensorCxRSKx<4>;\nusing LayoutDst = cutlass::layout::TensorNCxHWx<32>;\nusing ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;\nusing WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;\nusing InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;\nusing EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<\n int8_t, 4, int32_t, int32_t, float>;\nusing Convolution = cutlass::conv::device::Convolution<\n int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, \n LayoutDst, int32_t, LayoutDst, int32_t, \n cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \n ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \n cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, \n 2, 4, 16, true, \n cutlass::arch::OpMultiplyAddSaturate>;\ntemplate void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(\n const typename Convolution::ElementSrc* d_src, \n const typename Convolution::ElementFilter* d_filter, \n const typename Convolution::ElementBias* d_bias, \n const typename Convolution::ElementDst* d_z, \n typename Convolution::ElementDst* d_dst, \n int* workspace, \n typename Convolution::ConvolutionParameter const& conv_param, \n typename Convolution::EpilogueOutputOp::Params const& epilogue, \n cudaStream_t stream);\n#pragma GCC diagnostic pop\n#endif\n","avg_line_length":49.9459459459,"max_line_length":92,"alphanum_fraction":0.7510822511} {"size":542,"ext":"cuh","lang":"Cuda","max_stars_count":2.0,"content":"\/\/ Copyright 2004-present Facebook. All Rights Reserved.\n\n#include \"cuda\/DeviceTensor.cuh\"\n\nnamespace facebook { namespace deeplearning { namespace torch {\n\nvoid\nrunTemporalKMaxPoolingUpdateOutput(\n const cuda::DeviceTensor<float, 3>& input,\n const cuda::DeviceTensor<float, 3>& indices,\n cuda::DeviceTensor<float, 3>& output,\n int k);\n\nvoid\nrunTemporalKMaxPoolingUpdateGradInput(\n const cuda::DeviceTensor<float, 3>& gradOutput,\n const cuda::DeviceTensor<float, 3>& indices,\n cuda::DeviceTensor<float, 3>& gradInput,\n int k);\n\n} } }\n","avg_line_length":24.6363636364,"max_line_length":63,"alphanum_fraction":0.7490774908} {"size":6802,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include <ATen\/Dispatch.h>\n#include <ATen\/native\/ForeachUtils.h>\n#include <ATen\/native\/cuda\/ForeachFunctors.cuh>\n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include <ATen\/NativeFunctions.h>\n#else\n#include <ATen\/ops\/_foreach_add_native.h>\n#include <ATen\/ops\/_foreach_div_native.h>\n#include <ATen\/ops\/_foreach_mul_native.h>\n#include <ATen\/ops\/_foreach_sub_native.h>\n\n#include <ATen\/ops\/empty_like_native.h>\n#endif\n\nnamespace at { namespace native {\n\ntemplate<template<class> class Op>\nstd::vector<Tensor> foreach_tensor_list_op(TensorList tensors1, TensorList tensors2, const Scalar& alpha = 1) {\n std::vector<std::vector<at::Tensor>> tensor_lists;\n std::vector<at::Tensor> vec_res;\n vec_res.reserve(tensors1.size());\n for (const auto& t: tensors1) {\n vec_res.emplace_back(at::native::empty_like(t));\n }\n\n tensor_lists.emplace_back(tensors1.vec());\n tensor_lists.emplace_back(tensors2.vec());\n tensor_lists.emplace_back(std::move(vec_res));\n\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), \"foreach_binary_op_list_cuda\", [&]() {\n using opmath_t = at::opmath_type<scalar_t>;\n multi_tensor_apply<3>(tensor_lists,\n BinaryOpListAlphaFunctor<scalar_t, 3, 2, 2>(),\n Op<opmath_t>(),\n alpha.to<opmath_t>());\n });\n\n return tensor_lists[2];\n}\n\ntemplate<template<class> class Op>\nvoid foreach_tensor_list_op_(TensorList tensors1, TensorList tensors2, const Scalar& alpha = 1) {\n std::vector<std::vector<at::Tensor>> tensor_lists;\n tensor_lists.emplace_back(tensors1.vec());\n tensor_lists.emplace_back(tensors2.vec());\n\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, 
tensors1[0].scalar_type(), \"foreach_binary_op_list_cuda_\", [&]() {\n using opmath_t = at::opmath_type;\n multi_tensor_apply<2>(tensor_lists,\n BinaryOpListAlphaFunctor(),\n Op(),\n alpha.to());\n });\n}\n\n#define FOREACH_BINARY_OP_LIST(NAME, OP, DIVISION_OP) \\\nvoid foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) { \\\n check_foreach_api_restrictions(tensors1, tensors2); \\\n if (!can_use_fast_route(tensors1, tensors2, DIVISION_OP)) { \\\n return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2); \\\n } \\\n \\\n foreach_tensor_list_op_(tensors1, tensors2); \\\n} \\\n \\\nstd::vector foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2) { \\\n check_foreach_api_restrictions(tensors1, tensors2); \\\n if (!can_use_fast_route(tensors1, tensors2, DIVISION_OP)) { \\\n return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2); \\\n } \\\n \\\n return foreach_tensor_list_op(tensors1, tensors2); \\\n}\n\n#define FOREACH_BINARY_OP_LIST_ALPHA(NAME, OP) \\\nvoid foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, const Scalar& alpha) { \\\n check_foreach_api_restrictions(tensors1, tensors2); \\\n if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \\\n return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2, alpha); \\\n } \\\n \\\n foreach_tensor_list_op_(tensors1, tensors2, alpha); \\\n} \\\n \\\nstd::vector foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2, const Scalar& alpha) { \\\n check_foreach_api_restrictions(tensors1, tensors2); \\\n if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \\\n return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2, alpha); \\\n } \\\n \\\n return foreach_tensor_list_op(tensors1, tensors2, alpha); \\\n}\n\nFOREACH_BINARY_OP_LIST_ALPHA(add, std::plus);\nFOREACH_BINARY_OP_LIST_ALPHA(sub, std::minus);\nFOREACH_BINARY_OP_LIST(mul, std::multiplies, \/*division_op*\/ false);\nFOREACH_BINARY_OP_LIST(div, std::divides, \/*division_op*\/ true);\n\n}} \/\/ namespace at::native\n","avg_line_length":62.9814814815,"max_line_length":134,"alphanum_fraction":0.4378124081} {"size":3643,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"#include \n#include \n\n#include \"caffe\/layers\/contrastive_accuracy_layer.hpp\"\n#include \"caffe\/util\/math_functions.hpp\"\n\nnamespace caffe {\n\ntemplate \nvoid ContrastiveAccuracyLayer::Forward_gpu(\n const vector*>& bottom, const vector*>& top) {\n const int count = bottom[0]->count();\n caffe_gpu_sub(\n count,\n bottom[0]->gpu_data(), \/\/ a\n bottom[1]->gpu_data(), \/\/ b\n diff_.mutable_gpu_data()); \/\/ a_i-b_i\n caffe_gpu_powx(\n count,\n diff_.mutable_gpu_data(), \/\/ a_i-b_i\n Dtype(2),\n diff_sq_.mutable_gpu_data()); \/\/ (a_i-b_i)^2\n caffe_gpu_gemv(\n CblasNoTrans,\n bottom[0]->num(),\n bottom[0]->channels(),\n Dtype(1.0),\n diff_sq_.gpu_data(), \/\/ (a_i-b_i)^2\n summer_vec_.gpu_data(),\n Dtype(0.0),\n dist_sq_.mutable_gpu_data()); \/\/ \\Sum (a_i-b_i)^2\n Dtype margin = this->layer_param_.contrastive_accuracy_param().margin();\n bool legacy_version =\n this->layer_param_.contrastive_accuracy_param().legacy_version();\n \/\/ Dtype loss(0.0);\n int pos_cnt = 0;\n int neg_cnt = 0;\n int pos_right = 0;\n int neg_right = 0;\n float eps = 0.0001;\n\n for (int i = 0; i < bottom[0]->num(); ++i) {\n if (static_cast(bottom[2]->cpu_data()[i])) { \/\/ similar pairs\n \/\/ loss += 
dist_sq_.cpu_data()[i];\n \/\/ handle the positive pair\n pos_cnt += 1;\n if (dist_sq_.cpu_data()[i] < margin) pos_right += 1;\n\n } else { \/\/ dissimilar pairs\n if (legacy_version) {\n \/\/ loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));\n neg_cnt += 1;\n if( std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)) == 0)\n {\n neg_right += 1;\n }\n } else {\n Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]),\n Dtype(0.0));\n \/\/ loss += dist*dist;\n neg_cnt += 1;\n if (dist == 0)\n {\n neg_right += 1;\n }\n }\n }\n }\n \/\/ loss = loss \/ static_cast<Dtype>(bottom[0]->num()) \/ Dtype(2);\n
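  \/\/ Balanced accuracy: average the per-class rates so a skewed\n  \/\/ positive\/negative split cannot dominate; eps keeps the divisions\n  \/\/ finite when a batch has no pairs of one class.\n  float pos_accuracy = pos_right\/(pos_cnt + eps);\n  float neg_accuracy = neg_right\/(neg_cnt + eps);\n  float accuracy = 0.5*(pos_accuracy + neg_accuracy);\n  top[0]->mutable_cpu_data()[0] = accuracy;\n  top[0]->mutable_cpu_data()[1] = pos_accuracy;\n  top[0]->mutable_cpu_data()[2] = neg_accuracy;\n}\n\ntemplate <typename Dtype>\n__global__ void CLLBackward(const int count, const int channels,\n    const Dtype margin, const bool legacy_version, const Dtype alpha,\n    const Dtype* y, const Dtype* diff, const Dtype* dist_sq,\n    Dtype *bottom_diff) {\n  CUDA_KERNEL_LOOP(i, count) {\n    int n = i \/ channels;  \/\/ the num index, to access y and dist_sq\n    if (static_cast<int>(y[n])) {  \/\/ similar pairs\n      bottom_diff[i] = alpha * diff[i];\n    } else {  \/\/ dissimilar pairs\n      Dtype mdist(0.0);\n      Dtype beta(0.0);\n      if (legacy_version) {\n        mdist = (margin - dist_sq[n]);\n        beta = -alpha;\n      } else {\n        Dtype dist = sqrt(dist_sq[n]);\n        mdist = (margin - dist);\n        beta = -alpha * mdist \/ (dist + Dtype(1e-4)) * diff[i];\n      }\n      if (mdist > 0.0) {\n        bottom_diff[i] = beta;\n      } else {\n        bottom_diff[i] = 0;\n      }\n    }\n  }\n}\n\ntemplate <typename Dtype>\nvoid ContrastiveAccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,\n    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)\n{\n  for (int i = 0; i < propagate_down.size(); ++i) {\n    if (propagate_down[i]) { NOT_IMPLEMENTED; }\n  }\n}\nINSTANTIATE_LAYER_GPU_FUNCS(ContrastiveAccuracyLayer);\n\n} \/\/ namespace caffe\n","avg_line_length":30.8728813559,"max_line_length":83,"alphanum_fraction":0.5918199286} {"size":13337,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/*******************************************************************************\n * Copyright (c) 2015-2018 Skymind, Inc.\n *\n * This program and the accompanying materials are made available under the\n * terms of the Apache License, Version 2.0 which is available at\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0.\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 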
See the\n * License for the specific language governing permissions and limitations\n * under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ******************************************************************************\/\n\n\/\/\n\/\/ @author raver119@gmail.com\n\/\/ @author Yurii Shyrma (iuriish@yahoo.com)\n\/\/\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace simdOps;\n\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X, typename OpType>\n__global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo,\n void *extraParams,\n void *z, Nd4jLong *zShapeInfo,\n int *dimension, int dimensionLength,\n void *reductionBuffer, \n Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {\n \n functions::reduce::ReduceSameFunction<X>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X, typename OpType>\n__global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo,\n void *extraParams,\n void *z, Nd4jLong *zShapeInfo,\n int *dimension, int dimensionLength,\n void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {\n\n functions::reduce::ReduceSameFunction<X>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);\n}\n\n\nnamespace functions {\nnamespace reduce {\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\ntemplate <typename OpType>\n__device__ void ReduceSameFunction<X>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {\n \n \/\/ start the shared memory loop on the next power of 2 less\n \/\/ than the block size. 
If block size is not a power of 2,\n \/\/ accumulate the intermediate sums in the remainder range.\n \n auto sPartials = static_cast<X*>(vsPartials);\n auto extraParams = static_cast<X*>(vextraParams);\n\n Nd4jLong floorPow2 = numItems;\n\n if (floorPow2 & (floorPow2 - 1)) {\n \n while (floorPow2 & (floorPow2 - 1)) \n floorPow2 &= floorPow2 - 1;\n \n if (tid >= floorPow2) \n sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);\n\n __syncthreads();\n }\n\n for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {\n if (tid < activeThreads && tid + activeThreads < numItems) \n sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);\n \n __syncthreads();\n }\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\ntemplate <typename OpType>\n__device__ void ReduceSameFunction<X>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo,\n void *vextraParams,\n void *vz, Nd4jLong *zShapeInfo,\n int *dimension, int dimensionLength,\n void *vreductionBuffer, \n Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {\n\n auto x = reinterpret_cast<X*>(vx);\n auto z = reinterpret_cast<X*>(vz);\n auto extraParams = reinterpret_cast<X*>(vextraParams);\n auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);\n\n if (OpType::requiresSpecialAccumulation) {\n OpType::execSpecialCuda(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);\n return;\n }\n\n \/\/shared memory space for storing intermediate results\n __shared__ X* sPartials;\n\n \/\/ __shared__ shape::TAD *tad;\n __shared__ int tadLength;\n __shared__ int tadRank;\n __shared__ int numTads;\n __shared__ Nd4jLong *tadShape;\n __shared__ Nd4jLong *tadStride;\n __shared__ bool isPlainOutput;\n \n if (threadIdx.x == 0) {\n extern __shared__ unsigned char shmem[];\n sPartials = reinterpret_cast<X*>(shmem);\n tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);\n tadRank = shape::rank(tadOnlyShapeInfo);\n numTads = shape::length(xShapeInfo) \/ tadLength;\n tadShape = shape::shapeOf(tadOnlyShapeInfo);\n tadStride = shape::stride(tadOnlyShapeInfo);\n\n isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1;\n }\n __syncthreads();\n \n for (int r = blockIdx.x; r < numTads; r += gridDim.x) {\n \n Nd4jLong tadOffsetForBlock = tadOffsets[r];\n sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock);\n\n for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {\n \n auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);\n sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams);\n }\n __syncthreads();\n\n
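      \/\/ Each thread now holds a strided partial for this TAD; fold the\n      \/\/ blockDim.x partials with the shared-memory tree reduction in\n      \/\/ aggregatePartials (squash the non-power-of-two remainder, then halve).\n      \/\/ aggregate. do NOT reduce for elements > tadLength\n      aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);\n\n      __syncthreads();\n\n      if (threadIdx.x == 0)\n        z[isPlainOutput ? 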
r : shape::getIndexOffset(r, zShapeInfo, numTads)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);\n }\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\n__device__ void ReduceSameFunction<X>::execScalarCudaLegacy(int opNum, void *vx, Nd4jLong *xShapeInfo,\n void *vextraParams,\n void *vz, Nd4jLong *zShapeInfo,\n void *vreductionBuffer,\n Nd4jLong *tadOnlyShapeInfo) {\n DISPATCH_BY_OPNUM_T(execScalarCuda, PARAMS(vx, xShapeInfo, vextraParams, vz, zShapeInfo, vreductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\ntemplate <typename OpType>\n__device__ void ReduceSameFunction<X>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo,\n void *vextraParams,\n void *vz, Nd4jLong *zShapeInfo,\n void *vreductionBuffer,\n Nd4jLong *tadOnlyShapeInfo) {\n\n auto x = reinterpret_cast<X*>(vx);\n auto z = reinterpret_cast<X*>(vz);\n auto extraParams = reinterpret_cast<X*>(vextraParams);\n auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);\n \n int xEws = shape::elementWiseStride(xShapeInfo);\n auto len = shape::length(xShapeInfo);\n auto tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n \/\/shared memory space for storing intermediate results\n __shared__ X* sPartials;\n if(threadIdx.x == 0) {\n extern __shared__ unsigned char shmem[];\n sPartials = reinterpret_cast<X*>(shmem);\n }\n __syncthreads();\n sPartials[threadIdx.x] = OpType::startingValue(x);\n\n if (xEws > 0)\n for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) \n sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); \n else\n for (int i = tid; i < len; i += blockDim.x * gridDim.x) \n sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo, len)], extraParams), extraParams);\n\n __syncthreads();\n aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, len), extraParams);\n __syncthreads();\n\n if (gridDim.x > 1) {\n \n unsigned int *tc = (unsigned int *)reductionBuffer;\n __shared__ bool amLast;\n \n tid = threadIdx.x;\n if (threadIdx.x == 0)\n reductionBuffer[blockIdx.x] = sPartials[0];\/\/this->postProcess(sPartials[0],len,extraParams);\n \n __threadfence();\n __syncthreads();\n\n if (threadIdx.x == 0) {\n unsigned int ticket = atomicInc(&tc[16384], gridDim.x);\n amLast = (ticket == gridDim.x - 1);\n }\n\n __syncthreads();\n\n if (amLast) {\n tc[16384] = 0;\n sPartials[threadIdx.x] = OpType::startingValue(x);\n\n for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) \n sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);\n \n __syncthreads();\n aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);\n __syncthreads();\n\n if (threadIdx.x == 0) {\n z[0] = OpType::postProcess(sPartials[0], len, extraParams);\n }\n }\n }\n else {\n \n if (threadIdx.x == 0) {\n auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);\n tc[16384] = 0;\n z[0] = OpType::postProcess(sPartials[0], len, extraParams);\n }\n }\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\ntemplate <typename OpType>\n__host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, cudaStream_t *stream, void *x, 
template <typename X>\ntemplate<typename OpType>\n__host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {\n        \n    simpleReduce<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets);\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\ntemplate<typename OpType>\n__host__ void ReduceSameFunction<X>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {\n    simpleScalar<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\n_CUDA_H void ReduceSameFunction<X>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {\n        \n    DISPATCH_BY_OPNUM_T(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);\n    nd4j::DebugHelper::checkErrorCode(stream, \"execReduceScalarSame(...) failed\");\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\n_CUDA_H void ReduceSameFunction<X>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {\n        \n    DISPATCH_BY_OPNUM_T(intermediateXD, PARAMS(launchDims, stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), REDUCE_SAME_OPS);\n    DEBUG_KERNEL(stream, opNum);\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ntemplate <typename X>\n__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {\n    int sPartialsLength = sMemSize \/ sizeof(X);\n    X *sPartialsDeref = (X *) *sPartials;\n    for (int i = 0; i < sPartialsLength; i++)\n        sPartialsDeref[i] = extraParams[0];\n\n}\n\n        \nBUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ReduceSameFunction, , LIBND4J_TYPES);\n\n}\n}","avg_line_length":45.2101694915,"max_line_length":290,"alphanum_fraction":0.5885881383} {"size":15699,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright 2019 BlazingDB, Inc.\n * Copyright 2019 Alexander Ocsa \n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
limitations under the License.\n *\/\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"groupby\/common\/aggregation_requests.hpp\"\n#include \"groupby\/common\/type_info.hpp\"\n#include \"groupby\/common\/utils.hpp\"\n#include \"groupby_kernels.cuh\"\n#include \"sort_helper.hpp\"\n\n#include \n#include \n\nnamespace cudf {\nnamespace groupby {\nnamespace sort {\n\nusing index_vector = rmm::device_vector<gdf_size_type>;\n\nnamespace {\n\n\/**---------------------------------------------------------------------------*\n * @brief Computes the ordered aggregation requests which were skipped \n * in a previous process (`compound_to_simple`). These ordered aggregations\n * were skipped because they can't be compounded into simple aggregations.\n * \n * Then combine these results with the set of output aggregation columns \n * corresponding to the non-ordered aggregation requests.\n *\n * @param groupby[in] The object for computing sort-based groupby\n * @param original_requests[in] The original set of potentially ordered\n * aggregation requests\n * @param input_ops_args[in] The list of arguments for each of the previous ordered\n * aggregation requests\n * @param current_output_values[in] Set of output aggregation columns corresponding to\n * the non-ordered aggregation requests\n * @param stream[in] CUDA stream on which to execute\n * @return vector of columns satisfying each of the original aggregation requests\n *---------------------------------------------------------------------------**\/\nstd::vector<gdf_column*> compute_ordered_aggregations(\n    detail::helper &groupby,\n    std::vector<AggRequestType> const& original_requests,\n    std::vector<operation_args*> const& input_ops_args,\n    cudf::table& current_output_values,\n    cudaStream_t stream) {\n\n  std::vector<gdf_column*> output_value(original_requests.size());\n  std::copy(current_output_values.begin(), current_output_values.end(), output_value.begin());\n\n  for (size_t i = 0; i < original_requests.size(); ++i) {\n    auto const& element = original_requests[i];\n    if (is_ordered(element.second)) {\n      gdf_column * value_col = element.first;\n      gdf_column sorted_values;\n      rmm::device_vector<gdf_size_type> group_sizes;\n\n      std::tie(sorted_values, group_sizes) = groupby.sort_values(*value_col);\n      auto result_col = new gdf_column;\n\n      switch (element.second) {\n        case MEDIAN: {\n          *result_col = cudf::allocate_column(\n              GDF_FLOAT64, groupby.num_groups(), false);\n\n          cudf::detail::group_medians(sorted_values, groupby.group_offsets(),\n                                      group_sizes, result_col, stream);\n          break;\n        }\n        case QUANTILE: {\n          quantile_args * args = static_cast<quantile_args*>(input_ops_args[i]);\n\n          *result_col = cudf::allocate_column(\n              GDF_FLOAT64, args->quantiles.size() * groupby.num_groups(), false);\n\n          cudf::detail::group_quantiles(sorted_values, groupby.group_offsets(),\n                                        group_sizes, result_col,\n                                        args->quantiles, args->interpolation,\n                                        stream);\n          break;\n        }\n        default:\n          break;\n      }\n      output_value[i] = result_col;\n\n      gdf_column_free(&sorted_values);\n    }\n  }\n  return output_value;\n}\n\n\/**---------------------------------------------------------------------------*\n * @brief Prepare input parameters for invoking the `aggregate_all_rows` kernel\n * which computes the simple aggregation(s) of corresponding rows in the output\n * `values` table.\n * @param input_keys The table of keys\n * @param options The options for controlling behavior of the groupby operation.\n * @param groupby The object for computing sort-based groupby\n * @param 
simple_values_columns The list of simple values columns\n * @param simple_operators The list of simple aggregation operations\n * @param stream[in] CUDA stream on which to execute\n * @return output value table with the aggregation(s) computed \n *---------------------------------------------------------------------------**\/\ntemplate <bool keys_have_nulls, bool values_have_nulls>\ncudf::table compute_simple_aggregations(const cudf::table &input_keys,\n                                        const Options &options,\n                                        detail::helper &groupby,\n                                        const std::vector<gdf_column*> &simple_values_columns,\n                                        const std::vector<operators> &simple_operators,\n                                        cudaStream_t &stream) {  \n\n  const gdf_column& key_sorted_order = groupby.key_sort_order();  \n\n  \/\/group_labels  \n  const index_vector& group_labels = groupby.group_labels();\n  const gdf_size_type num_groups = groupby.num_groups();\n  \n  \/\/ Output allocation size aligned to 4 bytes. The use of `round_up_safe`  \n  \/\/ guarantees correct execution with cuda-memcheck for cases when  \n  \/\/ num_groups == 1 and with dtype == int_8.  \n  gdf_size_type const output_size_estimate = cudf::util::round_up_safe((int64_t)groupby.num_groups(), (int64_t)sizeof(int32_t));\n\n  cudf::table simple_values_table{simple_values_columns};\n\n  cudf::table simple_output_values{\n      output_size_estimate, target_dtypes(column_dtypes(simple_values_table), simple_operators),\n      column_dtype_infos(simple_values_table), values_have_nulls, false, stream};\n\n  initialize_with_identity(simple_output_values, simple_operators, stream);\n\n  auto d_input_values = device_table::create(simple_values_table);\n  auto d_output_values = device_table::create(simple_output_values, stream);\n  rmm::device_vector<operators> d_ops(simple_operators);\n\n  auto row_bitmask = cudf::row_bitmask(input_keys, stream);\n\n  cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};\n\n  \/\/Aggregate all rows for simple requests using the key sorted order (indices) and the group labels\n  cudf::groupby::sort::aggregate_all_rows<keys_have_nulls, values_have_nulls><<<\n      grid_params.num_blocks, grid_params.num_threads_per_block, 0, stream>>>(\n      *d_input_values, *d_output_values,\n      static_cast<gdf_index_type const*>(key_sorted_order.data),\n      group_labels.data().get(), options.ignore_null_keys,\n      d_ops.data().get(), row_bitmask.data().get());\n  \n  std::transform(simple_output_values.begin(), simple_output_values.end(), simple_output_values.begin(),\n    [num_groups](gdf_column *col) {\n      CUDF_EXPECTS(col != nullptr, \"Attempt to update Null column.\");\n      col->size = num_groups;\n      return col;\n    });\n  return simple_output_values;\n}\n\ntemplate <bool keys_have_nulls, bool values_have_nulls>\nstd::pair<cudf::table, std::vector<gdf_column*>> compute_sort_groupby(cudf::table const& input_keys, cudf::table const& input_values,\n                          std::vector<operators> const& input_ops,\n                          std::vector<operation_args*> const& input_ops_args,\n                          Options options,\n                          cudaStream_t stream) {\n  auto include_nulls = not options.ignore_null_keys;\n  auto groupby = detail::helper(input_keys, include_nulls, options.null_sort_behavior, options.input_sorted);\n\n  if (groupby.num_groups() == 0) {\n    cudf::table output_values(0, target_dtypes(column_dtypes(input_values), input_ops), column_dtype_infos(input_values));\n    return std::make_pair(\n        cudf::empty_like(input_keys),\n        std::vector<gdf_column*>{output_values.begin(), output_values.end()}\n    );\n  }\n  gdf_size_type num_groups = groupby.num_groups();\n  \/\/ An \"aggregation request\" is the combination of a `gdf_column*` to a column\n  \/\/ of values, and an aggregation operation enum indicating the aggregation\n  \/\/ requested to be performed on the column\n  std::vector<AggRequestType> original_requests(input_values.num_columns());\n  std::transform(input_values.begin(), input_values.end(), input_ops.begin(),\n                 
original_requests.begin(),\n                 [](gdf_column const* col, operators op) {\n                   return std::make_pair(const_cast<gdf_column*>(col), op);\n                 });\n\n  \/\/ Some aggregations are \"compound\", meaning they need to be satisfied via the\n  \/\/ composition of one or more \"simple\" aggregation requests. For example, MEAN\n  \/\/ is satisfied via the division of the SUM by the COUNT aggregation. We\n  \/\/ translate these compound requests into simple requests, and compute the\n  \/\/ groupby operation for these simple requests. Later, we translate the simple\n  \/\/ requests back to compound request results.\n  std::vector<SimpleAggRequestCounter> simple_requests =\n      compound_to_simple(original_requests);\n\n  std::vector<gdf_column*> simple_values_columns;\n  std::vector<operators> simple_operators;\n  for (auto const& p : simple_requests) {\n    const AggRequestType& agg_req_type = p.first;\n    simple_values_columns.push_back(\n        const_cast<gdf_column*>(agg_req_type.first));\n    simple_operators.push_back(agg_req_type.second);\n  }\n\n  \/\/ If there are \"simple\" aggregation requests, compute the aggregations \n  cudf::table current_output_values{};\n  if (simple_values_columns.size() > 0) {\n    \/\/ Step 1: Aggregate all rows for simple requests \n    cudf::table simple_output_values = compute_simple_aggregations<keys_have_nulls, values_have_nulls>(input_keys,\n                                                                  options,\n                                                                  groupby,\n                                                                  simple_values_columns,\n                                                                  simple_operators,\n                                                                  stream);\n    \/\/ Step 2: If any of the original requests were compound, compute them from the\n    \/\/ results of simple aggregation requests\n    current_output_values = compute_original_requests(original_requests, simple_requests, simple_output_values, stream);\n  }\n  \/\/ If there are \"ordered\" aggregation requests like MEDIAN, QUANTILE, compute these aggregations \n  std::vector<gdf_column*> final_output_values = compute_ordered_aggregations(groupby, original_requests, input_ops_args, current_output_values, stream);\n\n  \/\/ Update size and null count of output columns\n  std::transform(final_output_values.begin(), final_output_values.end(), final_output_values.begin(),\n    [num_groups](gdf_column *col) {\n      CUDF_EXPECTS(col != nullptr, \"Attempt to update Null column.\");\n      set_null_count(*col);\n      return col;\n    });\n  return std::make_pair(groupby.unique_keys(), final_output_values);\n}\n\n\/**---------------------------------------------------------------------------*\n * @brief Returns appropriate callable instantiation of `compute_sort_groupby`\n * based on presence of null values in keys and values.\n *\n * @param keys The groupby key columns\n * @param values The groupby value columns\n * @return Instantiated callable of compute_sort_groupby\n *---------------------------------------------------------------------------**\/\nauto groupby_null_specialization(table const& keys, table const& values) {\n  if (cudf::has_nulls(keys)) {\n    if (cudf::has_nulls(values)) {\n      return compute_sort_groupby<true, true>;\n    } else {\n      return compute_sort_groupby<true, false>;\n    }\n  } else {\n    if (cudf::has_nulls(values)) {\n      return compute_sort_groupby<false, true>;\n    } else {\n      return compute_sort_groupby<false, false>;\n    }\n  }\n}\n} \/\/ anonymous namespace\n\nnamespace detail { \n \n\n\/**---------------------------------------------------------------------------*\n * @brief Verifies the requested aggregation is valid for the arguments of the \n * operator.\n *\n * @throw cudf::logic_error if an invalid combination of argument and operator\n * is requested.\n *\n * @param ops The aggregation operators\n * @param args The aggregation arguments\n *---------------------------------------------------------------------------**\/\nstatic void verify_operators_with_arguments(std::vector<operators> const& ops, 
std::vector<operation_args*> const& args) {\n  CUDF_EXPECTS(ops.size() == args.size(),\n             \"Size mismatch between ops and args\");\n  for (size_t i = 0; i < ops.size(); i++) {\n    if(ops[i] == QUANTILE) { \n      quantile_args* q_args = static_cast<quantile_args*>(args[i]); \n      if (q_args == nullptr or q_args->quantiles.size() == 0) {\n        CUDF_FAIL(\n            \"Missing quantile aggregation arguments.\");\n      }\n    } \n  }\n}\n\nstd::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const& keys,\n                                            cudf::table const& values,\n                                            std::vector<operation> const& ops,\n                                            Options options,\n                                            cudaStream_t stream = 0) {\n  CUDF_EXPECTS(keys.num_rows() == values.num_rows(),\n               \"Size mismatch between number of rows in keys and values.\");\n  std::vector<operators> optype_list(ops.size());\n  std::transform(ops.begin(), ops.end(), optype_list.begin(), [](auto const& op) {\n    return op.op_name;\n  });\n  std::vector<operation_args*> ops_args(ops.size());\n  std::transform(ops.begin(), ops.end(), ops_args.begin(), [](auto const& op) {\n    return op.args.get();\n  });\n  verify_operators(values, optype_list);\n  verify_operators_with_arguments(optype_list, ops_args);\n\n  \/\/ Empty inputs\n  if (keys.num_rows() == 0) {\n    cudf::table output_values(0, target_dtypes(column_dtypes(values), optype_list), column_dtype_infos(values));\n    return std::make_pair(\n        cudf::empty_like(keys),\n        std::vector<gdf_column*>{output_values.begin(), output_values.end()}\n    );\n  }\n\n  auto compute_groupby = groupby_null_specialization(keys, values);\n\n  cudf::table output_keys;\n  std::vector<gdf_column*> output_values;\n  std::tie(output_keys, output_values) =\n      compute_groupby(keys, values, optype_list, ops_args, options, stream);\n  \n  cudf::table table_output_values(output_values);\n  \n  update_nvcategories(keys, output_keys, values, table_output_values);\n  return std::make_pair(output_keys, output_values);  \n}\n\n} \/\/ namespace detail\n \n\nstd::pair<cudf::table, std::vector<gdf_column*>> groupby(cudf::table const &keys,\n                                            cudf::table const &values,\n                                            std::vector<operation> const &ops,\n                                            Options options) {\n  return detail::groupby(keys, values, ops, options);\n}\n\n\n} \/\/ END: namespace sort\n} \/\/ END: namespace groupby\n} \/\/ END: namespace cudf\n","avg_line_length":43.0109589041,"max_line_length":153,"alphanum_fraction":0.6555194598} {"size":2366,"ext":"cu","lang":"Cuda","max_stars_count":2455.0,"content":"#include \n#include \n#include \n#include \n#include \n#include \n\n\/\/ This example demonstrates how to build a minimal custom\n\/\/ Thrust backend by intercepting for_each's dispatch.\n\n\/\/ We begin by defining a \"system\", which distinguishes our novel\n\/\/ backend from other Thrust backends.\n\/\/ We'll derive my_system from thrust::device_execution_policy to inherit\n\/\/ the functionality of the default device backend.\n\/\/ Note that we pass the name of our system as a template parameter\n\/\/ to thrust::device_execution_policy.\nstruct my_system : thrust::device_execution_policy<my_system> {};\n\n\/\/ Next, we'll create a novel version of for_each which only\n\/\/ applies to algorithm invocations executed with my_system.\n\/\/ Our version of for_each will print a message and then call\n\/\/ the regular device version of for_each.\n\n\/\/ The first parameter to our version of for_each is my_system. 
This allows\n\/\/ Thrust to locate it when dispatching thrust::for_each.\n\/\/ The following parameters are as normal.\ntemplate<typename Iterator, typename Function>\n  Iterator for_each(my_system, \n                    Iterator first, Iterator last,\n                    Function f)\n{\n  \/\/ output a message\n  std::cout << \"Hello, world from for_each(my_system)!\" << std::endl;\n\n  \/\/ to call the normal device version of for_each, pass thrust::device as the first parameter.\n  return thrust::for_each(thrust::device, first, last, f);\n}\n\nint main()\n{\n  thrust::device_vector<int> vec(1);\n\n  \/\/ create an instance of our system\n  my_system sys;\n\n  \/\/ To invoke our version of for_each, pass sys as the first parameter\n  thrust::for_each(sys, vec.begin(), vec.end(), thrust::identity<int>());\n\n  \/\/ Other algorithms that Thrust implements with thrust::for_each will also\n  \/\/ cause our version of for_each to be invoked when we pass an instance of my_system as the first parameter.\n  \/\/ Even though we did not define a special version of transform, Thrust dispatches the version it knows\n  \/\/ for thrust::device_execution_policy, which my_system inherits.\n  thrust::transform(sys, vec.begin(), vec.end(), vec.begin(), thrust::identity<int>());\n\n  \/\/ Invocations without my_system are handled normally.\n  thrust::for_each(vec.begin(), vec.end(), thrust::identity<int>());\n\n  return 0;\n}\n\n","avg_line_length":38.7868852459,"max_line_length":110,"alphanum_fraction":0.7387996619} {"size":8434,"ext":"cu","lang":"Cuda","max_stars_count":6989.0,"content":"#include \n#include \n#include \n\nnamespace NKernel {\n\n    template <int BLOCK_SIZE>\n    __global__ void ComputeWeightedQuantileWithBinarySearchImpl(const float* targets,\n                                                                const float* weightsPrefixSum,\n                                                                ui32 objectsCount,\n                                                                const float* needWeights,\n                                                                const ui32* beginOffsets,\n                                                                const ui32* endOffsets,\n                                                                float* point,\n                                                                float alpha,\n                                                                ui32 binarySearchIterations) {\n        const ui32 i = blockIdx.x * BLOCK_SIZE + threadIdx.x;\n        if (i >= objectsCount) {\n            return;\n        }\n\n        ui32 left = beginOffsets[i];\n
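        \/\/ endOffsets[i] == 0 would wrap the unsigned right index around, so clamp it to a valid position\n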
        ui32 right = endOffsets[i] == 0 ? 0 : endOffsets[i] - 1;\n\n        if (left > right) {\n            point[i] = 0;\n            return;\n        }\n\n        const float eps = std::numeric_limits<float>::epsilon();\n        for (ui32 index = 0; index < binarySearchIterations; ++index) {\n            ui32 middle = left + (right - left) \/ 2;\n\n            if (weightsPrefixSum[middle] < needWeights[i] - eps) {\n                left = middle;\n            } else {\n                right = middle;\n            }\n        }\n\n        point[i] = targets[right];\n        return;\n    }\n\n    template <int BLOCK_SIZE>\n    __global__ void ComputeNeedWeightsImpl(const float* targets,\n                                           const float* weights,\n                                           const ui32* beginOffsets,\n                                           const ui32* endOffsets,\n                                           float* needWeights,\n                                           float alpha,\n                                           ui32 elementsPerThreads) {\n        const ui32 begin = beginOffsets[blockIdx.x] + threadIdx.x;\n        const ui32 end = endOffsets[blockIdx.x];\n\n        __shared__ float localBuffer[BLOCK_SIZE];\n        localBuffer[threadIdx.x] = 0;\n\n        if (begin >= end) {\n            return;\n        }\n\n        float totalSum = 0;\n        for (ui32 idx = begin; idx < end; idx += BLOCK_SIZE) {\n            totalSum += weights[idx];\n        }\n\n        localBuffer[threadIdx.x] = totalSum;\n        __syncthreads();\n\n        float blocksSum = FastInBlockReduce(threadIdx.x, localBuffer, BLOCK_SIZE);\n        if (threadIdx.x == 0) {\n            needWeights[blockIdx.x] = blocksSum * alpha;\n        }\n    }\n\n    __global__ void ComputeWeightsWithTargetsImpl(const float* targets,\n                                                  const float* weights,\n                                                  float* weightsWithTargets,\n                                                  ui32 objectsCount) {\n        int i = blockIdx.x * blockDim.x + threadIdx.x;\n\n        if (i >= objectsCount) {\n            return;\n        }\n\n        const float delta = max(1.0f, abs(targets[i]));\n        weightsWithTargets[i] = weights[i] \/ delta;\n    }\n\n    __global__ void MakeEndOfBinsFlagsImpl(const ui32* beginOffsets,\n                                           const ui32* endOffsets,\n                                           ui32* flags) {\n        const ui32 begin = beginOffsets[blockIdx.x];\n        const ui32 end = endOffsets[blockIdx.x];\n\n        if (begin == end) {\n            return;\n        }\n\n        flags[begin] = 1;\n    }\n\n    void ComputeNeedWeights(const float* targets,\n                            const float* weights,\n                            ui32 objectsCount,\n                            ui32 binCount,\n                            const ui32* beginOffsets,\n                            const ui32* endOffsets,\n                            float* needWeights,\n                            float alpha,\n                            TCudaStream stream) {\n        const ui32 blockSize = 1024;\n        const ui32 blocksNum = binCount;\n        const ui32 elementsPerThreads = CeilDivide(objectsCount, blockSize * blocksNum);\n\n        ComputeNeedWeightsImpl<blockSize> << < blocksNum, blockSize, 0, stream >> > (targets,\n                                                                          weights,\n                                                                          beginOffsets,\n                                                                          endOffsets,\n                                                                          needWeights,\n                                                                          alpha,\n                                                                          elementsPerThreads);\n    }\n\n    void ComputeWeightsWithTargets(const float* targets,\n                                   const float* weights,\n                                   float* weightsWithTargets,\n                                   ui32 objectsCount,\n                                   TCudaStream stream) {\n        const ui32 blockSize = 512;\n        const ui32 blocksNum = CeilDivide(objectsCount, blockSize);\n\n        ComputeWeightsWithTargetsImpl << < blocksNum, blockSize, 0, stream >> > (targets,\n                                                                                 weights,\n                                                                                 weightsWithTargets,\n                                                                                 objectsCount);\n    }\n\n    void ComputeWeightedQuantileWithBinarySearch(const float* targets,\n                                                 const float* weightsPrefixSum,\n                                                 ui32 objectsCount,\n                                                 const float* needWeights,\n                                                 const ui32* beginOffsets,\n                                                 const ui32* endOffsets,\n                                                 ui32 binCount,\n                                                 float* point,\n                                                 float alpha,\n                                                 ui32 binarySearchIterations,\n                                                 TCudaStream stream) {\n        const ui32 blockSize = 256;\n        const ui32 blocksNum = CeilDivide(binCount, blockSize);\n\n        ComputeWeightedQuantileWithBinarySearchImpl<blockSize> << < blocksNum, blockSize, 0, stream >> > (targets,\n                                                                                               weightsPrefixSum,\n                                                                                               objectsCount,\n                                                                                               needWeights,\n                                                                                               beginOffsets,\n                                                                                               endOffsets,\n                                                                                               point,\n                                                                                               alpha,\n                                                                                               binarySearchIterations);\n    }\n\n    void MakeEndOfBinsFlags(const ui32* beginOffsets,\n                            const ui32* endOffsets,\n                            ui32 binCount,\n                            ui32* flags,\n                            TCudaStream stream) {\n        const ui32 blockSize = 128;\n        const ui32 blocksNum = binCount;\n\n        
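\/\/ one block per bin: every thread stores the same flag value, so the concurrent writes are benign\n        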
MakeEndOfBinsFlagsImpl << < blocksNum, blockSize, 0, stream >> > (beginOffsets,\n                                                                           endOffsets,\n                                                                           flags);\n    }\n}\n","avg_line_length":47.3820224719,"max_line_length":130,"alphanum_fraction":0.3681527152} {"size":1078,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \"cudaMemory.hpp\"\n\n#include \n#include \"checkCudaErrors.hpp\"\n\n\nvoid allocCudaMem(\n    void** d_DataPtrPtr,\n    const unsigned int size\n) {\n    checkCudaErrors(cudaMalloc(d_DataPtrPtr, size));\n    checkCudaErrors(cudaMemset(*d_DataPtrPtr, 0, size));\n}\n\nvoid gpuMemFree(\n    void** d_DataPtrPtr\n) {\n    checkCudaErrors(cudaFree(*d_DataPtrPtr));\n    *d_DataPtrPtr = nullptr;\n}\n\nvoid memsetZero(\n    void* d_DataPtr,\n    const unsigned int size\n) {\n    checkCudaErrors(cudaMemset(d_DataPtr, 0, size));\n}\n\nvoid memcpyCPUtoGPU(\n    void* h_DataPtr,\n    void* d_DataPtr,\n    const unsigned int size\n) {\n    checkCudaErrors(cudaMemcpy(d_DataPtr, h_DataPtr, size, cudaMemcpyHostToDevice));\n}\n\nvoid memcpyGPUtoCPU(\n    void* d_DataPtr,\n    void* h_DataPtr,\n    const unsigned int size\n) {\n    checkCudaErrors(cudaMemcpy(h_DataPtr, d_DataPtr, size, cudaMemcpyDeviceToHost));\n}\n\nvoid memcpyGPUtoGPU(\n    void* d_DataFromPtr,\n    void* d_DataToPtr,\n    const unsigned int size\n) {\n    checkCudaErrors(cudaMemcpy(d_DataToPtr, d_DataFromPtr, size, cudaMemcpyDeviceToDevice));\n}\n","avg_line_length":20.7307692308,"max_line_length":92,"alphanum_fraction":0.7189239332} {"size":6288,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"\/*\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n#include \"oneflow\/core\/framework\/framework.h\"\n#include \"oneflow\/core\/device\/cuda_util.h\"\n#include \"oneflow\/core\/ep\/cuda\/cuda_stream.h\"\n#include \"oneflow\/core\/kernel\/new_kernel_util.h\"\n#include \"oneflow\/core\/ndarray\/binary_func.h\"\n\nnamespace oneflow {\n#ifdef WITH_CUDA\nnamespace {\n\n\/\/ total thread number: cs_up_space * cs_down_space\n\/\/ in cs_down_space part, use cs_down_space threads\n\/\/ to calculate as follows (m = cs_down_space-1, n = cs_space-1; '|' stands for dependency):\n\/\/ dm0, ..., d10, d00\n\/\/  |         |    |\n\/\/ dm1, ..., d11, d01\n\/\/  |         |    |\n\/\/ dm2, ..., d12, d02\n\/\/  |         |    |\n\/\/ ...       ...  ...\n\/\/  |         |    |\n\/\/ dmn, ..., d1n, d0n\n
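\/\/ i.e. one thread owns one (up, down) column and scans it serially along the cumsum axis; columns are independent\n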
template<typename T, template<typename> class BinaryFunc>\n__global__ void CumsumForwardGpu(const T* in_ptr, T* out_ptr, int64_t cs_up_space, int64_t cs_space,\n                                 int64_t cs_down_space) {\n  CUDA_1D_KERNEL_LOOP(i, cs_up_space * cs_down_space) {\n    auto cs_up_space_id = i \/ cs_down_space;\n    auto cs_down_space_id = i - (i \/ cs_down_space) * cs_down_space;\n\n    auto* in_ptr_base = in_ptr + cs_up_space_id * cs_space * cs_down_space + cs_down_space_id;\n    auto* out_ptr_base = out_ptr + cs_up_space_id * cs_space * cs_down_space + cs_down_space_id;\n\n    \/\/ calculate cs_space data in one thread\n    for (auto j = 0; j < cs_space; j++) {\n      auto idx = j * cs_down_space;\n      out_ptr_base[idx] = in_ptr_base[idx];\n      if (j != 0) {\n        out_ptr_base[idx] =\n            BinaryFunc<T>::Invoke(out_ptr_base[idx], out_ptr_base[idx - cs_down_space]);\n      }\n    }\n  }\n}\ntemplate<typename T, template<typename> class BinaryFunc>\n__global__ void CumsumForwardGpuUpSpaceIs1(const T* in_ptr, T* out_ptr, int64_t cs_space,\n                                           int64_t cs_down_space) {\n  CUDA_1D_KERNEL_LOOP(i, cs_down_space) {\n    auto* in_ptr_base = in_ptr + i;\n    auto* out_ptr_base = out_ptr + i;\n\n    \/\/ calculate cs_space data in one thread\n    for (auto j = 0; j < cs_space; j++) {\n      auto idx = j * cs_down_space;\n      out_ptr_base[idx] = in_ptr_base[idx];\n      if (j != 0) {\n        out_ptr_base[idx] =\n            BinaryFunc<T>::Invoke(out_ptr_base[idx], out_ptr_base[idx - cs_down_space]);\n      }\n    }\n  }\n}\ntemplate<typename T, template<typename> class BinaryFunc>\n__global__ void CumsumForwardGpuDownSpaceIs1(const T* in_ptr, T* out_ptr, int64_t cs_up_space,\n                                             int64_t cs_space) {\n  CUDA_1D_KERNEL_LOOP(i, cs_up_space) {\n    auto* in_ptr_base = in_ptr + i * cs_space;\n    auto* out_ptr_base = out_ptr + i * cs_space;\n\n    \/\/ calculate cs_space data in one thread\n    for (auto j = 0; j < cs_space; j++) {\n      out_ptr_base[j] = in_ptr_base[j];\n      if (j != 0) { out_ptr_base[j] = BinaryFunc<T>::Invoke(out_ptr_base[j], out_ptr_base[j - 1]); }\n    }\n  }\n}\n} \/\/ namespace\n\ntemplate<typename T, template<typename> class BinaryFunc>\nclass GpuCumKernel : public user_op::OpKernel {\n public:\n  GpuCumKernel() = default;\n  ~GpuCumKernel() = default;\n\n private:\n  using user_op::OpKernel::Compute;\n  void Compute(user_op::KernelComputeContext* ctx) const override {\n    \/\/ judge whether tensor has 0 size dimension first\n    const auto* in = ctx->Tensor4ArgNameAndIndex(\"x\", 0);\n    auto elem_cnt = in->shape().elem_cnt();\n    if (!elem_cnt) { return; }\n\n    auto* out = ctx->Tensor4ArgNameAndIndex(\"y\", 0);\n    auto dim = ctx->Attr<int64_t>(\"dim\");\n    const auto* in_ptr = in->dptr<T>();\n    auto* out_ptr = out->mut_dptr<T>();\n\n    \/\/ data partition: up_space|space|down_space\n    auto up_space = elem_cnt \/ in->shape().Count(dim);\n    auto space = in->shape().At(dim);\n    auto down_space = in->shape().Count(dim + 1);\n    auto thread_num = up_space * down_space;\n\n    if (up_space == 1) {\n      RUN_CUDA_KERNEL((CumsumForwardGpuUpSpaceIs1<T, BinaryFunc>), ctx->stream(), thread_num,\n                      in_ptr, out_ptr, space, down_space);\n    } else if (down_space == 1) {\n      RUN_CUDA_KERNEL((CumsumForwardGpuDownSpaceIs1<T, BinaryFunc>), ctx->stream(), thread_num,\n                      in_ptr, out_ptr, up_space, space);\n    } else {\n      RUN_CUDA_KERNEL((CumsumForwardGpu<T, BinaryFunc>), ctx->stream(), thread_num, in_ptr, out_ptr,\n                      up_space, space, down_space);\n    }\n  }\n  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }\n};\n\n
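\/\/ concrete kernels: cumsum scans with BinaryFuncAdd, and cumprod below reuses the same machinery with BinaryFuncMul (both assumed to come from ndarray\/binary_func.h)\n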
template<typename T>\nclass GpuCumSumKernel final : public GpuCumKernel<T, BinaryFuncAdd> {\n public:\n  GpuCumSumKernel() = default;\n  ~GpuCumSumKernel() = default;\n};\n\n#define REGISTER_CUDA_CUMSUM_KERNEL(dtype)                                                 \\\n  REGISTER_USER_KERNEL(\"cumsum\").SetCreateFn<GpuCumSumKernel<dtype>>().SetIsMatchedHob(    \\\n      (user_op::HobDeviceType() == DeviceType::kCUDA)                                      \\\n      && (user_op::HobDataType(\"y\", 0) == GetDataType<dtype>::value));\n\nREGISTER_CUDA_CUMSUM_KERNEL(int64_t)\nREGISTER_CUDA_CUMSUM_KERNEL(float)\nREGISTER_CUDA_CUMSUM_KERNEL(double)\n#undef REGISTER_CUDA_CUMSUM_KERNEL\n\ntemplate<typename T>\nclass GpuCumProdKernel final : public GpuCumKernel<T, BinaryFuncMul> {\n public:\n  GpuCumProdKernel() = default;\n  ~GpuCumProdKernel() = default;\n};\n\n#define REGISTER_CUDA_CUMPROD_KERNEL(dtype)                                                \\\n  REGISTER_USER_KERNEL(\"cumprod\").SetCreateFn<GpuCumProdKernel<dtype>>().SetIsMatchedHob(  \\\n      (user_op::HobDeviceType() == DeviceType::kCUDA)                                      \\\n      && (user_op::HobDataType(\"y\", 0) == GetDataType<dtype>::value));\n\nREGISTER_CUDA_CUMPROD_KERNEL(int64_t)\nREGISTER_CUDA_CUMPROD_KERNEL(float)\nREGISTER_CUDA_CUMPROD_KERNEL(double)\n#undef REGISTER_CUDA_CUMPROD_KERNEL\n#endif\n} \/\/ namespace oneflow\n","avg_line_length":37.6526946108,"max_line_length":100,"alphanum_fraction":0.6599872774} {"size":16253,"ext":"cu","lang":"Cuda","max_stars_count":567.0,"content":"\/*!\r\n * Copyright (c) 2017 Microsoft\r\n * Licensed under The MIT License [see LICENSE for details]\r\n * \\file deformable_psroi_pooling.cu\r\n * \\brief\r\n * \\author Yi Li, Guodong Zhang, Jifeng Dai\r\n*\/\r\n\/***************** Adapted by Charles Shang *********************\/\r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\nusing namespace at;\r\n\r\n#define CUDA_KERNEL_LOOP(i, n)                        \\\r\n  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\r\n       i < (n);                                       \\\r\n       i += blockDim.x * gridDim.x)\r\n\r\nconst int CUDA_NUM_THREADS = 1024;\r\ninline int GET_BLOCKS(const int N)\r\n{\r\n  return (N + CUDA_NUM_THREADS - 1) \/ CUDA_NUM_THREADS;\r\n}\r\n\r\ntemplate <typename scalar_t>\r\n__device__ scalar_t bilinear_interp(\r\n    const scalar_t *data,\r\n    const scalar_t x,\r\n    const scalar_t y,\r\n    const int width,\r\n    const int height)\r\n{\r\n  int x1 = floor(x);\r\n  int x2 = ceil(x);\r\n  int y1 = floor(y);\r\n  int y2 = ceil(y);\r\n  scalar_t dist_x = (scalar_t)(x - x1);\r\n  scalar_t dist_y = (scalar_t)(y - y1);\r\n  scalar_t value11 = data[y1 * width + x1];\r\n  scalar_t value12 = data[y2 * width + x1];\r\n  scalar_t value21 = data[y1 * width + x2];\r\n  scalar_t value22 = data[y2 * width + x2];\r\n  scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;\r\n  return value;\r\n}\r\n\r\ntemplate <typename scalar_t>\r\n__global__ void DeformablePSROIPoolForwardKernel(\r\n    const int count,\r\n    const scalar_t *bottom_data,\r\n    const scalar_t spatial_scale,\r\n    const int channels,\r\n    const int height, const int width,\r\n    const int pooled_height, const int pooled_width,\r\n    const scalar_t *bottom_rois, const scalar_t *bottom_trans,\r\n    const int no_trans,\r\n    const scalar_t trans_std,\r\n    const int sample_per_part,\r\n    const int output_dim,\r\n    const int group_size,\r\n    const int part_size,\r\n    const int num_classes,\r\n    const int channels_each_class,\r\n    scalar_t *top_data,\r\n    scalar_t *top_count)\r\n{\r\n  CUDA_KERNEL_LOOP(index, count)\r\n  {\r\n    \/\/ The output is in order (n, ctop, ph, pw)\r\n    int pw = index % pooled_width;\r\n    int ph = (index \/ pooled_width) % pooled_height;\r\n    int ctop = (index \/ pooled_width \/ pooled_height) % output_dim;\r\n    int n = index \/ pooled_width \/ pooled_height \/ output_dim;\r\n\r\n    \/\/ [start, end) interval for spatial sampling\r\n    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;\r\n    int roi_batch_ind = offset_bottom_rois[0];\r\n
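    \/\/ each roi is [batch_index, x1, y1, x2, y2]; the 0.5 shifts align sample points with pixel centers\r\n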
    scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;\r\n    scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;\r\n    scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;\r\n    scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;\r\n\r\n    \/\/ Force too small ROIs to be 1x1\r\n    scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); \/\/avoid 0\r\n    scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);\r\n\r\n    \/\/ Compute w and h at bottom\r\n    scalar_t bin_size_h = roi_height \/ (scalar_t)(pooled_height);\r\n    scalar_t bin_size_w = roi_width \/ (scalar_t)(pooled_width);\r\n\r\n    scalar_t sub_bin_size_h = bin_size_h \/ (scalar_t)(sample_per_part);\r\n    scalar_t sub_bin_size_w = bin_size_w \/ (scalar_t)(sample_per_part);\r\n\r\n    int part_h = floor((scalar_t)(ph) \/ pooled_height * part_size);\r\n    int part_w = floor((scalar_t)(pw) \/ pooled_width * part_size);\r\n    int class_id = ctop \/ channels_each_class;\r\n    scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;\r\n    scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;\r\n\r\n    scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;\r\n    wstart += trans_x * roi_width;\r\n    scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;\r\n    hstart += trans_y * roi_height;\r\n\r\n    scalar_t sum = 0;\r\n    int count = 0;\r\n    int gw = floor((scalar_t)(pw)*group_size \/ pooled_width);\r\n    int gh = floor((scalar_t)(ph)*group_size \/ pooled_height);\r\n    gw = min(max(gw, 0), group_size - 1);\r\n    gh = min(max(gh, 0), group_size - 1);\r\n\r\n    const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;\r\n    for (int ih = 0; ih < sample_per_part; ih++)\r\n    {\r\n      for (int iw = 0; iw < sample_per_part; iw++)\r\n      {\r\n        scalar_t w = wstart + iw * sub_bin_size_w;\r\n        scalar_t h = hstart + ih * sub_bin_size_h;\r\n        \/\/ bilinear interpolation\r\n        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)\r\n        {\r\n          continue;\r\n        }\r\n        w = min(max(w, 0.), width - 1.);\r\n        h = min(max(h, 0.), height - 1.);\r\n        int c = (ctop * group_size + gh) * group_size + gw;\r\n        scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height);\r\n        sum += val;\r\n        count++;\r\n      }\r\n    }\r\n
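    \/\/ average over the samples that actually landed inside the feature map\r\n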
    top_data[index] = count == 0 ? (scalar_t)(0) : sum \/ count;\r\n    top_count[index] = count;\r\n  }\r\n}\r\n\r\ntemplate <typename scalar_t>\r\n__global__ void DeformablePSROIPoolBackwardAccKernel(\r\n    const int count,\r\n    const scalar_t *top_diff,\r\n    const scalar_t *top_count,\r\n    const int num_rois,\r\n    const scalar_t spatial_scale,\r\n    const int channels,\r\n    const int height, const int width,\r\n    const int pooled_height, const int pooled_width,\r\n    const int output_dim,\r\n    scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff,\r\n    const scalar_t *bottom_data,\r\n    const scalar_t *bottom_rois,\r\n    const scalar_t *bottom_trans,\r\n    const int no_trans,\r\n    const scalar_t trans_std,\r\n    const int sample_per_part,\r\n    const int group_size,\r\n    const int part_size,\r\n    const int num_classes,\r\n    const int channels_each_class)\r\n{\r\n  CUDA_KERNEL_LOOP(index, count)\r\n  {\r\n    \/\/ The output is in order (n, ctop, ph, pw)\r\n    int pw = index % pooled_width;\r\n    int ph = (index \/ pooled_width) % pooled_height;\r\n    int ctop = (index \/ pooled_width \/ pooled_height) % output_dim;\r\n    int n = index \/ pooled_width \/ pooled_height \/ output_dim;\r\n\r\n    \/\/ [start, end) interval for spatial sampling\r\n    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;\r\n    int roi_batch_ind = offset_bottom_rois[0];\r\n    scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;\r\n    scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;\r\n    scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;\r\n    scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;\r\n\r\n    \/\/ Force too small ROIs to be 1x1\r\n    scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); \/\/avoid 0\r\n    scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);\r\n\r\n    \/\/ Compute w and h at bottom\r\n    scalar_t bin_size_h = roi_height \/ (scalar_t)(pooled_height);\r\n    scalar_t bin_size_w = roi_width \/ (scalar_t)(pooled_width);\r\n\r\n    scalar_t sub_bin_size_h = bin_size_h \/ (scalar_t)(sample_per_part);\r\n    scalar_t sub_bin_size_w = bin_size_w \/ (scalar_t)(sample_per_part);\r\n\r\n    int part_h = floor((scalar_t)(ph) \/ pooled_height * part_size);\r\n    int part_w = floor((scalar_t)(pw) \/ pooled_width * part_size);\r\n    int class_id = ctop \/ channels_each_class;\r\n    scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;\r\n    scalar_t trans_y = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;\r\n\r\n    scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;\r\n    wstart += trans_x * roi_width;\r\n    scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;\r\n    hstart += trans_y * roi_height;\r\n\r\n    if (top_count[index] <= 0)\r\n    {\r\n      continue;\r\n    }\r\n    scalar_t diff_val = top_diff[index] \/ top_count[index];\r\n    const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;\r\n    scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;\r\n    int gw = floor((scalar_t)(pw)*group_size \/ pooled_width);\r\n    int gh = floor((scalar_t)(ph)*group_size \/ pooled_height);\r\n    gw = min(max(gw, 0), group_size - 1);\r\n    gh = min(max(gh, 0), group_size - 1);\r\n\r\n    for (int ih = 0; ih < sample_per_part; ih++)\r\n    {\r\n      for (int iw = 0; iw < sample_per_part; iw++)\r\n      {\r\n        scalar_t w = wstart + iw * sub_bin_size_w;\r\n        scalar_t h = hstart + ih * sub_bin_size_h;\r\n        \/\/ bilinear interpolation\r\n        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)\r\n        {\r\n          continue;\r\n        }\r\n        w = min(max(w, 0.), width - 1.);\r\n        h = min(max(h, 0.), height - 1.);\r\n        int c = (ctop * group_size + gh) * group_size + gw;\r\n        \/\/ backward on feature\r\n        int x0 = floor(w);\r\n        int x1 = ceil(w);\r\n        int y0 = floor(h);\r\n        int y1 = ceil(h);\r\n        scalar_t dist_x = w - x0, dist_y = h - y0;\r\n        scalar_t q00 = (1 - dist_x) * (1 - dist_y);\r\n        scalar_t q01 = (1 - dist_x) * dist_y;\r\n        scalar_t q10 = dist_x * (1 - dist_y);\r\n        scalar_t q11 = dist_x * dist_y;\r\n        int bottom_index_base = c * height * width;\r\n        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);\r\n        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);\r\n        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);\r\n        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);\r\n\r\n        if (no_trans)\r\n        {\r\n          continue;\r\n        }\r\n        scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];\r\n        scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];\r\n        scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];\r\n        scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];\r\n        scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;\r\n        diff_x *= roi_width;\r\n        scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;\r\n        diff_y *= roi_height;\r\n\r\n        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);\r\n        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);\r\n      }\r\n    }\r\n  }\r\n}\r\n\r\nvoid DeformablePSROIPoolForward(const at::Tensor data,\r\n                                const at::Tensor bbox,\r\n                                const at::Tensor trans,\r\n                                at::Tensor out,\r\n                                at::Tensor top_count,\r\n                                const int batch,\r\n                                const int channels,\r\n                                const int height,\r\n                                const int width,\r\n                                const int num_bbox,\r\n                                const int channels_trans,\r\n                                const int no_trans,\r\n                                const float spatial_scale,\r\n                                const int output_dim,\r\n                                const int group_size,\r\n                                const int pooled_size,\r\n                                const int part_size,\r\n                                const int sample_per_part,\r\n                                const float trans_std)\r\n{\r\n
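  \/\/ the output is laid out as (num_bbox, output_dim, pooled_size, pooled_size), one thread per element\r\n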
  const int pooled_height = pooled_size;\r\n  const int pooled_width = pooled_size;\r\n  const int count = num_bbox * output_dim * pooled_height * pooled_width;\r\n  const int num_classes = no_trans ? 1 : channels_trans \/ 2;\r\n  const int channels_each_class = no_trans ? output_dim : output_dim \/ num_classes;\r\n\r\n  AT_DISPATCH_FLOATING_TYPES_AND_HALF(\r\n      data.scalar_type(), \"deformable_psroi_pool_forward\", ([&] {\r\n        const scalar_t *bottom_data = data.data<scalar_t>();\r\n        const scalar_t *bottom_rois = bbox.data<scalar_t>();\r\n        const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();\r\n        scalar_t *top_data = out.data<scalar_t>();\r\n        scalar_t *top_count_data = top_count.data<scalar_t>();\r\n\r\n        DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(\r\n            count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width,\r\n            bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim,\r\n            group_size, part_size, num_classes, channels_each_class, top_data, top_count_data);\r\n      }));\r\n\r\n  cudaError_t err = cudaGetLastError();\r\n  if (err != cudaSuccess)\r\n  {\r\n    printf(\"error in DeformablePSROIPoolForward: %s\\n\", cudaGetErrorString(err));\r\n  }\r\n}\r\n\r\nvoid DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad,\r\n                                    const at::Tensor data,\r\n                                    const at::Tensor bbox,\r\n                                    const at::Tensor trans,\r\n                                    const at::Tensor top_count,\r\n                                    at::Tensor in_grad,\r\n                                    at::Tensor trans_grad,\r\n                                    const int batch,\r\n                                    const int channels,\r\n                                    const int height,\r\n                                    const int width,\r\n                                    const int num_bbox,\r\n                                    const int channels_trans,\r\n                                    const int no_trans,\r\n                                    const float spatial_scale,\r\n                                    const int output_dim,\r\n                                    const int group_size,\r\n                                    const int pooled_size,\r\n                                    const int part_size,\r\n                                    const int sample_per_part,\r\n                                    const float trans_std)\r\n{\r\n  \/\/ LOG(INFO) << \"DeformablePSROIPoolBackward\";\r\n  const int num_rois = num_bbox;\r\n  const int pooled_height = pooled_size;\r\n  const int pooled_width = pooled_size;\r\n  const int count = num_bbox * output_dim * pooled_height * pooled_width;\r\n  const int num_classes = no_trans ? 1 : channels_trans \/ 2;\r\n  const int channels_each_class = no_trans ? output_dim : output_dim \/ num_classes;\r\n\r\n  AT_DISPATCH_FLOATING_TYPES_AND_HALF(\r\n      out_grad.scalar_type(), \"deformable_psroi_pool_backward_acc\", ([&] {\r\n        const scalar_t *top_diff = out_grad.data<scalar_t>();\r\n        const scalar_t *bottom_data = data.data<scalar_t>();\r\n        const scalar_t *bottom_rois = bbox.data<scalar_t>();\r\n        const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();\r\n        scalar_t *bottom_data_diff = in_grad.data<scalar_t>();\r\n        scalar_t *bottom_trans_diff = no_trans ? 
NULL : trans_grad.data<scalar_t>();\r\n        const scalar_t *top_count_data = top_count.data<scalar_t>();\r\n\r\n        DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(\r\n            count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width,\r\n            pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,\r\n            bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part,\r\n            group_size, part_size, num_classes, channels_each_class);\r\n      }));\r\n\r\n  cudaError_t err = cudaGetLastError();\r\n  if (err != cudaSuccess)\r\n  {\r\n    printf(\"error in DeformablePSROIPoolBackwardAcc: %s\\n\", cudaGetErrorString(err));\r\n  }\r\n}","avg_line_length":44.7741046832,"max_line_length":170,"alphanum_fraction":0.5982895465} {"size":673,"ext":"cuh","lang":"Cuda","max_stars_count":3.0,"content":"\/\/\n\/\/ kernel_params.cuh\n\/\/ Parameters for module arithmetic kernels.\n\/\/\n\/\/ Copyright (c) 2021 Tatsuki Ono\n\/\/\n\/\/ This software is released under the MIT License.\n\/\/ https:\/\/opensource.org\/licenses\/mit-license.php\n\/\/\n\n#ifndef ATPQC_CUDA_LIB_KYBER_ARITHMETIC_MT_KERNEL_PARAMS_CUH_\n#define ATPQC_CUDA_LIB_KYBER_ARITHMETIC_MT_KERNEL_PARAMS_CUH_\n\n#include \n\n#include \"..\/params.cuh\"\n\nnamespace atpqc_cuda::kyber::arithmetic_mt::kernel_params {\n\ntemplate <unsigned K>\nstruct mattimes {\n  static constexpr unsigned k = K;\n  static constexpr unsigned smem_byte_per_coeff = k * k * sizeof(short2);\n};\n\n} \/\/ namespace atpqc_cuda::kyber::arithmetic_mt::kernel_params\n\n#endif\n","avg_line_length":23.2068965517,"max_line_length":73,"alphanum_fraction":0.7726597325} {"size":9631,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*!\n * Copyright 2019-2020 by Contributors\n * \\file survival_metric.cu\n * \\brief Metrics for survival analysis\n * \\author Avinash Barnwal, Hyunsu Cho and Toby Hocking\n *\/\n\n#include \n#include \n\n#include \n#include \n\n#include \"xgboost\/json.h\"\n#include \"xgboost\/metric.h\"\n#include \"xgboost\/host_device_vector.h\"\n\n#include \"metric_common.h\"\n#include \"..\/common\/math.h\"\n#include \"..\/common\/survival_util.h\"\n#include \"..\/common\/threading_utils.h\"\n\n#if defined(XGBOOST_USE_CUDA)\n#include <thrust\/execution_policy.h>  \/\/ thrust::cuda::par\n#include \"..\/common\/device_helpers.cuh\"\n#endif  \/\/ XGBOOST_USE_CUDA\n\nusing AFTParam = xgboost::common::AFTParam;\nusing ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;\ntemplate <typename Distribution>\nusing AFTLoss = xgboost::common::AFTLoss<Distribution>;\n\nnamespace xgboost {\nnamespace metric {\n\/\/ tag this file, used to force static link later.\nDMLC_REGISTRY_FILE_TAG(survival_metric);\n\ntemplate <typename EvalRow>\nclass ElementWiseSurvivalMetricsReduction {\n public:\n  ElementWiseSurvivalMetricsReduction() = default;\n  void Configure(EvalRow policy) {\n    policy_ = policy;\n  }\n\n  PackedReduceResult\n  CpuReduceMetrics(const HostDeviceVector<bst_float> &weights,\n                   const HostDeviceVector<bst_float> &labels_lower_bound,\n                   const HostDeviceVector<bst_float> &labels_upper_bound,\n                   const HostDeviceVector<bst_float> &preds,\n                   int32_t n_threads) const {\n    size_t ndata = labels_lower_bound.Size();\n    CHECK_EQ(ndata, labels_upper_bound.Size());\n\n    const auto& h_labels_lower_bound = labels_lower_bound.HostVector();\n    const auto& h_labels_upper_bound = labels_upper_bound.HostVector();\n    const auto& h_weights = weights.HostVector();\n    const auto& h_preds = preds.HostVector();\n\n    std::vector<double> score_tloc(n_threads, 0.0);\n    std::vector<double> weight_tloc(n_threads, 0.0);\n\n    common::ParallelFor(ndata, n_threads, [&](size_t i) {\n
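      \/\/ accumulate into per-thread slots; the serial std::accumulate below avoids atomics\n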
      const double wt =\n          h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);\n      auto t_idx = omp_get_thread_num();\n      score_tloc[t_idx] +=\n          policy_.EvalRow(static_cast<double>(h_labels_lower_bound[i]),\n                          static_cast<double>(h_labels_upper_bound[i]),\n                          static_cast<double>(h_preds[i])) *\n          wt;\n      weight_tloc[t_idx] += wt;\n    });\n\n    double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0);\n    double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0);\n\n    PackedReduceResult res{residue_sum, weights_sum};\n    return res;\n  }\n\n#if defined(XGBOOST_USE_CUDA)\n\n  PackedReduceResult DeviceReduceMetrics(\n      const HostDeviceVector<bst_float>& weights,\n      const HostDeviceVector<bst_float>& labels_lower_bound,\n      const HostDeviceVector<bst_float>& labels_upper_bound,\n      const HostDeviceVector<bst_float>& preds) {\n    size_t ndata = labels_lower_bound.Size();\n    CHECK_EQ(ndata, labels_upper_bound.Size());\n\n    thrust::counting_iterator<size_t> begin(0);\n    thrust::counting_iterator<size_t> end = begin + ndata;\n\n    auto s_label_lower_bound = labels_lower_bound.DeviceSpan();\n    auto s_label_upper_bound = labels_upper_bound.DeviceSpan();\n    auto s_preds = preds.DeviceSpan();\n    auto s_weights = weights.DeviceSpan();\n\n    const bool is_null_weight = (weights.Size() == 0);\n\n    auto d_policy = policy_;\n\n    dh::XGBCachingDeviceAllocator<char> alloc;\n    PackedReduceResult result = thrust::transform_reduce(\n        thrust::cuda::par(alloc),\n        begin, end,\n        [=] XGBOOST_DEVICE(size_t idx) {\n          double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);\n          double residue = d_policy.EvalRow(\n              static_cast<double>(s_label_lower_bound[idx]),\n              static_cast<double>(s_label_upper_bound[idx]),\n              static_cast<double>(s_preds[idx]));\n          residue *= weight;\n          return PackedReduceResult{residue, weight};\n        },\n        PackedReduceResult(),\n        thrust::plus<PackedReduceResult>());\n\n    return result;\n  }\n\n#endif  \/\/ XGBOOST_USE_CUDA\n\n  PackedReduceResult Reduce(\n      const GenericParameter &ctx,\n      const HostDeviceVector<bst_float>& weights,\n      const HostDeviceVector<bst_float>& labels_lower_bound,\n      const HostDeviceVector<bst_float>& labels_upper_bound,\n      const HostDeviceVector<bst_float>& preds) {\n    PackedReduceResult result;\n\n    if (ctx.gpu_id < 0) {\n      result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound,\n                                preds, ctx.Threads());\n    }\n#if defined(XGBOOST_USE_CUDA)\n    else {  \/\/ NOLINT\n      preds.SetDevice(ctx.gpu_id);\n      labels_lower_bound.SetDevice(ctx.gpu_id);\n      labels_upper_bound.SetDevice(ctx.gpu_id);\n      weights.SetDevice(ctx.gpu_id);\n\n      dh::safe_cuda(cudaSetDevice(ctx.gpu_id));\n      result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);\n    }\n#endif  \/\/ defined(XGBOOST_USE_CUDA)\n    return result;\n  }\n\n private:\n  EvalRow policy_;\n};\n\nstruct EvalIntervalRegressionAccuracy {\n  void Configure(const Args& args) {}\n\n  const char* Name() const {\n    return \"interval-regression-accuracy\";\n  }\n\n  XGBOOST_DEVICE double EvalRow(\n      double label_lower_bound, double label_upper_bound, double log_pred) const {\n    const double pred = exp(log_pred);\n    return (pred >= label_lower_bound && pred <= label_upper_bound) ? 1.0 : 0.0;\n  }\n\n  static double GetFinal(double esum, double wsum) {\n    return wsum == 0 ? esum : esum \/ wsum;\n  }\n};\n\n\/*! 
\\brief Negative log likelihood of Accelerated Failure Time model *\/\ntemplate <typename Distribution>\nstruct EvalAFTNLogLik {\n  void Configure(const Args& args) {\n    param_.UpdateAllowUnknown(args);\n  }\n\n  const char* Name() const {\n    return \"aft-nloglik\";\n  }\n\n  XGBOOST_DEVICE double EvalRow(\n      double label_lower_bound, double label_upper_bound, double pred) const {\n    return AFTLoss<Distribution>::Loss(\n        label_lower_bound, label_upper_bound, pred, param_.aft_loss_distribution_scale);\n  }\n\n  static double GetFinal(double esum, double wsum) {\n    return wsum == 0 ? esum : esum \/ wsum;\n  }\n private:\n  AFTParam param_;\n};\n\ntemplate <typename Policy> struct EvalEWiseSurvivalBase : public Metric {\n  explicit EvalEWiseSurvivalBase(GenericParameter const *ctx) {\n    tparam_ = ctx;\n  }\n  EvalEWiseSurvivalBase() = default;\n\n  void Configure(const Args& args) override {\n    policy_.Configure(args);\n    reducer_.Configure(policy_);\n    CHECK(tparam_);\n  }\n\n  bst_float Eval(const HostDeviceVector<bst_float>& preds,\n                 const MetaInfo& info,\n                 bool distributed) override {\n    CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());\n    CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());\n    CHECK(tparam_);\n    auto result =\n        reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,\n                        info.labels_upper_bound_, preds);\n\n    double dat[2] {result.Residue(), result.Weights()};\n\n    if (distributed) {\n      rabit::Allreduce<rabit::op::Sum>(dat, 2);\n    }\n    return static_cast<bst_float>(Policy::GetFinal(dat[0], dat[1]));\n  }\n\n  const char* Name() const override {\n    return policy_.Name();\n  }\n\n private:\n  Policy policy_;\n  ElementWiseSurvivalMetricsReduction<Policy> reducer_;\n  int device_{-1};  \/\/ used only for GPU metric\n};\n\n\/\/ This class exists because we want to perform dispatch according to the distribution type at\n\/\/ configuration time, not at prediction time.\nstruct AFTNLogLikDispatcher : public Metric {\n  const char* Name() const override {\n    return \"aft-nloglik\";\n  }\n\n  bst_float Eval(const HostDeviceVector<bst_float>& preds,\n                 const MetaInfo& info,\n                 bool distributed) override {\n    CHECK(metric_) << \"AFT metric must be configured first, with distribution type and scale\";\n    return metric_->Eval(preds, info, distributed);\n  }\n\n  void Configure(const Args& args) override {\n    param_.UpdateAllowUnknown(args);\n    switch (param_.aft_loss_distribution) {\n      case common::ProbabilityDistributionType::kNormal:\n        metric_.reset(\n            new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>(\n                tparam_));\n        break;\n      case common::ProbabilityDistributionType::kLogistic:\n        metric_.reset(new EvalEWiseSurvivalBase<\n                      EvalAFTNLogLik<common::LogisticDistribution>>(tparam_));\n        break;\n      case common::ProbabilityDistributionType::kExtreme:\n        metric_.reset(new EvalEWiseSurvivalBase<\n                      EvalAFTNLogLik<common::ExtremeDistribution>>(tparam_));\n        break;\n      default:\n        LOG(FATAL) << \"Unknown probability distribution\";\n    }\n    metric_->Configure(args);\n  }\n\n  void SaveConfig(Json* p_out) const override {\n    auto& out = *p_out;\n    out[\"name\"] = String(this->Name());\n    out[\"aft_loss_param\"] = ToJson(param_);\n  }\n\n  void LoadConfig(const Json& in) override {\n    FromJson(in[\"aft_loss_param\"], &param_);\n  }\n\n private:\n  AFTParam param_;\n  std::unique_ptr<Metric> metric_;\n};\n\n\nXGBOOST_REGISTER_METRIC(AFTNLogLik, \"aft-nloglik\")\n.describe(\"Negative log likelihood of Accelerated Failure Time model.\")\n.set_body([](const char* param) {\n  return new AFTNLogLikDispatcher();\n});\n\nXGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, \"interval-regression-accuracy\")\n.describe(\"\")\n.set_body([](const char* param) {\n  return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();\n});\n\n}  \/\/ namespace metric\n}  \/\/ namespace 
xgboost\n","avg_line_length":31.7854785479,"max_line_length":94,"alphanum_fraction":0.6953587374} {"size":7631,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n    -- MAGMA (version 2.2.0) --\n       Univ. of Tennessee, Knoxville\n       Univ. of California, Berkeley\n       Univ. of Colorado, Denver\n       @date November 2016\n\n       @generated from magmablas\/zhemm_batched_core.cu, normal z -> s, Sun Nov 20 20:20:31 2016\n\n       @author Ahmad Abdelfattah\n       \n*\/\n#include \"magma_internal.h\"\n#include \"batched_kernel_param.h\"\n\n#define PRECISION_s\n#include \"hemm_template_kernel_batched.cuh\"\n\n\/******************************************************************************\/\nextern \"C\" void \nmagmablas_ssymm_batched_core(\n        magma_side_t side, magma_uplo_t uplo, \n        magma_int_t m, magma_int_t n, \n        float alpha, \n        float **dA_array, magma_int_t ldda,\n        float **dB_array, magma_int_t lddb, \n        float beta, \n        float **dC_array, magma_int_t lddc, \n        magma_int_t roffA, magma_int_t coffA, magma_int_t roffB, magma_int_t coffB, magma_int_t roffC, magma_int_t coffC, \n        magma_int_t batchCount, magma_queue_t queue )\n{\n    if(side == MagmaLeft){\n        hemm_template_batched(\n            side, uplo, m, n, \n            dA_array, ldda,\n            dB_array, lddb, \n            dC_array, lddc, alpha, beta, \n            roffA, coffA, roffB, coffB, roffC, coffC, batchCount, queue);\n    }else{\n        hemm_template_batched(\n            side, uplo, m, n, \n            dA_array, ldda,\n            dB_array, lddb, \n            dC_array, lddc, alpha, beta, \n            roffA, coffA, roffB, coffB, roffC, coffC, batchCount, queue);\n    }\n}\n\n\/***************************************************************************\/\/**\n    Purpose\n    -------\n    SSYMM performs one of the matrix-matrix operations\n\n        C := alpha*A*B + beta*C,\n    or\n        C := alpha*B*A + beta*C,\n\n    where alpha and beta are scalars, A is a symmetric matrix, and\n    B and C are m by n matrices.\n\n    Arguments\n    ---------\n    @param[in]\n    side    magma_side_t\n            On entry, side specifies whether each symmetric matrix A\n            appears on the left or right in the operation as follows:\n\n            SIDE = MagmaLeft    C := alpha*A*B + beta*C,\n            SIDE = MagmaRight   C := alpha*B*A + beta*C.\n\n\n    @param[in]\n    uplo    magma_uplo_t\n            On entry, uplo specifies whether the upper or lower\n            triangular part of each symmetric matrix A is to be\n            referenced as follows:\n\n            uplo = MagmaUpper   Only the upper triangular part of the\n                                symmetric matrix is to be referenced.\n            uplo = MagmaLower   Only the lower triangular part of the\n                                symmetric matrix is to be referenced.\n\n    @param[in]\n    m       INTEGER\n            On entry, m specifies the number of rows of each matrix C.\n            m >= 0.\n\n    @param[in]\n    n       INTEGER\n            On entry, n specifies the number of columns of each matrix C.\n            n >= 0.\n\n    @param[in]\n    alpha   REAL\n            On entry, alpha specifies the scalar alpha.\n\n    @param[in]\n    dA_array      Array of pointers, dimension(batchCount).\n             Each is a REAL array A of DIMENSION ( ldda, ka ), where ka is\n             m when side = MagmaLeft and is n otherwise.\n             Before entry with side = MagmaLeft, the m by m part of\n             the array A must contain the symmetric matrix, such that\n             when uplo = MagmaUpper, the leading m by m upper triangular\n             part of the array A must contain the upper triangular part\n             of the symmetric matrix and the strictly lower triangular\n             part of A is not referenced, and when uplo = MagmaLower,\n             the leading m by m lower triangular part of the array A\n             must contain the lower triangular part of the symmetric\n             matrix and the strictly upper triangular part of A is not\n             referenced.\n             Before entry with side = MagmaRight, the n by n part of\n             the array A must contain 
the symmetric matrix, such that\n when uplo = MagmaUpper, the leading n by n upper triangular\n part of the array A must contain the upper triangular part\n of the symmetric matrix and the strictly lower triangular\n part of A is not referenced, and when uplo = MagmaLower,\n the leading n by n lower triangular part of the array A\n must contain the lower triangular part of the symmetric\n matrix and the strictly upper triangular part of A is not\n referenced.\n\n @param[in]\n ldda INTEGER\n On entry, ldda specifies the first dimension of each A as declared\n in the calling (sub) program.\n When side = MagmaLeft then ldda >= max( 1, m ),\n otherwise ldda >= max( 1, n ).\n\n @param[in]\n dB_array Array of pointers, dimension(batchCount). \n Each is a REAL array B of DIMENSION ( lddb, n ).\n Before entry, the leading m by n part of the array B must\n contain the matrix B.\n\n @param[in]\n lddb INTEGER\n On entry, lddb specifies the first dimension of B as declared\n in the calling (sub) program. LDDB >= max( 1, m ).\n\n @param[in]\n beta REAL\n On entry, BETA specifies the scalar beta. When BETA is\n supplied as zero then C need not be set on input.\n\n @param[in,out]\n dC_array Array of pointers, dimension(batchCount). \n Each is a REAL array C of DIMENSION ( lddc, n ).\n Before entry, the leading m by n part of the array C must\n contain the matrix C, except when beta is zero, in which\n case C need not be set on entry.\n On exit, the array C is overwritten by the m by n updated\n matrix.\n\n @param[in]\n lddc INTEGER\n On entry, lddc specifies the first dimension of C as declared\n in the calling (sub) program. lddc >= max( 1, m ).\n\n @param[in]\n batchCount INTEGER\n The number of matrices to operate on.\n\n @param[in]\n queue magma_queue_t\n Queue to execute in.\n \n\n @ingroup magma_hemm_batched\n*******************************************************************************\/\nextern \"C\" void \nmagmablas_ssymm_batched(\n magma_side_t side, magma_uplo_t uplo, \n magma_int_t m, magma_int_t n, \n float alpha, \n float **dA_array, magma_int_t ldda,\n float **dB_array, magma_int_t lddb, \n float beta, \n float **dC_array, magma_int_t lddc, \n magma_int_t batchCount, magma_queue_t queue )\n{\n magma_int_t nrowa = (side == MagmaLeft ? 
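// Illustrative call (not part of the MAGMA source): computing
// C_i := alpha*A_i*B_i + beta*C_i over a batch, with each A_i an m-by-m
// symmetric matrix whose lower triangle is stored. Allocation of the
// pointer arrays and creation of the queue are assumed to happen
// elsewhere; the array names below are placeholders, not MAGMA symbols.
//
//     magmablas_ssymm_batched( MagmaLeft, MagmaLower, m, n,
//                              alpha, dA_array, m,    // ldda >= max(1,m) for MagmaLeft
//                                     dB_array, m,    // lddb >= max(1,m)
//                              beta,  dC_array, m,    // lddc >= max(1,m)
//                              batchCount, queue );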
m : n);\n magma_int_t info = 0;\n if ( side != MagmaLeft && side != MagmaRight ) {\n info = -1;\n } else if (uplo != MagmaLower && uplo != MagmaUpper ) {\n info = -2;\n } else if ( m < 0 ) {\n info = -3;\n } else if ( n < 0 ) {\n info = -4;\n } else if ( ldda < max(1,nrowa) ) {\n info = -7;\n } else if ( lddb < max(1,m) ) {\n info = -9;\n } else if (lddc < max(1,m)) {\n info = -12;\n }\n\n if (info != 0) {\n magma_xerbla( __func__, -(info) );\n return;\n }\n \n magmablas_ssymm_batched_core( \n side, uplo, \n m, n, \n alpha, dA_array, ldda, \n dB_array, lddb, \n beta, dC_array, lddc, \n 0, 0, 0, 0, 0, 0, \n batchCount, queue );\n}\n","avg_line_length":35.4930232558,"max_line_length":122,"alphanum_fraction":0.5632289346} {"size":1572,"ext":"cu","lang":"Cuda","max_stars_count":19.0,"content":"#include <iostream>\n#include <limits>\n#include <cstdlib>\n\n#include <cuda_runtime.h>\n\n#include \"timer.hpp\"\n\nusing namespace std;\n\n__global__ void calcpi(int threads, long n, double *results) {\n int rank = threadIdx.x;\n results[rank] = 0.0;\n double step = 1.0\/n;\n double x = 0.0;\n\n long lower = rank * n\/threads;\n long upper = (rank + 1) * n\/threads;\n\n for (long i = lower; i < upper; i++) {\n x = (i + 0.5) * step;\n results[rank] += 4.0 \/ (1.0 + x*x);\n }\n}\n\nint main( int argc, char **argv ) {\n long num_steps = 1000000000;\n double result;\n int threads = 1000; \/\/ threads needs to divide num_steps!\n\n cout.precision(numeric_limits<double>::digits10+2);\n \n if (argc > 1) {\n num_steps = atol(argv[1]);\n }\n if (argc > 2) {\n threads = atol(argv[2]);\n }\n\n double step, pi;\n Timer timer;\n \n cout << \"Calculating PI using:\" << endl <<\n \" \" << num_steps << \" slices\" << endl <<\n \" \" << threads << \" CUDA threads\" << endl;\n \n timer.start();\n \n double *sum, *d_sum;\n size_t size = threads*sizeof(double);\n step = 1.0 \/ num_steps;\n sum = (double*)malloc(size);\n\n cudaMalloc((void**)&d_sum, size);\n calcpi<<<1,threads>>>(threads, num_steps, d_sum);\n cudaMemcpy(sum, d_sum, size, cudaMemcpyDeviceToHost);\n cudaFree(d_sum);\n\n result = 0.0;\n\n for (int i=0; i\nvoid CalBatchNormGrad(T *x, T *dy, float *scale, float *save_mean, float *save_variance, T *dx, float *bn_scale,\n float *bn_bias, double epsilon, int N, int C, int H, int W, cudaStream_t cuda_stream);\n#endif \/\/ MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORMGRAD_H_\n","avg_line_length":43.92,"max_line_length":112,"alphanum_fraction":0.7604735883} {"size":10811,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/*\n * Copyright (c) 2019, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace cudf::test;\n\nusing bool8 = cudf::experimental::bool8;\n\n\/\/ =============================================================================\n\/\/ ---- test data --------------------------------------------------------------\n\nnamespace {\nnamespace testdata {\n\n\/\/ ----- 
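// A minimal host-side completion of the reduction in the pi example above
// (sketch; it assumes the per-thread partial sums are summed on the host and
// scaled by `step` afterwards, which matches the kernel accumulating
// unscaled 4/(1+x*x) samples):
//
//     for (int i = 0; i < threads; i++) {
//         result += sum[i];
//     }
//     pi = step * result;           // each sample has slice width `step`
//     cout << "PI = " << pi << endl;
//     free(sum);
//     return 0;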
most numerics\n\ntemplate<typename T>\nauto ascending() {\n return fixed_width_column_wrapper<T>({ std::numeric_limits<T>::lowest(),\n -100, -10, -1, 0, 1, 10, 100,\n std::numeric_limits<T>::max() });\n}\n\ntemplate<typename T>\nauto descending() {\n return fixed_width_column_wrapper<T>({ std::numeric_limits<T>::max(),\n 100, 10, 1, 0, -1, -10, -100,\n std::numeric_limits<T>::lowest() });\n}\n\ntemplate<typename T>\nauto empty() {\n return fixed_width_column_wrapper<T>({ });\n}\n\ntemplate<typename T>\nauto nulls_after() {\n return fixed_width_column_wrapper<T>({ 0, 0 }, { 1, 0 });\n}\n\ntemplate<typename T>\nauto nulls_before() {\n return fixed_width_column_wrapper<T>({ 0, 0 }, { 0, 1 });\n}\n\n\/\/ ----- bool8\n\ntemplate<>\nauto ascending<bool8>() {\n return fixed_width_column_wrapper<bool8>({ false, false, true, true });\n}\n\ntemplate<>\nauto descending<bool8>() {\n return fixed_width_column_wrapper<bool8>({ true, true, false, false });\n}\n\n\/\/ ----- timestamp\n\ntemplate<typename T>\nfixed_width_column_wrapper<T> ascending_timestamp()\n{\n return fixed_width_column_wrapper<T>({ T::min().time_since_epoch().count(),\n T::max().time_since_epoch().count() });\n}\n\ntemplate<typename T>\nfixed_width_column_wrapper<T> descending_timestamp()\n{\n return fixed_width_column_wrapper<T>({ T::max().time_since_epoch().count(),\n T::min().time_since_epoch().count() });\n}\n\ntemplate<> auto ascending<cudf::timestamp_D>() { return ascending_timestamp<cudf::timestamp_D>(); }\ntemplate<> auto ascending<cudf::timestamp_s>() { return ascending_timestamp<cudf::timestamp_s>(); }\ntemplate<> auto ascending<cudf::timestamp_ms>() { return ascending_timestamp<cudf::timestamp_ms>(); }\ntemplate<> auto ascending<cudf::timestamp_us>() { return ascending_timestamp<cudf::timestamp_us>(); }\ntemplate<> auto ascending<cudf::timestamp_ns>() { return ascending_timestamp<cudf::timestamp_ns>(); }\n\ntemplate<> auto descending<cudf::timestamp_D>() { return descending_timestamp<cudf::timestamp_D>(); }\ntemplate<> auto descending<cudf::timestamp_s>() { return descending_timestamp<cudf::timestamp_s>(); }\ntemplate<> auto descending<cudf::timestamp_ms>() { return descending_timestamp<cudf::timestamp_ms>(); }\ntemplate<> auto descending<cudf::timestamp_us>() { return descending_timestamp<cudf::timestamp_us>(); }\ntemplate<> auto descending<cudf::timestamp_ns>() { return descending_timestamp<cudf::timestamp_ns>(); }\n\n\/\/ ----- string_view\n\ntemplate<>\nauto ascending<cudf::string_view>() {\n return strings_column_wrapper({ \"A\", \"B\" });\n}\n\ntemplate<>\nauto descending<cudf::string_view>() {\n return strings_column_wrapper({ \"B\", \"A\" });\n}\n\ntemplate<>\nauto empty<cudf::string_view>() {\n return strings_column_wrapper({ });\n}\n\ntemplate<>\nauto nulls_after<cudf::string_view>() {\n return strings_column_wrapper({ \"identical\", \"identical\" }, { 1, 0 });\n}\n\ntemplate<>\nauto nulls_before<cudf::string_view>() {\n return strings_column_wrapper({ \"identical\", \"identical\" }, { 0, 1 });\n}\n\n} \/\/ namespace testdata\n} \/\/ anonymous namespace\n\n\/\/ =============================================================================\n\/\/ ---- tests ------------------------------------------------------------------\n\ntemplate <typename T>\nstruct IsSortedTest : public BaseFixture {};\n\nTYPED_TEST_CASE(IsSortedTest, ComparableTypes);\n\nTYPED_TEST(IsSortedTest, NoColumns)\n{\n using T = TypeParam;\n\n cudf::table_view in{std::vector<cudf::column_view>{ }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n}\n\nTYPED_TEST(IsSortedTest, NoRows)\n{\n using T = TypeParam;\n\n if (std::is_same<T, cudf::string_view>::value)\n {\n \/\/ strings_column_wrapper does not yet support empty columns.\n return;\n }\n else\n {\n auto col1 = testdata::empty<T>();\n auto col2 = testdata::empty<T>();\n\n cudf::table_view in{{ col1, col2 }};\n std::vector<cudf::order> order{ cudf::order::ASCENDING,\n cudf::order::DESCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n }\n}\n\n\nTYPED_TEST(IsSortedTest, 
Ascending)\n{\n using T = TypeParam;\n\n auto col1 = testdata::ascending<T>();\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ cudf::order::ASCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n}\n\nTYPED_TEST(IsSortedTest, AscendingFalse)\n{\n using T = TypeParam;\n\n auto col1 = testdata::descending<T>();\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ cudf::order::ASCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, { });\n\n EXPECT_EQ(false, actual);\n}\n\nTYPED_TEST(IsSortedTest, Descending)\n{\n using T = TypeParam;\n\n auto col1 = testdata::descending<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ cudf::order::DESCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n}\n\nTYPED_TEST(IsSortedTest, DescendingFalse)\n{\n using T = TypeParam;\n\n auto col1 = testdata::ascending<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ cudf::order::DESCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(false, actual);\n}\n\nTYPED_TEST(IsSortedTest, NullsAfter)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_after<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::AFTER };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n}\n\nTYPED_TEST(IsSortedTest, NullsAfterFalse)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_before<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::AFTER };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(false, actual);\n}\n\nTYPED_TEST(IsSortedTest, NullsBefore)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_before<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::BEFORE };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(true, actual);\n}\n\nTYPED_TEST(IsSortedTest, NullsBeforeFalse)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_after<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::BEFORE };\n\n auto actual = cudf::experimental::is_sorted(in, order, null_precedence);\n\n EXPECT_EQ(false, actual);\n}\n\nTYPED_TEST(IsSortedTest, OrderArgsTooFew)\n{\n using T = TypeParam;\n\n auto col1 = testdata::ascending<T>();\n auto col2 = testdata::ascending<T>();\n\n cudf::table_view in{{ col1, col2 }};\n std::vector<cudf::order> order{ cudf::order::ASCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n EXPECT_THROW(cudf::experimental::is_sorted(in, order, null_precedence),\n cudf::logic_error);\n}\n\nTYPED_TEST(IsSortedTest, OrderArgsTooMany)\n{\n using T = TypeParam;\n\n auto col1 = testdata::ascending<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ cudf::order::ASCENDING,\n cudf::order::ASCENDING };\n std::vector<cudf::null_order> null_precedence{ };\n\n EXPECT_THROW(cudf::experimental::is_sorted(in, order, null_precedence),\n cudf::logic_error);\n}\n\nTYPED_TEST(IsSortedTest, NullOrderArgsTooFew)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_before<T>();\n auto col2 = testdata::nulls_before<T>();\n\n cudf::table_view in{{ col1, 
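// The contract exercised by the *TooFew/*TooMany tests: `order` must either
// be empty or hold one entry per column, and likewise `null_precedence`; any
// other size raises cudf::logic_error. A well-formed two-column call, reusing
// the testdata wrappers above (sketch):
//
//     auto c1 = testdata::ascending<T>();
//     auto c2 = testdata::nulls_after<T>();
//     cudf::table_view t{{ c1, c2 }};
//     std::vector<cudf::order> ord{ cudf::order::ASCENDING,
//                                   cudf::order::ASCENDING };
//     std::vector<cudf::null_order> np{ cudf::null_order::AFTER,
//                                       cudf::null_order::AFTER };
//     bool sorted = cudf::experimental::is_sorted(t, ord, np);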
col2 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::BEFORE };\n\n EXPECT_THROW(cudf::experimental::is_sorted(in, order, null_precedence),\n cudf::logic_error);\n}\n\nTYPED_TEST(IsSortedTest, NullOrderArgsTooMany)\n{\n using T = TypeParam;\n\n auto col1 = testdata::nulls_before<T>();\n\n cudf::table_view in{{ col1 }};\n std::vector<cudf::order> order{ };\n std::vector<cudf::null_order> null_precedence{ cudf::null_order::BEFORE,\n cudf::null_order::BEFORE };\n\n EXPECT_THROW(cudf::experimental::is_sorted(in, order, null_precedence),\n cudf::logic_error);\n}\n\ntemplate <typename T>\nstruct IsSortedFixedWidthOnly : public cudf::test::BaseFixture {};\n\nTYPED_TEST_CASE(IsSortedFixedWidthOnly, cudf::test::FixedWidthTypes);\n\n","avg_line_length":29.1401617251,"max_line_length":103,"alphanum_fraction":0.6458236981} {"size":1046,"ext":"cu","lang":"Cuda","max_stars_count":27.0,"content":"\/* tex_anim2d.cu\n * 2-dim. GPU texture animation \n * Ernest Yeung ernestyalumni@gmail.com\n * 20160720\n *\/\n#include \"tex_anim2d.h\"\n \nint iterationCount = 0 ;\n\n\n\/\/ interactions\n\nvoid keyboard_func( unsigned char key, int x, int y) {\n\n\tif (key==27) {\n\/\/\t\tstd::exit(0) ;\n\t\texit(0);\n\t}\n\tglutPostRedisplay();\n}\n\t\nvoid mouse_func( int button, int state, int x, int y ) {\n\tglutPostRedisplay();\n}\n\nvoid idle() {\n\t++iterationCount;\n\tglutPostRedisplay();\n}\n\nvoid printInstructions() {\n\tprintf(\"2 dim. texture animation \\n\"\n\n\t\t\t\"Exit : Esc\\n\"\n\t\n\t);\n}\n\n\/\/ make* functions make functions to pass into OpenGL (note OpenGL is inherently a C API)\n\nvoid make_draw_texture(int w, int h) {\n\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, \n\t\tGL_UNSIGNED_BYTE, NULL);\n\tglEnable(GL_TEXTURE_2D);\n\tglBegin(GL_QUADS);\n\tglTexCoord2f(0.0f, 0.0f); glVertex2f(0,0);\n\tglTexCoord2f(0.0f, 1.0f); glVertex2f(0,h);\n\tglTexCoord2f(1.0f, 1.0f); glVertex2f(w,h);\n\tglTexCoord2f(1.0f, 0.0f); glVertex2f(w,0);\n\tglEnd();\n\tglDisable(GL_TEXTURE_2D);\n}\t\n","avg_line_length":19.7358490566,"max_line_length":88,"alphanum_fraction":0.6644359465} {"size":1144,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n\n__global__ void mul_mat(int *a, int *b, int *c, int n, int m){\n\n \/\/ a = matrix A\n \/\/ b = matrix B\n \/\/ c = matrix C\n\n int x = blockDim.x * blockIdx.x + threadIdx.x;\n int y = blockDim.y * blockIdx.y + threadIdx.y;\n\n int value = 0;\n\n if (x >= n || y >= m){\n return;\n }\n\n for(int i = 0; i < 1024; i++){\n value += a[x * n + i] * b[y + m * i];\n }\n\n c[x * n + y] = value;\n\n}\n\nvoid mat_mul_cuda(int *a, int *b, int *c, int n, int m){\n\n int *d_a, *d_b, *d_c;\n\n size_t size = m * n * sizeof(int);\n cudaMalloc(&d_a, size);\n cudaMalloc(&d_b, size);\n cudaMalloc(&d_c, size);\n\n cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);\n cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);\n cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);\n\n int threads = 16;\n int blocks = (n + threads - 1) \/ threads;\n dim3 BLOCKS (blocks, blocks);\n dim3 THREADS(threads, threads);\n mul_mat<<>>(d_a, d_b, d_c, n, m);\n cudaDeviceSynchronize();\n\n cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);\n\n cudaFree(d_a);\n cudaFree(d_b);\n cudaFree(d_c);\n\n}","avg_line_length":22.0,"max_line_length":62,"alphanum_fraction":0.5681818182} {"size":2955,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \"bootstrap.cuh\"\n#include \"random_gen.cuh\"\n#include \n\nnamespace NKernel {\n\n__global__ void PoissonBootstrapImpl(const float lambda, ui64* seeds, ui32 
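// Besides the bounds guard (fixed above to use >=), the inner product in
// mul_mat is hard-coded to 1024 terms and the output pitch mixes n and m, so
// the kernel is only correct for square 1024-wide inputs. A sketch that takes
// the shared dimension k explicitly (row-major; C is n rows by m columns;
// mul_mat_k is a hypothetical name, not part of the original file):
//
//     __global__ void mul_mat_k(const int* a, const int* b, int* c,
//                               int n, int m, int k) {
//         int x = blockDim.x * blockIdx.x + threadIdx.x;   // row of C
//         int y = blockDim.y * blockIdx.y + threadIdx.y;   // col of C
//         if (x >= n || y >= m) return;
//         int value = 0;
//         for (int i = 0; i < k; i++)
//             value += a[x * k + i] * b[i * m + y];        // consistent pitches
//         c[x * m + y] = value;
//     }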
seedSize, float* weights, ui32 size) {\n\n seeds += blockIdx.x * blockDim.x + threadIdx.x;\n ui32 i = blockIdx.x * blockDim.x + threadIdx.x;\n ui64 s = seeds[0];\n while (i < size) {\n float w = weights[i];\n weights[i] = w * NextPoisson(&s, lambda);\n i += gridDim.x * blockDim.x;\n }\n seeds[0] = s;\n\n}\n\n__global__ void GammaBootstrapImpl(const float scale, const float shape, ui64* seeds, ui32 seedSize, float* weights, ui32 size) {\n\n ui32 i = blockIdx.x * blockDim.x + threadIdx.x;\n seeds += i;\n ui64 s = seeds[0];\n\n while (i < size) {\n float w = weights[i];\n weights[i] = w * NextGamma(&s, scale, shape);\n i += gridDim.x * blockDim.x;\n }\n seeds[0] = s;\n}\n\n__global__ void BayesianBootstrapImpl(ui64* seeds, ui32 seedSize, float* weights, ui32 size, float temperature) {\n\n seeds += blockIdx.x * blockDim.x + threadIdx.x;\n ui32 i = blockIdx.x * blockDim.x + threadIdx.x;\n\n ui64 s = seeds[0];\n\n while (i < size) {\n float w = weights[i];\n const float tmp = (-log(NextUniform(&s) + 1e-20f));\n weights[i] = w * (temperature != 1.0f ? powf(tmp, temperature) : tmp);\n i += gridDim.x * blockDim.x;\n }\n seeds[0] = s;\n}\n\n__global__ void UniformBootstrapImpl(const float sampleRate, ui64* seeds, ui32 seedSize, float* weights, ui32 size) {\n\n ui32 i = blockIdx.x * blockDim.x + threadIdx.x;\n seeds += i;\n ui64 s = seeds[0];\n while (i < size) {\n const float w = weights[i];\n const float flag = (NextUniform(&s) < sampleRate) ? 1.0f : 0.0f;\n weights[i] = w * flag;\n i += gridDim.x * blockDim.x;\n }\n seeds[0] = s;\n}\n\nvoid PoissonBootstrap(const float lambda, ui64* seeds, ui32 seedsSize, float* weights, ui32 weighsSize, TCudaStream stream) {\n const ui32 blockSize = 256;\n const ui32 numBlocks = CeilDivide(seedsSize, blockSize);\n PoissonBootstrapImpl<<>>(lambda, seeds, seedsSize, weights, weighsSize);\n}\n\nvoid UniformBootstrap(const float sampleRate, ui64* seeds, ui32 seedSize, float* weights, ui32 size, TCudaStream stream) {\n const ui32 blockSize = 256;\n const ui32 numBlocks = CeilDivide(seedSize, blockSize);\n UniformBootstrapImpl<<>>(sampleRate, seeds, seedSize, weights, size);\n}\n\nvoid BayesianBootstrap(ui64* seeds, ui32 seedSize, float* weights, ui32 size, float temperature, TCudaStream stream) {\n const ui32 blockSize = 256;\n const ui32 numBlocks =CeilDivide(seedSize, blockSize);\n\/\/ GammaBootstrapImpl<<>>(1.0f, 1.0f, seeds, seedSize, weights, size);\n BayesianBootstrapImpl<<>>(seeds, seedSize, weights, size, temperature);\n}\n}\n\n","avg_line_length":34.7647058824,"max_line_length":129,"alphanum_fraction":0.6521150592} {"size":28489,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \"tensoroperations.cuh\"\n\nconst int THREAD_SIZE_XY = 1 << 10;\nconst int THREAD_SIZE_Z = 1 << 6;\n\n__global__\nvoid addElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < size) ptr3[idx] = ptr1[idx] + ptr2[idx];\n}\n\nstd::unique_ptr CUDAaddElementwise(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr2, int ptr_size) {\n int gpu_ptr1_bytes = ptr_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int dimGridX = (ptr_size + THREAD_SIZE_XY - 1) \/ THREAD_SIZE_XY;\n addElementwiseD 
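// Each bootstrap kernel above offsets `seeds` by the global thread id and
// writes the advanced state back, so the seed buffer must provide at least
// one ui64 per launched thread (the launch sizes its grid from the seed
// count, and the kernels grid-stride over the weights). A host-side sketch;
// the allocation and initialization names are placeholders:
//
//     ui64* seeds;
//     const ui32 seedCount = /* numBlocks * blockSize, >= one state/thread */;
//     cudaMalloc(&seeds, seedCount * sizeof(ui64));
//     // ... fill seeds with distinct nonzero values, then:
//     UniformBootstrap(0.5f, seeds, seedCount, weights, size, stream);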
<<< dimGridX, THREAD_SIZE_XY >>> (ptr_size, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n \n return out_ptr;\n}\n\n__global__\nvoid subtractElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < size) ptr3[idx] = ptr1[idx] - ptr2[idx];\n}\n\nstd::unique_ptr CUDAsubtractElementwise(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr2, int ptr_size) {\n int gpu_ptr1_bytes = ptr_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int dimGridX = (ptr_size + THREAD_SIZE_XY - 1) \/ THREAD_SIZE_XY;\n subtractElementwiseD <<< dimGridX, THREAD_SIZE_XY >>> (ptr_size, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n \n return out_ptr;\n}\n\n__global__\nvoid multiplyElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < size) ptr3[idx] = ptr1[idx] * ptr2[idx];\n}\n\nstd::unique_ptr CUDAmultiplyElementwise(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr2, int ptr_size) {\n int gpu_ptr1_bytes = ptr_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int dimGridX = (ptr_size + THREAD_SIZE_XY - 1) \/ THREAD_SIZE_XY;\n multiplyElementwiseD <<< dimGridX, THREAD_SIZE_XY >>> (ptr_size, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n \n return out_ptr;\n}\n\n__global__\nvoid divideElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < size) ptr3[idx] = ptr1[idx] \/ ptr2[idx];\n}\n\nstd::unique_ptr CUDAdivideElementwise(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr2, int ptr_size) {\n int gpu_ptr1_bytes = ptr_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int dimGridX = (ptr_size + THREAD_SIZE_XY - 1) \/ THREAD_SIZE_XY;\n divideElementwiseD <<< dimGridX, THREAD_SIZE_XY >>> (ptr_size, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n \n return 
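// The add/subtract/multiply/divide/power wrappers in this file differ only in
// the arithmetic op applied per element. A functor-templated kernel is one way
// to collapse that duplication (sketch, not part of the original file; device
// lambdas require nvcc's --extended-lambda flag):
//
//     template <typename Op>
//     __global__ void elementwiseD(int size, const float* a, const float* b,
//                                  float* out, Op op) {
//         int idx = blockIdx.x * blockDim.x + threadIdx.x;
//         if (idx < size) out[idx] = op(a[idx], b[idx]);
//     }
//
//     // Usage:
//     // elementwiseD<<<dimGridX, THREAD_SIZE_XY>>>(ptr_size, a, b, out,
//     //     [] __device__ (float x, float y) { return x + y; });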
out_ptr;\n}\n\n__global__\nvoid powerElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < size) ptr3[idx] = std::pow(ptr1[idx], ptr2[idx]);\n}\n\nstd::unique_ptr CUDApowerElementwise(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr2, int ptr_size) {\n int gpu_ptr1_bytes = ptr_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int dimGridX = (ptr_size + THREAD_SIZE_XY - 1) \/ THREAD_SIZE_XY;\n powerElementwiseD <<< dimGridX, THREAD_SIZE_XY >>> (ptr_size, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n \n return out_ptr;\n}\n\n__global__\nvoid transposeD(int cols, int rows, int depths, float* ptr1, float* ptr2) {\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int depth = blockIdx.z * blockDim.z + threadIdx.z;\n \/\/ Of course this is going to need a Z coordinate for the infinite dimensions it can take\n if ((col < cols) && (row < rows) && (depth < depths)) ptr2[depth * rows * cols + row * cols + col] = ptr1[depth * rows * cols + col * rows + row];\n}\n\n\/\/ I need to reformat all of the other functions to fit this\nstd::unique_ptr CUDAtranspose(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size) {\n int cols = in_ptr1_dims[0];\n int rows = in_ptr1_dims[1];\n \/\/ Is there a faster way to do this\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) {\n depths *= in_ptr1_dims[i];\n }\n\n int gpu_ptr1_bytes = ptr1_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int grid_cols = (cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n transposeD <<< gridSize, threadSize >>> (cols, rows, depths, gpu_ptr1, gpu_ptr2);\n\n std::unique_ptr out_ptr(new float[ptr1_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n \n return out_ptr;\n} \n\n__global__\nvoid multiplyD(int cols, int shared, int rows, int depths, float* ptr1, float* ptr2, float* ptr3) {\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int depth = blockIdx.z * blockDim.z + threadIdx.z;\n\n float sum;\n if ((col < cols) && (row < rows) && (depth < depths)) {\n sum = 0;\n for (int i = 0; i < shared; i++) {\n sum += ptr1[depth * rows * cols + row * shared + i] * ptr2[depth * rows * cols + i * cols + col];\n }\n ptr3[depth * rows * cols + row * cols + 
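// Note: every launch configuration in this file builds
// dim3 gridSize(grid_cols, grid_cols, grid_depths) even though grid_rows is
// computed just above it, so for non-square inputs the y-dimension is sized
// from the wrong extent. The presumably intended configuration is:
//
//     dim3 gridSize(grid_cols, grid_rows, grid_depths);
//     dim3 threadSize(std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z),
//                     std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z),
//                     THREAD_SIZE_Z);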
col] = sum;\n }\n}\n\nstd::unique_ptr CUDAmultiply(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr& in_ptr2, std::unique_ptr& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size) {\n int ptr1_rows = in_ptr1_dims[1];\n int ptr2_cols = in_ptr2_dims[0];\n int shared_size = in_ptr1_dims[0];\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) { \/\/ In theory the rest of the dims after should be the exact same if we assume they are correct\n depths *= in_ptr1_dims[i];\n } \n\n int gpu_ptr1_bytes = ptr1_size * sizeof(float);\n int gpu_ptr2_bytes = ptr2_size * sizeof(float);\n int gpu_ptr3_bytes = depths * ptr1_rows * ptr2_cols * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr3_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice);\n\n int grid_cols = (ptr2_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (ptr1_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n multiplyD <<< gridSize, threadSize >>> (ptr2_cols, shared_size, ptr1_rows, depths, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[depths * ptr1_rows * ptr2_cols]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr3_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n\n return out_ptr;\n}\n\n__global__\nvoid rotateD(int cols, int rows, int depths, float* ptr1, float* ptr2) {\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int depth = blockIdx.z * blockDim.z + threadIdx.z;\n\n if ((col < cols) && (row < rows) && (depth < depths)) ptr2[depth * rows * cols + (rows - row - 1) * cols + (cols - col - 1)] = ptr1[depth * rows * cols + row * cols + col];\n}\n\nstd::unique_ptr CUDArotate(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size) {\n int ptr1_cols = in_ptr1_dims[0];\n int ptr1_rows = in_ptr1_dims[1];\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) {\n depths *= in_ptr1_dims[i];\n }\n\n int gpu_ptr_bytes = ptr1_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n cudaMalloc(&gpu_ptr1, gpu_ptr_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr_bytes, cudaMemcpyHostToDevice);\n\n int grid_cols = (ptr1_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (ptr1_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n rotateD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, gpu_ptr1, gpu_ptr2);\n\n std::unique_ptr out_ptr(new float[ptr1_size]);\n cudaMemcpy(out_ptr.get(), 
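// In multiplyD above, the per-depth strides for ptr1 and ptr2 both reuse
// rows * cols, but A holds rows * shared and B holds shared * cols elements
// per slice, so for shared != cols (or rows) the reads walk the wrong slices.
// A stride-correct inner loop would be (sketch):
//
//     // A: [depths][rows][shared], B: [depths][shared][cols],
//     // C: [depths][rows][cols]
//     float sum = 0;
//     for (int i = 0; i < shared; i++) {
//         sum += ptr1[depth * rows * shared + row * shared + i]
//              * ptr2[depth * shared * cols + i * cols + col];
//     }
//     ptr3[depth * rows * cols + row * cols + col] = sum;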
gpu_ptr2, gpu_ptr_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n\n return out_ptr;\n}\n\n__global__\nvoid maxPoolingD(int cols, int rows, int depths, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows, float* ptr1, float* ptr2) {\n int col = blockIdx.x * blockDim.x + threadIdx.x; \/\/ Col of the unpooled ptr\n int row = blockIdx.y * blockDim.y + threadIdx.y; \/\/ Row of the unpooled ptr\n int depth = blockIdx.z * blockDim.z + threadIdx.z; \/\/ Depth of the unpooled ptr\n\n if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {\n if ((col % stride_cols == 0) && (row % stride_rows == 0)) {\n\n int max = ptr1[depth * rows * cols + row * cols + col];\n int comparison;\n\n for (int i = 0; i < kernel_rows; i++) {\n for (int j = 0; j < kernel_cols; j++) {\n\n comparison = ptr1[depth * rows * cols + (row + i) * cols + (col + j)];\n if (max < comparison) max = comparison; \n \n }\n }\n\n int pooled_cols_size = (cols - kernel_cols + stride_cols) \/ stride_cols;\n int pooled_rows_size = (rows - kernel_rows + stride_rows) \/ stride_rows;\n\n int pooled_col = (col - kernel_cols + stride_cols) \/ stride_cols;\n if (pooled_col < 0) pooled_col = 0;\n int pooled_row = (row - kernel_rows + stride_rows) \/ stride_rows;\n if (pooled_row < 0) pooled_row = 0;\n\n ptr2[depth * pooled_rows_size * pooled_cols_size + pooled_row * pooled_cols_size + pooled_col] = max;\n }\n }\n}\n\nstd::unique_ptr CUDAmaxPooling(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows) {\n \/\/ This is raw dimensions\n int ptr1_cols = in_ptr1_dims[0];\n int ptr1_rows = in_ptr1_dims[1];\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) {\n depths *= in_ptr1_dims[i];\n }\n\n \/\/ This is pooled dims\n int ptr2_cols = (ptr1_cols - kernel_cols + stride_cols) \/ stride_cols;\n int ptr2_rows = (ptr1_rows - kernel_rows + stride_rows) \/ stride_rows;\n int ptr2_size = ptr2_cols * ptr2_rows * depths;\n\n int gpu_ptr1_bytes = ptr1_size * sizeof(float);\n int gpu_ptr2_bytes = ptr2_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int grid_cols = (ptr1_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (ptr1_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n maxPoolingD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, kernel_rows, kernel_cols, stride_cols, stride_cols, gpu_ptr1, gpu_ptr2);\n\n std::unique_ptr out_ptr(new float[ptr2_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr2_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n \n return out_ptr;\n}\n\n__global__\nvoid poolingDerivD(int cols, int rows, int depths, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows, float* ptr1, float* ptr2, float* ptr3) {\n int col = blockIdx.x * blockDim.x + threadIdx.x; \/\/ Col of the unpooled ptr\n int row = blockIdx.y * blockDim.y + threadIdx.y; \/\/ Row of 
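// The pooled extents used above follow (size - kernel + stride) / stride.
// Worked example: cols = 7, kernel_cols = 3, stride_cols = 2 gives
// (7 - 3 + 2) / 2 = 3 output columns, i.e. windows anchored at cols 0, 2, 4.
//
//     int pooled_cols = (7 - 3 + 2) / 2;   // == 3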
the unpooled ptr\n int depth = blockIdx.z * blockDim.z + threadIdx.z; \/\/ Depth of the unpooled ptr\n\n if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {\n if ((col % stride_cols == 0) && (row % stride_rows == 0)) {\n\n int max = ptr1[depth * rows * cols + row * cols + col];\n int argmax_col = 0;\n int argmax_row = 0;\n int comparison;\n\n for (int i = 0; i < kernel_rows; i++) {\n for (int j = 0; j < kernel_cols; j++) {\n\n comparison = ptr1[depth * rows * cols + (row + i) * cols + (col + j)];\n if (max < comparison) {\n max = comparison;\n argmax_col = j;\n argmax_row = i;\n }\n\n }\n } \n\n int pooled_cols_size = (cols - kernel_cols + stride_cols) \/ stride_cols;\n int pooled_rows_size = (rows - kernel_rows + stride_rows) \/ stride_rows;\n\n int pooled_col = (col - kernel_cols + stride_cols) \/ stride_cols;\n if (pooled_col < 0) pooled_col = 0;\n int pooled_row = (row - kernel_rows + stride_rows) \/ stride_rows;\n if (pooled_row < 0) pooled_row = 0;\n\n ptr3[depth * rows * cols + (row + argmax_row) * cols + (col + argmax_col)] += ptr2[depth * pooled_rows_size * pooled_cols_size + pooled_row * pooled_cols_size + pooled_col];\n\n }\n }\n}\n\nstd::unique_ptr CUDApoolingDeriv(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr& in_ptr2, std::unique_ptr& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows) {\n int ptr1_cols = in_ptr1_dims[0]; \/\/ This is the full size unkerneled\n int ptr1_rows = in_ptr1_dims[1];\n int ptr2_cols = in_ptr2_dims[0]; \/\/ This is the kernel size!\n int ptr2_rows = in_ptr2_dims[1];\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) {\n depths *= in_ptr1_dims[i];\n }\n\n int gpu_ptr1_bytes = ptr1_size * sizeof(float);\n int gpu_ptr2_bytes = ptr2_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n float* gpu_ptr3;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);\n cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice);\n\n \/\/ Now what memory blocks are we going to use for this?\n int grid_cols = (ptr1_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (ptr1_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n poolingDerivD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, kernel_cols, kernel_rows, stride_cols, stride_rows, gpu_ptr1, gpu_ptr2, gpu_ptr3);\n\n std::unique_ptr out_ptr(new float[ptr1_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n cudaFree(gpu_ptr3);\n\n return out_ptr;\n}\n\n__global__\nvoid dupeD(int cols, int rows, int depths, int duped_depths, float* ptr1, float* ptr2) {\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int depth = blockIdx.z * blockDim.z + threadIdx.z; \/\/ Now represents the depth of the unstreteched size\n\n if ((col < cols) && (row < rows) && (depth < 
duped_depths)) {\n int ptr1_depth = depth % depths;\n ptr2[depth * rows * cols + row * cols + col] = ptr1[ptr1_depth * rows * cols + row * cols + col];\n }\n}\n\n\/\/ This is the broken function\nstd::unique_ptr CUDAdupe(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, int dupe_size) {\n int ptr1_cols = in_ptr1_dims[0];\n int ptr1_rows = in_ptr1_dims[1];\n int depths = 1;\n for (int i = 2; i < in_ptr1_dims_size; i++) {\n depths *= in_ptr1_dims[i];\n }\n\n int ptr2_depths = dupe_size * depths;\n int ptr2_size = ptr1_cols * ptr1_rows * ptr2_depths;\n\n int gpu_ptr1_bytes = ptr1_size * sizeof(float);\n int gpu_ptr2_bytes = ptr2_size * sizeof(float);\n\n float* gpu_ptr1;\n float* gpu_ptr2;\n cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);\n cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\n int grid_cols = (ptr1_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_rows = (ptr1_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n int grid_depths = (ptr2_depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z; \/\/ This should be the depths of the duped one\n\n dim3 gridSize(grid_cols, grid_cols, grid_depths);\n dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n dupeD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, ptr2_depths, gpu_ptr1, gpu_ptr2);\n\n std::unique_ptr out_ptr(new float[ptr2_size]);\n cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr2_bytes, cudaMemcpyDeviceToHost);\n\n cudaFree(gpu_ptr1);\n cudaFree(gpu_ptr2);\n\n return out_ptr;\n}\n\n__global__\n\/\/ What row, col and depth are we choosing? 
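// dupeD above maps ptr1_depth = depth % depths, which repeats the whole depth
// stack end-to-end ([stack][stack]...). If the intent (per the note at the
// bottom of this file) is instead to repeat each source slice dupe_size times
// in place, the index map would be (sketch, an assumption about the intended
// semantics):
//
//     int ptr1_depth = depth / dupe_size;   // [slice0 x dupe][slice1 x dupe]...
//     ptr2[depth * rows * cols + row * cols + col] =
//         ptr1[ptr1_depth * rows * cols + row * cols + col];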
The big one\nvoid convolutionD(int cols, int rows, int kernel_cols, int kernel_rows, int depths, int stride_cols, int stride_rows, float* ptr1, float* ptr2, float* ptr3) {\n \/\/ In here we take the correct stride and perform the convolution over that desired block for each element\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int depth = blockIdx.z * blockDim.z + threadIdx.z;\n\n if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {\n\n if ((col % stride_cols == 0) && (row % stride_rows == 0)) {\n\n float weighted = 0;\n\n for (int i = 0; i < kernel_rows; i++) {\n for (int j = 0; j < kernel_cols; j++) {\n\n weighted += ptr1[depth * rows * cols + (row + i) * cols + (col + j)] * ptr2[depth * rows * cols + i * kernel_cols + j]; \/\/ Now I have to do the dot product of the kernel and the convolved\n\n }\n }\n\n int weighted_cols_size = (cols - kernel_cols + stride_cols) \/ stride_cols;\n int weighted_rows_size = (rows - kernel_rows + stride_rows) \/ stride_rows;\n\n int weighted_col = (col - kernel_cols + stride_cols) \/ stride_cols;\n if (weighted_col < 0) weighted_col = 0;\n int weighted_row = (row - kernel_rows + stride_rows) \/ stride_rows;\n if (weighted_row < 0) weighted_row = 0; \n\n ptr3[depth * weighted_rows_size * weighted_cols_size + weighted_row * weighted_cols_size + weighted_col] = weighted;\n\n }\n\n }\n}\n\n\/\/ No bias is required for this\n\/\/ std::unique_ptr CUDAconvolution(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr& in_ptr2, std::unique_ptr& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int stride_cols, int stride_rows) {\n\n\/\/ \/\/ Convolve layer\n\/\/ int ptr1_cols = in_ptr1_dims[0];\n\/\/ int ptr1_rows = in_ptr1_dims[1];\n\/\/ int ptr1_depths = 1;\n\/\/ for (int i = 0; i < in_ptr1_dims_size; i++) { \n\/\/ ptr1_depths *= in_ptr1_dims[i];\n\/\/ }\n\n\/\/ \/\/ Kernel\n\/\/ int ptr2_cols = in_ptr2_dims[0];\n\/\/ int ptr2_rows = in_ptr2_dims[1];\n\/\/ int ptr2_depths = 1;\n\/\/ for (int i = 0; i < in_ptr2_dims_size; i++) { \n\/\/ ptr2_depths *= in_ptr2_dims[i];\n\/\/ }\n\n\/\/ \/\/ This will be the amount to scale the pointers for its depth size\n\/\/ int dupe_ptr1 = 1; \n\/\/ if (in_ptr2_dims_size > 3) dupe_ptr1 = in_ptr2_dims[3];\n\/\/ int dupe_ptr2 = 1;\n\/\/ if (in_ptr1_dims_size > 3) dupe_ptr2 = in_ptr1_dims[3];\n\n\/\/ \/\/ We see that the dupe function duplicates every depth in each fourth dimension\n\/\/ std::unique_ptr ptr1_duped = CUDAdupe(in_ptr1, in_ptr1_dims, in_ptr1_dims_size, ptr1_size, dupe_ptr1); \/\/ This will be the ptr1 that has been scaled to match the filter sizes\n\/\/ std::unique_ptr ptr2_duped = CUDAdupe(in_ptr2, in_ptr2_dims, in_ptr2_dims_size, ptr2_size, dupe_ptr2); \/\/ This will scale the kernel to match the amount of input blocks there are\n\n\/\/ int ptr1_duped_size = ptr1_size * dupe_ptr1;\n\/\/ int ptr2_duped_size = ptr2_size * dupe_ptr2; \/\/ This part could be the problem?\n\n\/\/ \/\/ This part is all safe\n\/\/ int ptr3_cols = (ptr1_cols - ptr2_cols + stride_cols) \/ stride_cols;\n\/\/ int ptr3_rows = (ptr1_rows - ptr2_rows + stride_rows) \/ stride_rows;\n\/\/ int ptr3_depths = dupe_ptr1 * ptr1_depths;\n\/\/ int ptr3_size = ptr3_depths * ptr3_rows * ptr3_cols;\n\n\/\/ int gpu_ptr1_bytes = ptr1_duped_size * sizeof(float); \/\/ These must be the wrong allocation sizes\n\/\/ int gpu_ptr2_bytes = ptr2_duped_size * sizeof(float);\n\/\/ int gpu_ptr3_bytes = ptr3_size * 
sizeof(float);\n\n\/\/ float* gpu_ptr1; \/\/ Convolved\n\/\/ float* gpu_ptr2; \/\/ Kernel\n\/\/ float* gpu_ptr3; \/\/ Output\n\/\/ cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);\n\/\/ cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);\n\/\/ cudaMalloc(&gpu_ptr3, gpu_ptr3_bytes);\n\/\/ cudaMemcpy(gpu_ptr1, ptr1_duped.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);\n\/\/ cudaMemcpy(gpu_ptr2, ptr2_duped.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice); \/\/ The memory allocation for this one is wrong\n\n\/\/ int grid_cols = (ptr3_cols + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n\/\/ int grid_rows = (ptr3_rows + std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z) - 1) \/ std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z);\n\/\/ int grid_depths = (ptr3_depths + THREAD_SIZE_Z - 1) \/ THREAD_SIZE_Z;\n\n\/\/ dim3 gridSize(grid_cols, grid_cols, grid_depths);\n\/\/ dim3 threadSize(std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY \/ THREAD_SIZE_Z), THREAD_SIZE_Z);\n\n\/\/ std::unique_ptr out_ptr(new float[ptr3_size]);\n\/\/ cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr3_bytes, cudaMemcpyDeviceToHost);\n\n\/\/ cudaFree(gpu_ptr1);\n\/\/ cudaFree(gpu_ptr2);\n\/\/ cudaFree(gpu_ptr3);\n\n\/\/ return out_ptr;\n\/\/ }\n\n\/\/ std::unique_ptr CUDAconvolution(std::unique_ptr& in_ptr1, std::unique_ptr& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr& in_ptr2, std::unique_ptr& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int stride_cols, int stride_rows) {\n\/\/ int ptr1_cols = ;\n\/\/ }\n\n\/\/ New Pseudo:\n\/\/ Inputs: A layered 4 dimensional input block\n\/\/ A layered 4 dimensional weight block (with the same depth as those of the filters)\n\/\/ The third dimensions nof each should line up but not the fourth dimension\n\n\/\/ Scaling: For the input block scale them to be the same size as the fourth dimension of the weight block \n\/\/ For the weight blocks, condense it all into a single 3d layer and then scale them by the fourth dimension of the input block\n\n\/\/ Post Scaling: Turn the scaled input block into a single three dimensional layer (do this by multiplying the depth by the rest of the size)\n\/\/ Turn the scaled weight block into a big single three dimensional block too\n\n\/\/ Remaining steps: Perform the convolution across every different subsection\n\/\/ Output it as a block with dimensions of the new rows and cols, the depth of the original depth of the input block and the fourth dimension of the kernels \n\n\/\/ Post processing ----------- (NOT NEEDED)\n\/\/ Do the sum across all of the fourth dimensions into a single third dimension (or something)\n\/\/ Add the bias term to each respective element\n\n\/\/ Thoughts?\n\/\/ How would this deal with a block size larger than four dimensions?\n\/\/ To do so it appears that the dupe function is broken - It does not perform the duplicates properly for just the fourth dimensions, lets check this out","avg_line_length":43.8967642527,"max_line_length":334,"alphanum_fraction":0.6757695953} {"size":409,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * UpdaterEz1D.cpp\n *\n * Created on: 25 \u044f\u043d\u0432. 
2016 \u0433.\n * Author: aleksandr\n *\/\n\n#include \"UpdaterIntensityTM.h\"\n\n__device__\nvoid UpdaterIntensityTM::operator() (const int indx) {\n\t#define Ez(M, N) Ez[(M) * (gridSizeY) + (N)]\n\tconst int n = indx % sizeY;\n\tconst int m = indx \/ sizeY;\n\tintensity[indx] = intensity[indx] + Ez(firstX + m*stepX, firstY + n*stepX)*Ez(firstX + m*stepX, firstY + n*stepX);\n}\n","avg_line_length":24.0588235294,"max_line_length":115,"alphanum_fraction":0.6430317848} {"size":6079,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/\/----------------------------------*-C++-*----------------------------------\/\/\n\/\/ Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.\n\/\/ See the top-level COPYRIGHT file for details.\n\/\/ SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\/\/---------------------------------------------------------------------------\/\/\n\/\/! \\file RngEngine.test.cu\n\/\/---------------------------------------------------------------------------\/\/\n#include \"random\/cuda\/RngStateStore.hh\"\n#include \"random\/cuda\/RngEngine.cuh\"\n\n#include \n#include \n#include \n#include \"base\/Range.hh\"\n#include \"celeritas_test.hh\"\n#include \"base\/KernelParamCalculator.cuda.hh\"\n\nusing celeritas::generate_canonical;\nusing celeritas::RngEngine;\nusing celeritas::RngState;\nusing celeritas::RngStatePointers;\nusing celeritas::RngStateStore;\n\n\/\/---------------------------------------------------------------------------\/\/\n\/\/ CUDA KERNELS\n\/\/---------------------------------------------------------------------------\/\/\n\n__global__ void sample_native(int num_samples,\n RngStatePointers view,\n RngEngine::result_type* samples)\n{\n auto tid = celeritas::KernelParamCalculator::thread_id();\n if (tid.get() < num_samples)\n {\n RngEngine rng(view, tid);\n samples[tid.get()] = rng();\n }\n}\n\ntemplate\n__global__ void\nsample_real(int num_samples, RngStatePointers view, RealType* samples)\n{\n auto tid = celeritas::KernelParamCalculator::thread_id();\n if (tid.get() < num_samples)\n {\n RngEngine rng(view, tid);\n samples[tid.get()] = generate_canonical(rng);\n }\n}\n\n\/\/---------------------------------------------------------------------------\/\/\n\/\/ INT TEST\n\/\/---------------------------------------------------------------------------\/\/\n\nTEST(RngEngineIntTest, regression)\n{\n using value_type = RngEngine::result_type;\n\n int num_samples = 1024;\n\n \/\/ Allocate device memory for results\n thrust::device_vector samples(num_samples);\n\n \/\/ Initialize the RNG states on device\n RngStateStore container(num_samples);\n EXPECT_EQ(container.size(), num_samples);\n\n celeritas::KernelParamCalculator calc_launch_params;\n auto params = calc_launch_params(num_samples);\n sample_native<<>>(\n num_samples,\n container.device_pointers(),\n thrust::raw_pointer_cast(samples.data()));\n CELER_CUDA_CALL(cudaDeviceSynchronize());\n\n \/\/ Copy data back to host\n std::vector host_samples(num_samples);\n thrust::copy(samples.begin(), samples.end(), host_samples.begin());\n\n \/\/ Print a subset of the values\n std::vector test_values;\n for (int i = 0; i < num_samples; i += 127)\n {\n test_values.push_back(host_samples[i]);\n }\n\n \/\/ PRINT_EXPECTED(test_values);\n static const unsigned int expected_test_values[] = {165860337u,\n 3006138920u,\n 2161337536u,\n 390101068u,\n 2347834113u,\n 100129048u,\n 4122784086u,\n 473544901u,\n 2822849608u};\n EXPECT_VEC_EQ(test_values, 
expected_test_values);\n}\n\n\/\/---------------------------------------------------------------------------\/\/\n\/\/ FLOAT TEST\n\/\/---------------------------------------------------------------------------\/\/\n\ntemplate\nclass RngEngineFloatTest : public celeritas::Test\n{\n};\n\nvoid check_expected_float_samples(const thrust::host_vector& v)\n{\n EXPECT_FLOAT_EQ(0.038617369, v[0]);\n EXPECT_FLOAT_EQ(0.411269426, v[1]);\n}\nvoid check_expected_float_samples(const thrust::host_vector& v)\n{\n EXPECT_DOUBLE_EQ(0.283318433931184, v[0]);\n EXPECT_DOUBLE_EQ(0.653335242131673, v[1]);\n}\n\nusing FloatTypes = ::testing::Types;\nTYPED_TEST_SUITE(RngEngineFloatTest, FloatTypes, );\n\nTYPED_TEST(RngEngineFloatTest, generate_canonical)\n{\n using real_type = TypeParam;\n int num_samples = 100;\n\n \/\/ Allocate device memory for results\n thrust::device_vector samples(num_samples);\n\n \/\/ Initialize the RNG states on device\n RngStateStore container(num_samples);\n EXPECT_EQ(container.size(), num_samples);\n\n celeritas::KernelParamCalculator calc_launch_params;\n auto params = calc_launch_params(num_samples);\n sample_real<<>>(\n num_samples,\n container.device_pointers(),\n thrust::raw_pointer_cast(samples.data()));\n CELER_CUDA_CALL(cudaDeviceSynchronize());\n\n \/\/ Copy data back to host\n thrust::host_vector host_samples = samples;\n EXPECT_EQ(host_samples.size(), num_samples);\n for (real_type sample : host_samples)\n {\n EXPECT_GE(sample, real_type(0));\n EXPECT_LE(sample, real_type(1));\n }\n\n check_expected_float_samples(host_samples);\n}\n\n\/\/---------------------------------------------------------------------------\/\/\n\/\/ TEST on CPU\n\/\/---------------------------------------------------------------------------\/\/\nTEST(RngEngineCPUTest, generate_on_cpu)\n{\n int num_samples = 1024 * 1000;\n unsigned long seed = 12345u;\n\n RngState host_state[1];\n RngStatePointers host_pointers{celeritas::make_span(host_state)};\n RngEngine rng(host_pointers, celeritas::ThreadId{0});\n rng = RngEngine::Initializer_t{seed};\n\n double mean = 0;\n for (int i = 0; i < num_samples; ++i)\n {\n mean += generate_canonical(rng);\n }\n mean \/= num_samples;\n EXPECT_NEAR(0.5, mean, 0.0001);\n}\n","avg_line_length":34.3446327684,"max_line_length":79,"alphanum_fraction":0.5389044251} {"size":3896,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n\n#include \n#include \n#include \n\n#include \"MonteRayParallelAssistant.hh\"\n#include \"GPUUtilityFunctions.hh\"\n\nnamespace MonteRay {\n\nMonteRayParallelAssistant::MonteRayParallelAssistant() {\n char host[1024];\n gethostname(host,1024);\n host[1023] = '\\0';\n name = std::string( host );\n\n int mpi_initialized = 0;\n MPI_Initialized( &mpi_initialized );\n\n if( !mpi_initialized ) {\n world_size = 1; \n world_rank = 0;\n shared_memory_size = 1;\n shared_memory_rank = 0;\n WORK_GROUP_COMM_SIZE = 1;\n WORK_GROUP_COMM_RANK = 0;\n INTER_WORK_GROUP_COMM_SIZE = 1;\n INTER_WORK_GROUP_COMM_RANK = 0;\n return;\n }\n\n\n MPI_Comm_dup(MPI_COMM_WORLD, &MONTERAY_COMM_WORLD);\n\n parallel = true;\n MPI_Comm_size(MONTERAY_COMM_WORLD, &world_size);\n MPI_Comm_rank(MONTERAY_COMM_WORLD, &world_rank);\n\n MPI_Comm_split_type( MONTERAY_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &MONTRERAY_COMM_SHMEM );\n MPI_Comm_size( MONTRERAY_COMM_SHMEM, &shared_memory_size );\n MPI_Comm_rank( MONTRERAY_COMM_SHMEM, &shared_memory_rank );\n\n\n \/\/ Only get the number of GPUs once per node\n int numberOfGPUs = 0;\n if( shared_memory_rank == 0 ){\n 
numberOfGPUs = getNumberOfGPUS();\n if( numberOfGPUs == 0 ) {\n \/\/ if not using GPUs, setup to use one cpu process per work group\n numberOfGPUs = shared_memory_size;\n }\n }\n \/\/ scatter numberOfGPUs to all processes on the node\n MPI_Bcast( &numberOfGPUs, 1, MPI_INT, 0, MONTRERAY_COMM_SHMEM);\n\n \/\/ Check SINGLEPROC_WORKGROUP environment variable\n char* pSINGLEPROC_WORKGROUP;\n pSINGLEPROC_WORKGROUP = std::getenv(\"SINGLEPROC_WORKGROUP\");\n if (pSINGLEPROC_WORKGROUP != NULL) {\n\n if( world_rank == 0 ) {\n std::cout << \"Warning -- MonteRay is using a single process per workgroup. Each process will issue it's own GPU kernel calls.\\n\";\n }\n useSingleProcWorkGroup = true;\n }\n\n \/\/ split MONTRERAY_COMM_SHMEM into numberOfGPUs work groups\n if( !useSingleProcWorkGroup ) {\n if( numberOfGPUs <= 1 ) {\n deviceID = 0;\n MPI_Comm_dup(MONTRERAY_COMM_SHMEM, &WORK_GROUP_COMM);\n } else {\n deviceID = calcDeviceID(shared_memory_size, numberOfGPUs, shared_memory_rank );\n MPI_Comm_split(MONTRERAY_COMM_SHMEM, deviceID, shared_memory_rank, &WORK_GROUP_COMM);\n }\n } else {\n \/\/std::cout << \"Debug: Splitting MONTRERAY_COMM_SHMEM into one process per WORK_GROUP_COMM. \\n\";\n deviceID = calcDeviceID(shared_memory_size, numberOfGPUs, shared_memory_rank );\n MPI_Comm_split(MONTRERAY_COMM_SHMEM, shared_memory_rank, shared_memory_rank, &WORK_GROUP_COMM);\n }\n\n MPI_Comm_size( WORK_GROUP_COMM, &WORK_GROUP_COMM_SIZE );\n MPI_Comm_rank( WORK_GROUP_COMM, &WORK_GROUP_COMM_RANK );\n setCudaDevice( deviceID );\n\n \/\/ Create inter-working group communicator\n if( WORK_GROUP_COMM_RANK == 0 ) {\n MPI_Comm_split( MONTERAY_COMM_WORLD, 0, world_rank, &INTER_WORK_GROUP_COMM);\n MPI_Comm_rank( INTER_WORK_GROUP_COMM, &INTER_WORK_GROUP_COMM_RANK );\n MPI_Comm_size( INTER_WORK_GROUP_COMM, &INTER_WORK_GROUP_COMM_SIZE );\n } else {\n MPI_Comm_split( MONTERAY_COMM_WORLD, MPI_UNDEFINED, world_rank, &INTER_WORK_GROUP_COMM);\n INTER_WORK_GROUP_COMM_RANK = -1;\n INTER_WORK_GROUP_COMM_SIZE = 0;\n }\n}\n\nvoid setMonteRayStackSize( size_t size) {\n const MonteRayParallelAssistant& PA( MonteRayParallelAssistant::getInstance() );\n if( PA.getWorkGroupRank() == 0 ) {\n setCudaStackSize( size );\n }\n}\n\nbool isWorkGroupMaster(void) {\n const MonteRayParallelAssistant& PA( MonteRayParallelAssistant::getInstance() );\n if( PA.getWorkGroupRank() == 0 ) {\n return true;\n }\n return false;\n}\n\n\n} \/\/ end namespace\n","avg_line_length":33.5862068966,"max_line_length":141,"alphanum_fraction":0.6868583162} {"size":6296,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n#include \n#include \n#include \n\nnamespace arb {\nnamespace bbp_catalogue {\n\n#define PPACK_IFACE_BLOCK \\\nauto _pp_var_width __attribute__((unused)) = params_.width;\\\nauto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\\\nauto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\\\nauto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\\\nauto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\\\nauto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\\\nauto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\\\nauto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\\\nauto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\\\nauto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\\\nauto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\\\nauto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\\\nauto* 
_pp_var_node_index __attribute__((unused)) = params_.node_index;\\\nauto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\\\nauto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\\\nauto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\\\nauto* _pp_var_weight __attribute__((unused)) = params_.weight;\\\nauto& _pp_var_events __attribute__((unused)) = params_.events;\\\nauto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\\\nauto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\\\nauto _pp_var_zTau __attribute__((unused)) = params_.globals[0];\\\nauto* _pp_var_z __attribute__((unused)) = params_.state_vars[0];\\\nauto* _pp_var_gSK_E2bar __attribute__((unused)) = params_.parameters[0];\\\nauto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\\\nauto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\\\nauto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[1];\\\nauto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[1].index;\\\n\/\/End of IFACEBLOCK\n\nnamespace {\n\nusing ::arb::gpu::exprelr;\nusing ::arb::gpu::safeinv;\nusing ::arb::gpu::min;\nusing ::arb::gpu::max;\n\n__global__\nvoid init(arb_mechanism_ppack params_) {\n int n_ = params_.width;\n int tid_ = threadIdx.x + blockDim.x*blockIdx.x;\n PPACK_IFACE_BLOCK;\n if (tid_width;\n unsigned block_dim = 128;\n unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);\n init<<>>(*p);\n if (!p->multiplicity) return;\n multiply<<>>(*p);\n}\n\nvoid mechanism_SK_E2_gpu_compute_currents_(arb_mechanism_ppack* p) {\n auto n = p->width;\n unsigned block_dim = 128;\n unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);\n compute_currents<<>>(*p);\n}\n\nvoid mechanism_SK_E2_gpu_advance_state_(arb_mechanism_ppack* p) {\n auto n = p->width;\n unsigned block_dim = 128;\n unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);\n advance_state<<>>(*p);\n}\n\nvoid mechanism_SK_E2_gpu_write_ions_(arb_mechanism_ppack* p) {}\n\nvoid mechanism_SK_E2_gpu_post_event_(arb_mechanism_ppack* p) {}\nvoid mechanism_SK_E2_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}\n\n} \/\/ namespace bbp_catalogue\n} \/\/ namespace arb\n","avg_line_length":40.8831168831,"max_line_length":136,"alphanum_fraction":0.6939326557} {"size":3564,"ext":"cu","lang":"Cuda","max_stars_count":5.0,"content":"extern \"C\" {\n\n\/**\n * This is called by the various functions below to clear a buffer.\n *\/\n__device__ void clearSingleBuffer(int* __restrict__ buffer, int size) {\n int index = blockDim.x*blockIdx.x+threadIdx.x;\n int4* buffer4 = (int4*) buffer;\n int sizeDiv4 = size\/4;\n while (index < sizeDiv4) {\n buffer4[index] = make_int4(0);\n index += blockDim.x*gridDim.x;\n }\n if (blockDim.x*blockIdx.x+threadIdx.x == 0)\n for (int i = sizeDiv4*4; i < size; i++)\n buffer[i] = 0;\n}\n\n\/**\n * Fill a buffer with 0.\n *\/\n__global__ void clearBuffer(int* __restrict__ buffer, int size) {\n clearSingleBuffer(buffer, size);\n}\n\n\/**\n * Fill two buffers with 0.\n *\/\n__global__ void clearTwoBuffers(int* __restrict__ buffer1, int size1, int* __restrict__ buffer2, int size2) {\n clearSingleBuffer(buffer1, size1);\n clearSingleBuffer(buffer2, size2);\n}\n\n\/**\n * Fill three buffers with 0.\n *\/\n__global__ void clearThreeBuffers(int* __restrict__ buffer1, int size1, int* __restrict__ buffer2, int size2, int* __restrict__ buffer3, int size3) {\n 
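\/\/ note: one launch clears all three buffers; each helper call below strides the whole grid over a single buffer, avoiding one kernel launch per buffer\n    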
clearSingleBuffer(buffer1, size1);\n clearSingleBuffer(buffer2, size2);\n clearSingleBuffer(buffer3, size3);\n}\n\n\/**\n * Fill four buffers with 0.\n *\/\n__global__ void clearFourBuffers(int* __restrict__ buffer1, int size1, int* __restrict__ buffer2, int size2, int* __restrict__ buffer3, int size3, int* __restrict__ buffer4, int size4) {\n clearSingleBuffer(buffer1, size1);\n clearSingleBuffer(buffer2, size2);\n clearSingleBuffer(buffer3, size3);\n clearSingleBuffer(buffer4, size4);\n}\n\n\/**\n * Fill five buffers with 0.\n *\/\n__global__ void clearFiveBuffers(int* __restrict__ buffer1, int size1, int* __restrict__ buffer2, int size2, int* __restrict__ buffer3, int size3, int* __restrict__ buffer4, int size4, int* __restrict__ buffer5, int size5) {\n clearSingleBuffer(buffer1, size1);\n clearSingleBuffer(buffer2, size2);\n clearSingleBuffer(buffer3, size3);\n clearSingleBuffer(buffer4, size4);\n clearSingleBuffer(buffer5, size5);\n}\n\n\/**\n * Fill six buffers with 0.\n *\/\n__global__ void clearSixBuffers(int* __restrict__ buffer1, int size1, int* __restrict__ buffer2, int size2, int* __restrict__ buffer3, int size3, int* __restrict__ buffer4, int size4, int* __restrict__ buffer5, int size5, int* __restrict__ buffer6, int size6) {\n clearSingleBuffer(buffer1, size1);\n clearSingleBuffer(buffer2, size2);\n clearSingleBuffer(buffer3, size3);\n clearSingleBuffer(buffer4, size4);\n clearSingleBuffer(buffer5, size5);\n clearSingleBuffer(buffer6, size6);\n}\n\n\/**\n * Sum the energy buffer.\n *\/\n__global__ void reduceEnergy(const mixed* __restrict__ energyBuffer, mixed* __restrict__ result, int bufferSize, int workGroupSize) {\n extern __shared__ mixed tempBuffer[];\n const unsigned int thread = threadIdx.x;\n mixed sum = 0;\n for (unsigned int index = thread; index < bufferSize; index += blockDim.x)\n sum += energyBuffer[index];\n tempBuffer[thread] = sum;\n for (int i = 1; i < workGroupSize; i *= 2) {\n __syncthreads();\n if (thread%(i*2) == 0 && thread+i < workGroupSize)\n tempBuffer[thread] += tempBuffer[thread+i];\n }\n if (thread == 0)\n *result = tempBuffer[0];\n}\n\n\/**\n * Record the atomic charges into the posq array.\n *\/\n__global__ void setCharges(real* __restrict__ charges, real4* __restrict__ posq, int* __restrict__ atomOrder, int numAtoms) {\n for (int i = blockDim.x*blockIdx.x+threadIdx.x; i < numAtoms; i += blockDim.x*gridDim.x)\n posq[i].w = charges[atomOrder[i]];\n}\n}\n","avg_line_length":34.6019417476,"max_line_length":261,"alphanum_fraction":0.6975308642} {"size":26068,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"..\/dogqc\/include\/csv.h\"\n#include \"..\/dogqc\/include\/util.h\"\n#include \"..\/dogqc\/include\/mappedmalloc.h\"\n#include \"..\/dogqc\/include\/util.cuh\"\n#include \"..\/dogqc\/include\/hashing.cuh\"\nstruct jpayl4 {\n int att3_lorderke;\n str_t att17_lshipmod;\n};\nstruct apayl7 {\n str_t att17_lshipmod;\n};\n\n__global__ void krnl_lineitem1(\n int* iatt3_lorderke, unsigned* iatt13_lshipdat, unsigned* iatt14_lcommitd, unsigned* iatt15_lreceipt, size_t* iatt17_lshipmod_offset, char* iatt17_lshipmod_char, multi_ht* jht4, jpayl4* jht4_payload) {\n int att3_lorderke;\n unsigned att13_lshipdat;\n unsigned att14_lcommitd;\n unsigned att15_lreceipt;\n str_t att17_lshipmod;\n str_t c1 = stringConstant ( \"SHIP\", 4);\n str_t c2 = stringConstant ( \"MAIL\", 4);\n\n int tid_lineitem1 = 0;\n unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x);\n 
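\/\/ note: grid-stride scan; each thread starts at its global id and advances by the total thread count, and the warp-wide ballot below flushes the pipeline only once no lane has tuples left\n    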
unsigned step = (blockDim.x * gridDim.x);\n unsigned flushPipeline = 0;\n int active = 0;\n while(!(flushPipeline)) {\n tid_lineitem1 = loopVar;\n active = (loopVar < 6001215);\n \/\/ flush pipeline if no new elements\n flushPipeline = !(__ballot_sync(ALL_LANES,active));\n if(active) {\n att3_lorderke = iatt3_lorderke[tid_lineitem1];\n att13_lshipdat = iatt13_lshipdat[tid_lineitem1];\n att14_lcommitd = iatt14_lcommitd[tid_lineitem1];\n att15_lreceipt = iatt15_lreceipt[tid_lineitem1];\n att17_lshipmod = stringScan ( iatt17_lshipmod_offset, iatt17_lshipmod_char, tid_lineitem1);\n }\n \/\/ -------- selection (opId: 2) --------\n if(active) {\n active = ((stringEquals ( att17_lshipmod, c1) || stringEquals ( att17_lshipmod, c2)) && ((att14_lcommitd < att15_lreceipt) && ((att13_lshipdat < att14_lcommitd) && ((att15_lreceipt >= 19940101) && (att15_lreceipt < 19950101)))));\n }\n \/\/ -------- hash join build (opId: 4) --------\n if(active) {\n uint64_t hash4 = 0;\n if(active) {\n hash4 = 0;\n if(active) {\n hash4 = hash ( (hash4 + ((uint64_t)att3_lorderke)));\n }\n }\n hashCountMulti ( jht4, 840170, hash4);\n }\n loopVar += step;\n }\n\n}\n\n__global__ void krnl_lineitem1_ins(\n int* iatt3_lorderke, unsigned* iatt13_lshipdat, unsigned* iatt14_lcommitd, unsigned* iatt15_lreceipt, size_t* iatt17_lshipmod_offset, char* iatt17_lshipmod_char, multi_ht* jht4, jpayl4* jht4_payload, int* offs4) {\n int att3_lorderke;\n unsigned att13_lshipdat;\n unsigned att14_lcommitd;\n unsigned att15_lreceipt;\n str_t att17_lshipmod;\n str_t c1 = stringConstant ( \"SHIP\", 4);\n str_t c2 = stringConstant ( \"MAIL\", 4);\n\n int tid_lineitem1 = 0;\n unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x);\n unsigned step = (blockDim.x * gridDim.x);\n unsigned flushPipeline = 0;\n int active = 0;\n while(!(flushPipeline)) {\n tid_lineitem1 = loopVar;\n active = (loopVar < 6001215);\n \/\/ flush pipeline if no new elements\n flushPipeline = !(__ballot_sync(ALL_LANES,active));\n if(active) {\n att3_lorderke = iatt3_lorderke[tid_lineitem1];\n att13_lshipdat = iatt13_lshipdat[tid_lineitem1];\n att14_lcommitd = iatt14_lcommitd[tid_lineitem1];\n att15_lreceipt = iatt15_lreceipt[tid_lineitem1];\n att17_lshipmod = stringScan ( iatt17_lshipmod_offset, iatt17_lshipmod_char, tid_lineitem1);\n }\n \/\/ -------- selection (opId: 2) --------\n if(active) {\n active = ((stringEquals ( att17_lshipmod, c1) || stringEquals ( att17_lshipmod, c2)) && ((att14_lcommitd < att15_lreceipt) && ((att13_lshipdat < att14_lcommitd) && ((att15_lreceipt >= 19940101) && (att15_lreceipt < 19950101)))));\n }\n \/\/ -------- hash join build (opId: 4) --------\n if(active) {\n uint64_t hash4 = 0;\n if(active) {\n hash4 = 0;\n if(active) {\n hash4 = hash ( (hash4 + ((uint64_t)att3_lorderke)));\n }\n }\n jpayl4 payl;\n payl.att3_lorderke = att3_lorderke;\n payl.att17_lshipmod = att17_lshipmod;\n hashInsertMulti ( jht4, jht4_payload, offs4, 840170, hash4, &(payl));\n }\n loopVar += step;\n }\n\n}\n\n__global__ void krnl_orders3(\n int* iatt19_oorderke, size_t* iatt24_oorderpr_offset, char* iatt24_oorderpr_char, multi_ht* jht4, jpayl4* jht4_payload, agg_ht* aht7, float* agg1, float* agg2) {\n int att19_oorderke;\n str_t att24_oorderpr;\n unsigned warplane = (threadIdx.x % 32);\n int att3_lorderke;\n str_t att17_lshipmod;\n float att28_lowline;\n str_t c3 = stringConstant ( \"1-URGENT\", 8);\n str_t c4 = stringConstant ( \"2-HIGH\", 6);\n float att29_highline;\n str_t c5 = stringConstant ( \"1-URGENT\", 8);\n str_t c6 = stringConstant ( \"2-HIGH\", 6);\n\n 
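\/\/ note: probe side of the join; each orders row is hashed into the lineitem multi hash table built above, and surviving matches update the aggregation hash table\n    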
int tid_orders1 = 0;\n unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x);\n unsigned step = (blockDim.x * gridDim.x);\n unsigned flushPipeline = 0;\n int active = 0;\n while(!(flushPipeline)) {\n tid_orders1 = loopVar;\n active = (loopVar < 1500000);\n \/\/ flush pipeline if no new elements\n flushPipeline = !(__ballot_sync(ALL_LANES,active));\n if(active) {\n att19_oorderke = iatt19_oorderke[tid_orders1];\n att24_oorderpr = stringScan ( iatt24_oorderpr_offset, iatt24_oorderpr_char, tid_orders1);\n }\n \/\/ -------- hash join probe (opId: 4) --------\n \/\/ -------- multiprobe multi broadcast (opId: 4) --------\n int matchEnd4 = 0;\n int matchEndBuf4 = 0;\n int matchOffset4 = 0;\n int matchOffsetBuf4 = 0;\n int probeActive4 = active;\n int att19_oorderke_bcbuf4;\n str_t att24_oorderpr_bcbuf4;\n uint64_t hash4 = 0;\n if(probeActive4) {\n hash4 = 0;\n if(active) {\n hash4 = hash ( (hash4 + ((uint64_t)att19_oorderke)));\n }\n probeActive4 = hashProbeMulti ( jht4, 840170, hash4, matchOffsetBuf4, matchEndBuf4);\n }\n unsigned activeProbes4 = __ballot_sync(ALL_LANES,probeActive4);\n int num4 = 0;\n num4 = (matchEndBuf4 - matchOffsetBuf4);\n unsigned wideProbes4 = __ballot_sync(ALL_LANES,(num4 >= 32));\n att19_oorderke_bcbuf4 = att19_oorderke;\n att24_oorderpr_bcbuf4 = att24_oorderpr;\n while((activeProbes4 > 0)) {\n unsigned tupleLane;\n unsigned broadcastLane;\n int numFilled = 0;\n int num = 0;\n while(((numFilled < 32) && activeProbes4)) {\n if((wideProbes4 > 0)) {\n tupleLane = (__ffs(wideProbes4) - 1);\n wideProbes4 -= (1 << tupleLane);\n }\n else {\n tupleLane = (__ffs(activeProbes4) - 1);\n }\n num = __shfl_sync(ALL_LANES,num4,tupleLane);\n if((numFilled && ((numFilled + num) > 32))) {\n break;\n }\n if((warplane >= numFilled)) {\n broadcastLane = tupleLane;\n matchOffset4 = (warplane - numFilled);\n }\n numFilled += num;\n activeProbes4 -= (1 << tupleLane);\n }\n matchOffset4 += __shfl_sync(ALL_LANES,matchOffsetBuf4,broadcastLane);\n matchEnd4 = __shfl_sync(ALL_LANES,matchEndBuf4,broadcastLane);\n att19_oorderke = __shfl_sync(ALL_LANES,att19_oorderke_bcbuf4,broadcastLane);\n att24_oorderpr = __shfl_sync(ALL_LANES,att24_oorderpr_bcbuf4,broadcastLane);\n probeActive4 = (matchOffset4 < matchEnd4);\n while(__any_sync(ALL_LANES,probeActive4)) {\n active = probeActive4;\n active = 0;\n jpayl4 payl;\n if(probeActive4) {\n payl = jht4_payload[matchOffset4];\n att3_lorderke = payl.att3_lorderke;\n att17_lshipmod = payl.att17_lshipmod;\n active = 1;\n active &= ((att3_lorderke == att19_oorderke));\n matchOffset4 += 32;\n probeActive4 &= ((matchOffset4 < matchEnd4));\n }\n \/\/ -------- map (opId: 5) --------\n if(active) {\n float casevar1227;\n if((!(stringEquals ( att24_oorderpr, c3)) && !(stringEquals ( att24_oorderpr, c4)))) {\n casevar1227 = 1;\n }\n else {\n casevar1227 = 0;\n }\n att28_lowline = casevar1227;\n }\n \/\/ -------- map (opId: 6) --------\n if(active) {\n float casevar1215;\n if((stringEquals ( att24_oorderpr, c5) || stringEquals ( att24_oorderpr, c6))) {\n casevar1215 = 1;\n }\n else {\n casevar1215 = 0;\n }\n att29_highline = casevar1215;\n }\n \/\/ -------- aggregation (opId: 7) --------\n int bucket = 0;\n if(active) {\n uint64_t hash7 = 0;\n hash7 = 0;\n hash7 = hash ( (hash7 + stringHash ( att17_lshipmod)));\n apayl7 payl;\n payl.att17_lshipmod = att17_lshipmod;\n int bucketFound = 0;\n int numLookups = 0;\n while(!(bucketFound)) {\n bucket = hashAggregateGetBucket ( aht7, 14, hash7, numLookups, &(payl));\n apayl7 probepayl = aht7[bucket].payload;\n bucketFound 
= 1;\n bucketFound &= (stringEquals ( payl.att17_lshipmod, probepayl.att17_lshipmod));\n }\n }\n if(active) {\n atomicAdd(&(agg1[bucket]), ((float)att29_highline));\n atomicAdd(&(agg2[bucket]), ((float)att28_lowline));\n }\n }\n }\n loopVar += step;\n }\n\n}\n\n__global__ void krnl_aggregation7(\n agg_ht* aht7, float* agg1, float* agg2, int* nout_result, str_offs* oatt17_lshipmod_offset, char* iatt17_lshipmod_char, float* oatt1_highline, float* oatt2_lowlinec) {\n str_t att17_lshipmod;\n float att1_highline;\n float att2_lowlinec;\n unsigned warplane = (threadIdx.x % 32);\n unsigned prefixlanes = (0xffffffff >> (32 - warplane));\n\n int tid_aggregation7 = 0;\n unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x);\n unsigned step = (blockDim.x * gridDim.x);\n unsigned flushPipeline = 0;\n int active = 0;\n while(!(flushPipeline)) {\n tid_aggregation7 = loopVar;\n active = (loopVar < 14);\n \/\/ flush pipeline if no new elements\n flushPipeline = !(__ballot_sync(ALL_LANES,active));\n if(active) {\n }\n \/\/ -------- scan aggregation ht (opId: 7) --------\n if(active) {\n active &= ((aht7[tid_aggregation7].lock.lock == OnceLock::LOCK_DONE));\n }\n if(active) {\n apayl7 payl = aht7[tid_aggregation7].payload;\n att17_lshipmod = payl.att17_lshipmod;\n }\n if(active) {\n att1_highline = agg1[tid_aggregation7];\n att2_lowlinec = agg2[tid_aggregation7];\n }\n \/\/ -------- materialize (opId: 8) --------\n int wp;\n int writeMask;\n int numProj;\n writeMask = __ballot_sync(ALL_LANES,active);\n numProj = __popc(writeMask);\n if((warplane == 0)) {\n wp = atomicAdd(nout_result, numProj);\n }\n wp = __shfl_sync(ALL_LANES,wp,0);\n wp = (wp + __popc((writeMask & prefixlanes)));\n if(active) {\n oatt17_lshipmod_offset[wp] = toStringOffset ( iatt17_lshipmod_char, att17_lshipmod);\n oatt1_highline[wp] = att1_highline;\n oatt2_lowlinec[wp] = att2_lowlinec;\n }\n loopVar += step;\n }\n\n}\n\nint main() {\n int* iatt3_lorderke;\n iatt3_lorderke = ( int*) map_memory_file ( \"mmdb\/lineitem_l_orderkey\" );\n unsigned* iatt13_lshipdat;\n iatt13_lshipdat = ( unsigned*) map_memory_file ( \"mmdb\/lineitem_l_shipdate\" );\n unsigned* iatt14_lcommitd;\n iatt14_lcommitd = ( unsigned*) map_memory_file ( \"mmdb\/lineitem_l_commitdate\" );\n unsigned* iatt15_lreceipt;\n iatt15_lreceipt = ( unsigned*) map_memory_file ( \"mmdb\/lineitem_l_receiptdate\" );\n size_t* iatt17_lshipmod_offset;\n iatt17_lshipmod_offset = ( size_t*) map_memory_file ( \"mmdb\/lineitem_l_shipmode_offset\" );\n char* iatt17_lshipmod_char;\n iatt17_lshipmod_char = ( char*) map_memory_file ( \"mmdb\/lineitem_l_shipmode_char\" );\n int* iatt19_oorderke;\n iatt19_oorderke = ( int*) map_memory_file ( \"mmdb\/orders_o_orderkey\" );\n size_t* iatt24_oorderpr_offset;\n iatt24_oorderpr_offset = ( size_t*) map_memory_file ( \"mmdb\/orders_o_orderpriority_offset\" );\n char* iatt24_oorderpr_char;\n iatt24_oorderpr_char = ( char*) map_memory_file ( \"mmdb\/orders_o_orderpriority_char\" );\n\n int nout_result;\n std::vector < str_offs > oatt17_lshipmod_offset(7);\n std::vector < float > oatt1_highline(7);\n std::vector < float > oatt2_lowlinec(7);\n\n \/\/ wake up gpu\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in wake up gpu! 
\" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"wake up gpu\")\n }\n }\n\n int* d_iatt3_lorderke;\n cudaMalloc((void**) &d_iatt3_lorderke, 6001215* sizeof(int) );\n unsigned* d_iatt13_lshipdat;\n cudaMalloc((void**) &d_iatt13_lshipdat, 6001215* sizeof(unsigned) );\n unsigned* d_iatt14_lcommitd;\n cudaMalloc((void**) &d_iatt14_lcommitd, 6001215* sizeof(unsigned) );\n unsigned* d_iatt15_lreceipt;\n cudaMalloc((void**) &d_iatt15_lreceipt, 6001215* sizeof(unsigned) );\n size_t* d_iatt17_lshipmod_offset;\n cudaMalloc((void**) &d_iatt17_lshipmod_offset, (6001215 + 1)* sizeof(size_t) );\n char* d_iatt17_lshipmod_char;\n cudaMalloc((void**) &d_iatt17_lshipmod_char, 25717043* sizeof(char) );\n int* d_iatt19_oorderke;\n cudaMalloc((void**) &d_iatt19_oorderke, 1500000* sizeof(int) );\n size_t* d_iatt24_oorderpr_offset;\n cudaMalloc((void**) &d_iatt24_oorderpr_offset, (1500000 + 1)* sizeof(size_t) );\n char* d_iatt24_oorderpr_char;\n cudaMalloc((void**) &d_iatt24_oorderpr_char, 12599838* sizeof(char) );\n int* d_nout_result;\n cudaMalloc((void**) &d_nout_result, 1* sizeof(int) );\n str_offs* d_oatt17_lshipmod_offset;\n cudaMalloc((void**) &d_oatt17_lshipmod_offset, 7* sizeof(str_offs) );\n float* d_oatt1_highline;\n cudaMalloc((void**) &d_oatt1_highline, 7* sizeof(float) );\n float* d_oatt2_lowlinec;\n cudaMalloc((void**) &d_oatt2_lowlinec, 7* sizeof(float) );\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in cuda malloc! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"cuda malloc\")\n }\n }\n\n\n \/\/ show memory usage of GPU\n { size_t free_byte ;\n size_t total_byte ;\n cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;\n if ( cudaSuccess != cuda_status ) {\n printf(\"Error: cudaMemGetInfo fails, %s \\n\", cudaGetErrorString(cuda_status) );\n exit(1);\n }\n double free_db = (double)free_byte ;\n double total_db = (double)total_byte ;\n double used_db = total_db - free_db ;\n fprintf(stderr, \"Memory %.1f \/ %.1f GB\\n\",\n used_db\/(1024*1024*1024), total_db\/(1024*1024*1024) );\n fflush(stdout);\n }\n\n multi_ht* d_jht4;\n cudaMalloc((void**) &d_jht4, 840170* sizeof(multi_ht) );\n jpayl4* d_jht4_payload;\n cudaMalloc((void**) &d_jht4_payload, 840170* sizeof(jpayl4) );\n {\n int gridsize=920;\n int blocksize=128;\n initMultiHT<<>>(d_jht4, 840170);\n }\n int* d_offs4;\n cudaMalloc((void**) &d_offs4, 1* sizeof(int) );\n {\n int gridsize=920;\n int blocksize=128;\n initArray<<>>(d_offs4, 0, 1);\n }\n agg_ht* d_aht7;\n cudaMalloc((void**) &d_aht7, 14* sizeof(agg_ht) );\n {\n int gridsize=920;\n int blocksize=128;\n initAggHT<<>>(d_aht7, 14);\n }\n float* d_agg1;\n cudaMalloc((void**) &d_agg1, 14* sizeof(float) );\n {\n int gridsize=920;\n int blocksize=128;\n initArray<<>>(d_agg1, 0.0f, 14);\n }\n float* d_agg2;\n cudaMalloc((void**) &d_agg2, 14* sizeof(float) );\n {\n int gridsize=920;\n int blocksize=128;\n initArray<<>>(d_agg2, 0.0f, 14);\n }\n {\n int gridsize=920;\n int blocksize=128;\n initArray<<>>(d_nout_result, 0, 1);\n }\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in cuda mallocHT! 
\" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"cuda mallocHT\")\n }\n }\n\n\n \/\/ show memory usage of GPU\n { size_t free_byte ;\n size_t total_byte ;\n cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;\n if ( cudaSuccess != cuda_status ) {\n printf(\"Error: cudaMemGetInfo fails, %s \\n\", cudaGetErrorString(cuda_status) );\n exit(1);\n }\n double free_db = (double)free_byte ;\n double total_db = (double)total_byte ;\n double used_db = total_db - free_db ;\n fprintf(stderr, \"Memory %.1f \/ %.1f GB\\n\",\n used_db\/(1024*1024*1024), total_db\/(1024*1024*1024) );\n fflush(stdout);\n }\n\n cudaMemcpy( d_iatt3_lorderke, iatt3_lorderke, 6001215 * sizeof(int), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt13_lshipdat, iatt13_lshipdat, 6001215 * sizeof(unsigned), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt14_lcommitd, iatt14_lcommitd, 6001215 * sizeof(unsigned), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt15_lreceipt, iatt15_lreceipt, 6001215 * sizeof(unsigned), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt17_lshipmod_offset, iatt17_lshipmod_offset, (6001215 + 1) * sizeof(size_t), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt17_lshipmod_char, iatt17_lshipmod_char, 25717043 * sizeof(char), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt19_oorderke, iatt19_oorderke, 1500000 * sizeof(int), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt24_oorderpr_offset, iatt24_oorderpr_offset, (1500000 + 1) * sizeof(size_t), cudaMemcpyHostToDevice);\n cudaMemcpy( d_iatt24_oorderpr_char, iatt24_oorderpr_char, 12599838 * sizeof(char), cudaMemcpyHostToDevice);\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in cuda memcpy in! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"cuda memcpy in\")\n }\n }\n\n std::clock_t start_totalKernelTime105 = std::clock();\n std::clock_t start_krnl_lineitem1106 = std::clock();\n {\n int gridsize=920;\n int blocksize=128;\n krnl_lineitem1<<>>(d_iatt3_lorderke, d_iatt13_lshipdat, d_iatt14_lcommitd, d_iatt15_lreceipt, d_iatt17_lshipmod_offset, d_iatt17_lshipmod_char, d_jht4, d_jht4_payload);\n }\n cudaDeviceSynchronize();\n std::clock_t stop_krnl_lineitem1106 = std::clock();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in krnl_lineitem1! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"krnl_lineitem1\")\n }\n }\n\n std::clock_t start_scanMultiHT107 = std::clock();\n {\n int gridsize=920;\n int blocksize=128;\n scanMultiHT<<>>(d_jht4, 840170, d_offs4);\n }\n cudaDeviceSynchronize();\n std::clock_t stop_scanMultiHT107 = std::clock();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in scanMultiHT! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"scanMultiHT\")\n }\n }\n\n std::clock_t start_krnl_lineitem1_ins108 = std::clock();\n {\n int gridsize=920;\n int blocksize=128;\n krnl_lineitem1_ins<<>>(d_iatt3_lorderke, d_iatt13_lshipdat, d_iatt14_lcommitd, d_iatt15_lreceipt, d_iatt17_lshipmod_offset, d_iatt17_lshipmod_char, d_jht4, d_jht4_payload, d_offs4);\n }\n cudaDeviceSynchronize();\n std::clock_t stop_krnl_lineitem1_ins108 = std::clock();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in krnl_lineitem1_ins! 
\" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"krnl_lineitem1_ins\")\n }\n }\n\n std::clock_t start_krnl_orders3109 = std::clock();\n {\n int gridsize=920;\n int blocksize=128;\n krnl_orders3<<>>(d_iatt19_oorderke, d_iatt24_oorderpr_offset, d_iatt24_oorderpr_char, d_jht4, d_jht4_payload, d_aht7, d_agg1, d_agg2);\n }\n cudaDeviceSynchronize();\n std::clock_t stop_krnl_orders3109 = std::clock();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in krnl_orders3! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"krnl_orders3\")\n }\n }\n\n std::clock_t start_krnl_aggregation7110 = std::clock();\n {\n int gridsize=920;\n int blocksize=128;\n krnl_aggregation7<<>>(d_aht7, d_agg1, d_agg2, d_nout_result, d_oatt17_lshipmod_offset, d_iatt17_lshipmod_char, d_oatt1_highline, d_oatt2_lowlinec);\n }\n cudaDeviceSynchronize();\n std::clock_t stop_krnl_aggregation7110 = std::clock();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in krnl_aggregation7! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"krnl_aggregation7\")\n }\n }\n\n std::clock_t stop_totalKernelTime105 = std::clock();\n cudaMemcpy( &nout_result, d_nout_result, 1 * sizeof(int), cudaMemcpyDeviceToHost);\n cudaMemcpy( oatt17_lshipmod_offset.data(), d_oatt17_lshipmod_offset, 7 * sizeof(str_offs), cudaMemcpyDeviceToHost);\n cudaMemcpy( oatt1_highline.data(), d_oatt1_highline, 7 * sizeof(float), cudaMemcpyDeviceToHost);\n cudaMemcpy( oatt2_lowlinec.data(), d_oatt2_lowlinec, 7 * sizeof(float), cudaMemcpyDeviceToHost);\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in cuda memcpy out! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"cuda memcpy out\")\n }\n }\n\n cudaFree( d_iatt3_lorderke);\n cudaFree( d_iatt13_lshipdat);\n cudaFree( d_iatt14_lcommitd);\n cudaFree( d_iatt15_lreceipt);\n cudaFree( d_iatt17_lshipmod_offset);\n cudaFree( d_iatt17_lshipmod_char);\n cudaFree( d_jht4);\n cudaFree( d_jht4_payload);\n cudaFree( d_offs4);\n cudaFree( d_iatt19_oorderke);\n cudaFree( d_iatt24_oorderpr_offset);\n cudaFree( d_iatt24_oorderpr_char);\n cudaFree( d_aht7);\n cudaFree( d_agg1);\n cudaFree( d_agg2);\n cudaFree( d_nout_result);\n cudaFree( d_oatt17_lshipmod_offset);\n cudaFree( d_oatt1_highline);\n cudaFree( d_oatt2_lowlinec);\n cudaDeviceSynchronize();\n {\n cudaError err = cudaGetLastError();\n if(err != cudaSuccess) {\n std::cerr << \"Cuda Error in cuda free! \" << cudaGetErrorString( err ) << std::endl;\n ERROR(\"cuda free\")\n }\n }\n\n std::clock_t start_finish111 = std::clock();\n printf(\"\\nResult: %i tuples\\n\", nout_result);\n if((nout_result > 7)) {\n ERROR(\"Index out of range. 
Output size larger than allocated with expected result number.\")\n }\n for ( int pv = 0; ((pv < 10) && (pv < nout_result)); pv += 1) {\n printf(\"l_shipmode: \");\n stringPrint ( iatt17_lshipmod_char, oatt17_lshipmod_offset[pv]);\n printf(\" \");\n printf(\"high_line_count: \");\n printf(\"%15.2f\", oatt1_highline[pv]);\n printf(\" \");\n printf(\"low_line_count: \");\n printf(\"%15.2f\", oatt2_lowlinec[pv]);\n printf(\" \");\n printf(\"\\n\");\n }\n if((nout_result > 10)) {\n printf(\"[...]\\n\");\n }\n printf(\"\\n\");\n std::clock_t stop_finish111 = std::clock();\n\n printf(\"\\n\");\n printf ( \"%32s: %6.1f ms\\n\", \"krnl_lineitem1\", (stop_krnl_lineitem1106 - start_krnl_lineitem1106) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"scanMultiHT\", (stop_scanMultiHT107 - start_scanMultiHT107) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"krnl_lineitem1_ins\", (stop_krnl_lineitem1_ins108 - start_krnl_lineitem1_ins108) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"krnl_orders3\", (stop_krnl_orders3109 - start_krnl_orders3109) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"krnl_aggregation7\", (stop_krnl_aggregation7110 - start_krnl_aggregation7110) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"finish\", (stop_finish111 - start_finish111) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf ( \"%32s: %6.1f ms\\n\", \"totalKernelTime\", (stop_totalKernelTime105 - start_totalKernelTime105) \/ (double) (CLOCKS_PER_SEC \/ 1000) );\n printf(\"<\/timing>\\n\");\n}\n","avg_line_length":40.5412130638,"max_line_length":241,"alphanum_fraction":0.6010050637} {"size":522,"ext":"cuh","lang":"Cuda","max_stars_count":null,"content":"#ifndef TRANSPO_CUH\n#define TRANSPO_CUH\n\n#include \n#include \n#include \"cuda_runtime.h\"\n#include \"cuda.h\"\n#include \"omp.h\"\n#include \"device_launch_parameters.h\"\n#include \"parameters.cuh\"\n\n\n__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height);\n__global__ void gpu_transpo_kernel_shared(u_char *Source, u_char *Resultat, unsigned height, unsigned width);\nvoid cpu_transpo(u_char **Source, u_char **Resultat, unsigned width, unsigned height);\n\n\n\n#endif","avg_line_length":27.4736842105,"max_line_length":109,"alphanum_fraction":0.7911877395} {"size":5162,"ext":"cu","lang":"Cuda","max_stars_count":2206.0,"content":"\/* ******************************************************************************\n *\n *\n * This program and the accompanying materials are made available under the\n * terms of the Apache License, Version 2.0 which is available at\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0.\n *\n * See the NOTICE file distributed with this work for additional\n * information regarding copyright ownership.\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n * License for the specific language governing permissions and limitations\n * under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ******************************************************************************\/\n\n\/\/\n\/\/ @author raver119@gmail.com\n\/\/\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"..\/ConstantTadHelper.h\"\n\nnamespace sd {\nConstantTadHelper::ConstantTadHelper() {\n auto numDevices = AffinityManager::numberOfDevices();\n\n for (int e = 0; e < numDevices; e++) {\n SD_MAP_IMPL pack;\n _cache.emplace_back(pack);\n }\n}\n\nConstantTadHelper &ConstantTadHelper::getInstance() {\n static ConstantTadHelper instance;\n return instance;\n}\n\nTadPack ConstantTadHelper::tadForDimensions(const sd::LongType *originalShape, int dimension,\n const bool keepUnitiesInShape) {\n return tadForDimensions(originalShape, &dimension, 1, keepUnitiesInShape);\n}\n\nTadPack ConstantTadHelper::tadForDimensions(const sd::LongType *originalShape, const std::vector &dimensions,\n const bool keepUnitiesInShape) {\n return tadForDimensions(originalShape, const_cast(dimensions.data()), dimensions.size(), keepUnitiesInShape);\n}\n\nTadPack ConstantTadHelper::tadForDimensions(const sd::LongType *originalShape, int *dimensions, int dimLength,\n const bool keepUnitiesInShape) {\n TadDescriptor tadDescriptor(originalShape, dimensions, dimLength, keepUnitiesInShape);\n return tadForDimensions(tadDescriptor);\n}\n\nTadPack ConstantTadHelper::tadForDimensions(ShapeDescriptor &descriptor, std::vector &dimensions,\n const bool keepUnitiesInShape) {\n TadDescriptor tadDescriptor(descriptor, dimensions, keepUnitiesInShape);\n return tadForDimensions(tadDescriptor);\n}\n\nTadPack ConstantTadHelper::tadForDimensions(TadDescriptor &descriptor) {\n const int deviceId = AffinityManager::currentDeviceId();\n\n std::lock_guard lock(_mutex);\n\n if (_cache[deviceId].count(descriptor) == 0) {\n const auto shapeInfo = descriptor.originalShape().toShapeInfo();\n const int rank = shape::rank(shapeInfo);\n const std::vector dimsToExclude = ShapeUtils::evalDimsToExclude(rank, descriptor.axis());\n const sd::LongType numOfSubArrs = ShapeUtils::getNumOfSubArrs(shapeInfo, dimsToExclude);\n const int subArrRank =\n (rank == dimsToExclude.size() || descriptor.areUnitiesinShape()) ? 
rank : rank - dimsToExclude.size();\n\n auto sPtr = std::make_shared(new sd::LongType[shape::shapeInfoLength(subArrRank)],\n std::make_shared());\n auto oPtr =\n std::make_shared(new sd::LongType[numOfSubArrs], std::make_shared());\n\n if (numOfSubArrs > 0)\n shape::calcSubArrsShapeInfoAndOffsets(shapeInfo, numOfSubArrs, dimsToExclude.size(), dimsToExclude.data(),\n sPtr->pointerAsT(), oPtr->pointerAsT(),\n descriptor.areUnitiesinShape());\n\n sd::Pointer soPtr;\n auto res = cudaMalloc(reinterpret_cast(&soPtr), numOfSubArrs * sizeof(sd::LongType));\n if (res != 0) throw cuda_exception::build(\"Memory allocation for tadOffsets failed\", res);\n\n res = cudaMemcpy(soPtr, oPtr->pointer(), numOfSubArrs * sizeof(sd::LongType), cudaMemcpyHostToDevice);\n if (res != 0) throw cuda_exception::build(\"tadOffsets copy failed\", res);\n\n \/\/ TODO: add deallocator here?\n auto ssPtr = std::make_shared(\n ConstantHelper::getInstance().replicatePointer(sPtr->pointer(), shape::shapeInfoByteLength(subArrRank)));\n\n ConstantShapeBuffer shapesBuffer(sPtr, ssPtr);\n ConstantOffsetsBuffer offsetsBuffer(\n oPtr, std::make_shared(soPtr, std::make_shared()));\n\n TadPack t(shapesBuffer, offsetsBuffer, numOfSubArrs);\n _cache[deviceId][descriptor] = t;\n\n TadPack r = _cache[deviceId][descriptor];\n\n delete[] shapeInfo;\n\n return r;\n } else {\n TadPack r = _cache[deviceId][descriptor];\n\n return r;\n }\n}\n} \/\/ namespace sd\n","avg_line_length":41.9674796748,"max_line_length":120,"alphanum_fraction":0.6848120883} {"size":25401,"ext":"cuh","lang":"Cuda","max_stars_count":73.0,"content":"\/*\n * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#ifndef CONCURRENT_UNORDERED_MAP_CUH\n#define CONCURRENT_UNORDERED_MAP_CUH\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"managed_allocator.cuh\"\n#include \"managed.cuh\"\n#include \"hash_functions.cuh\"\n#include \"..\/groupby\/hash\/aggregation_operations.cuh\"\n\n\/\/ TODO: replace this with CUDA_TRY and propagate the error\n#ifndef CUDA_RT_CALL\n#define CUDA_RT_CALL( call ) \t\t\t\t\t\t\t\t\t \\\n{ \\\n cudaError_t cudaStatus = call; \\\n if ( cudaSuccess != cudaStatus ) { \\\n fprintf(stderr, \"ERROR: CUDA RT call \\\"%s\\\" in line %d of file %s failed with %s (%d).\\n\", \\\n #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \\\n exit(1);\t\t\t\t\t\t\t\t\t\t \\\n }\t\t\t\t\t\t\t\t\t\t\t\t \\\n}\n#endif\n\n\/\/ TODO: can we do this more efficiently?\n__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare, int8_t val)\n{\n int32_t *base_address = (int32_t*)((char*)address - ((size_t)address & 3));\n int32_t int_val = (int32_t)val << (((size_t)address & 3) * 8);\n int32_t int_comp = (int32_t)compare << (((size_t)address & 3) * 8);\n return (int8_t)atomicCAS(base_address, int_comp, int_val);\n}\n\n\/\/ TODO: can we do this more efficiently?\n__inline__ __device__ int16_t atomicCAS(int16_t* address, 
int16_t compare, int16_t val)\n{\n int32_t *base_address = (int32_t*)((char*)address - ((size_t)address & 2));\n int32_t int_val = (int32_t)val << (((size_t)address & 2) * 8);\n int32_t int_comp = (int32_t)compare << (((size_t)address & 2) * 8);\n return (int16_t)atomicCAS(base_address, int_comp, int_val);\n}\n\n__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare, int64_t val)\n{\n return (int64_t)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);\n}\n\n__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare, uint64_t val)\n{\n return (uint64_t)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);\n}\n\n__inline__ __device__ long long int atomicCAS(long long int* address, long long int compare, long long int val)\n{\n return (long long int)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);\n}\n\n__inline__ __device__ double atomicCAS(double* address, double compare, double val)\n{\n return __longlong_as_double(atomicCAS((unsigned long long int*)address, __double_as_longlong(compare), __double_as_longlong(val)));\n}\n\n__inline__ __device__ float atomicCAS(float* address, float compare, float val)\n{\n return __int_as_float(atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));\n}\n\n__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val)\n{\n return (int64_t) atomicAdd((unsigned long long*)address, (unsigned long long)val);\n}\n\n__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val)\n{\n return (uint64_t) atomicAdd((unsigned long long*)address, (unsigned long long)val);\n}\n\ntemplate\n__forceinline__\n__device__ pair_type load_pair_vectorized( const pair_type* __restrict__ const ptr )\n{\n if ( sizeof(uint4) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n uint4 vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0,0,0,0};\n converter.vec_val = *reinterpret_cast(ptr);\n return converter.pair_val;\n } else if ( sizeof(uint2) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n uint2 vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0,0};\n converter.vec_val = *reinterpret_cast(ptr);\n return converter.pair_val;\n } else if ( sizeof(int) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n int vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0};\n converter.vec_val = *reinterpret_cast(ptr);\n return converter.pair_val;\n } else if ( sizeof(short) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n short vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0};\n converter.vec_val = *reinterpret_cast(ptr);\n return converter.pair_val;\n } else {\n return *ptr;\n }\n}\n\ntemplate\n__forceinline__\n__device__ void store_pair_vectorized( pair_type* __restrict__ const ptr, const pair_type val )\n{\n if ( sizeof(uint4) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n uint4 vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0,0,0,0};\n converter.pair_val = val;\n *reinterpret_cast(ptr) = converter.vec_val;\n } else if ( sizeof(uint2) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n uint2 vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0,0};\n converter.pair_val = val;\n *reinterpret_cast(ptr) = converter.vec_val;\n } else if ( sizeof(int) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n int vec_val;\n pair_type 
pair_val;\n };\n pair_type2vec_type converter = {0};\n converter.pair_val = val;\n *reinterpret_cast<int*>(ptr) = converter.vec_val;\n } else if ( sizeof(short) == sizeof(pair_type) ) {\n union pair_type2vec_type\n {\n short vec_val;\n pair_type pair_val;\n };\n pair_type2vec_type converter = {0};\n converter.pair_val = val;\n *reinterpret_cast<short*>(ptr) = converter.vec_val;\n } else {\n *ptr = val;\n }\n}\n\ntemplate <typename value_type, typename size_type, typename key_type, typename elem_type>\n__global__ void init_hashtbl(\n value_type* __restrict__ const hashtbl_values,\n const size_type n,\n const key_type key_val,\n const elem_type elem_val)\n{\n const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;\n if ( idx < n )\n {\n store_pair_vectorized( hashtbl_values + idx, thrust::make_pair( key_val, elem_val ) );\n }\n}\n\ntemplate <typename T>\nstruct equal_to\n{\n using result_type = bool;\n using first_argument_type = T;\n using second_argument_type = T;\n __forceinline__\n __host__ __device__ constexpr bool operator()(const first_argument_type &lhs, const second_argument_type &rhs) const \n {\n return lhs == rhs;\n }\n};\n\ntemplate <class Iterator>\nclass cycle_iterator_adapter {\npublic:\n using value_type = typename std::iterator_traits<Iterator>::value_type; \n using difference_type = typename std::iterator_traits<Iterator>::difference_type;\n using pointer = typename std::iterator_traits<Iterator>::pointer;\n using reference = typename std::iterator_traits<Iterator>::reference;\n using iterator_type = Iterator;\n \n cycle_iterator_adapter() = delete;\n \n __host__ __device__ explicit cycle_iterator_adapter( const iterator_type& begin, const iterator_type& end, const iterator_type& current )\n : m_begin( begin ), m_end( end ), m_current( current )\n {}\n \n __host__ __device__ cycle_iterator_adapter& operator++()\n {\n if ( m_end == (m_current+1) )\n m_current = m_begin;\n else\n ++m_current;\n return *this;\n }\n \n __host__ __device__ const cycle_iterator_adapter& operator++() const\n {\n if ( m_end == (m_current+1) )\n m_current = m_begin;\n else\n ++m_current;\n return *this;\n }\n \n __host__ __device__ cycle_iterator_adapter& operator++(int)\n {\n cycle_iterator_adapter old( m_begin, m_end, m_current);\n if ( m_end == (m_current+1) )\n m_current = m_begin;\n else\n ++m_current;\n return old;\n }\n \n __host__ __device__ const cycle_iterator_adapter& operator++(int) const\n {\n cycle_iterator_adapter old( m_begin, m_end, m_current);\n if ( m_end == (m_current+1) )\n m_current = m_begin;\n else\n ++m_current;\n return old;\n }\n \n __host__ __device__ bool equal(const cycle_iterator_adapter& other) const\n {\n return m_current == other.m_current && m_begin == other.m_begin && m_end == other.m_end;\n }\n \n __host__ __device__ reference& operator*()\n {\n return *m_current;\n }\n \n __host__ __device__ const reference& operator*() const\n {\n return *m_current;\n }\n\n __host__ __device__ const pointer operator->() const\n {\n return m_current.operator->();\n }\n \n __host__ __device__ pointer operator->()\n {\n return m_current;\n }\n \nprivate:\n iterator_type m_current;\n iterator_type m_begin;\n iterator_type m_end;\n};\n\ntemplate <class T>\n__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs, const cycle_iterator_adapter<T>& rhs)\n{\n return lhs.equal(rhs);\n}\n\ntemplate <class T>\n__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs, const cycle_iterator_adapter<T>& rhs)\n{\n return !lhs.equal(rhs);\n}\n\n\/**\n * Does support concurrent insert, but not concurrent insert and probing.\n *\n * TODO:\n * - add constructor that takes pointer to hash_table to avoid allocations\n * - extend interface to accept 
streams\n *\/\ntemplate <typename Key,\n typename Element,\n Key unused_key,\n typename Hasher = default_hash<Key>,\n typename Equality = equal_to<Key>,\n typename Allocator = managed_allocator<thrust::pair<Key, Element> >,\n bool count_collisions = false>\nclass concurrent_unordered_map : public managed\n{\n\npublic:\n using size_type = size_t;\n using hasher = Hasher;\n using key_equal = Equality;\n using allocator_type = Allocator;\n using key_type = Key;\n using value_type = thrust::pair<Key, Element>;\n using mapped_type = Element;\n using iterator = cycle_iterator_adapter<value_type*>;\n using const_iterator = const cycle_iterator_adapter<value_type*>;\n\nprivate:\n union pair2longlong\n {\n unsigned long long int longlong;\n value_type pair;\n };\n \npublic:\n\n explicit concurrent_unordered_map(size_type n,\n const mapped_type unused_element,\n const Hasher& hf = hasher(),\n const Equality& eql = key_equal(),\n const allocator_type& a = allocator_type())\n : m_hf(hf), m_equal(eql), m_allocator(a), m_hashtbl_size(n), m_hashtbl_capacity(n), m_collisions(0), m_unused_element(unused_element)\n {\n m_hashtbl_values = m_allocator.allocate( m_hashtbl_capacity );\n constexpr int block_size = 128;\n {\n cudaPointerAttributes hashtbl_values_ptr_attributes;\n cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values );\n \n if ( cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged ) {\n int dev_id = 0;\n CUDA_RT_CALL( cudaGetDevice( &dev_id ) );\n CUDA_RT_CALL( cudaMemPrefetchAsync(m_hashtbl_values, m_hashtbl_size*sizeof(value_type), dev_id, 0) );\n }\n }\n \n init_hashtbl<<<((m_hashtbl_size-1)\/block_size)+1,block_size>>>( m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element );\n CUDA_RT_CALL( cudaGetLastError() );\n CUDA_RT_CALL( cudaStreamSynchronize(0) );\n }\n \n ~concurrent_unordered_map()\n {\n m_allocator.deallocate( m_hashtbl_values, m_hashtbl_capacity );\n }\n \n __host__ __device__ iterator begin()\n {\n return iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values );\n }\n __host__ __device__ const_iterator begin() const\n {\n return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values );\n }\n __host__ __device__ iterator end()\n {\n return iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values+m_hashtbl_size );\n }\n __host__ __device__ const_iterator end() const\n {\n return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values+m_hashtbl_size );\n }\n __host__ __device__ size_type size() const\n {\n return m_hashtbl_size;\n }\n __host__ __device__ value_type* data() const\n {\n return m_hashtbl_values;\n }\n \n __forceinline__\n static constexpr __host__ __device__ key_type get_unused_key()\n {\n return unused_key;\n }\n\n \/\/ Generic update of a hash table value for any aggregator\n template <typename aggregation_type>\n __forceinline__ __device__\n void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, aggregation_type op)\n {\n const mapped_type insert_value = insert_pair.second;\n\n mapped_type old_value = existing_value;\n\n mapped_type expected{old_value};\n\n \/\/ Attempt to perform the aggregation with existing_value and\n \/\/ store the result atomically\n do \n {\n expected = old_value;\n\n const mapped_type new_value = op(insert_value, old_value);\n\n old_value = atomicCAS(&existing_value, expected, new_value);\n }\n \/\/ Guard against another thread's update to existing_value\n while( expected != old_value );\n }\n\n \/\/ TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload specifically for the types\n \/\/ where atomicAdd already has an 
overload. Otherwise the generic update_existing_value will be used.\n \/\/ Specialization for COUNT aggregator\n __forceinline__ __host__ __device__\n void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op op)\n {\n atomicAdd(&existing_value, static_cast(1));\n }\n \/\/ Specialization for COUNT aggregator\n __forceinline__ __host__ __device__\n void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op op)\n {\n atomicAdd(&existing_value, static_cast(1));\n }\n \/\/ Specialization for COUNT aggregator\n __forceinline__ __host__ __device__\n void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op op)\n {\n atomicAdd(&existing_value, static_cast(1));\n }\n \/\/ Specialization for COUNT aggregator\n __forceinline__ __host__ __device__\n void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op op)\n {\n atomicAdd(&existing_value, static_cast(1));\n }\n\n \/* --------------------------------------------------------------------------*\/\n \/** \n * @Synopsis Inserts a new (key, value) pair. If the key already exists in the map\n an aggregation operation is performed with the new value and existing value.\n E.g., if the aggregation operation is 'max', then the maximum is computed\n between the new value and existing value and the result is stored in the map.\n * \n * @Param[in] x The new (key, value) pair to insert\n * @Param[in] op The aggregation operation to perform\n * @Param[in] keys_equal An optional functor for comparing two keys \n * @Param[in] precomputed_hash Indicates if a precomputed hash value is being passed in to use\n * to determine the write location of the new key\n * @Param[in] precomputed_hash_value The precomputed hash value\n * @tparam aggregation_type A functor for a binary operation that performs the aggregation\n * @tparam comparison_type A functor for comparing two keys\n * \n * @Returns An iterator to the newly inserted key,value pair\n *\/\n \/* ----------------------------------------------------------------------------*\/\n template\n __forceinline__\n __device__ iterator insert(const value_type& x, \n aggregation_type op,\n comparison_type keys_equal = key_equal(),\n bool precomputed_hash = false,\n hash_value_type precomputed_hash_value = 0)\n {\n const size_type hashtbl_size = m_hashtbl_size;\n value_type* hashtbl_values = m_hashtbl_values;\n\n hash_value_type hash_value{0};\n\n \/\/ If a precomputed hash value has been passed in, then use it to determine\n \/\/ the write location of the new key\n if(true == precomputed_hash)\n {\n hash_value = precomputed_hash_value;\n }\n \/\/ Otherwise, compute the hash value from the new key\n else\n {\n hash_value = m_hf(x.first);\n }\n\n size_type current_index = hash_value % hashtbl_size;\n value_type *current_hash_bucket = &(hashtbl_values[current_index]);\n\n const key_type insert_key = x.first;\n \n bool insert_success = false;\n \n while (false == insert_success) {\n\n key_type& existing_key = current_hash_bucket->first;\n mapped_type& existing_value = current_hash_bucket->second;\n\n \/\/ Try and set the existing_key for the current hash bucket to insert_key\n const key_type old_key = atomicCAS( &existing_key, unused_key, insert_key);\n\n \/\/ If old_key == unused_key, the current hash bucket was empty\n \/\/ and existing_key was updated to insert_key by the atomicCAS. \n \/\/ If old_key == insert_key, this key has already been inserted. 
\n \/\/ In either case, perform the atomic aggregation of existing_value and insert_value\n \/\/ Because the hash table is initialized with the identity value of the aggregation\n \/\/ operation, it is safe to perform the operation when the existing_value still \n \/\/ has its initial value\n \/\/ TODO: Use template specialization to make use of native atomic functions\n \/\/ TODO: How to handle data types less than 32 bits?\n if ( keys_equal( unused_key, old_key ) || keys_equal(insert_key, old_key) ) {\n\n update_existing_value(existing_value, x, op);\n\n insert_success = true;\n }\n\n current_index = (current_index+1)%hashtbl_size;\n current_hash_bucket = &(hashtbl_values[current_index]);\n }\n \n return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size, current_hash_bucket);\n }\n \n \/* This function is not currently implemented\n __forceinline__\n __host__ __device__ iterator insert(const value_type& x)\n {\n const size_type hashtbl_size = m_hashtbl_size;\n value_type* hashtbl_values = m_hashtbl_values;\n const size_type key_hash = m_hf( x.first );\n size_type hash_tbl_idx = key_hash%hashtbl_size;\n \n value_type* it = 0;\n \n while (0 == it) {\n value_type* tmp_it = hashtbl_values + hash_tbl_idx;\n#ifdef __CUDA_ARCH__\n if ( std::numeric_limits::is_integer && std::numeric_limits::is_integer &&\n sizeof(unsigned long long int) == sizeof(value_type) )\n {\n pair2longlong converter = {0ull};\n converter.pair = thrust::make_pair( unused_key, m_unused_element );\n const unsigned long long int unused = converter.longlong;\n converter.pair = x;\n const unsigned long long int value = converter.longlong;\n const unsigned long long int old_val = atomicCAS( reinterpret_cast(tmp_it), unused, value );\n if ( old_val == unused ) {\n it = tmp_it;\n }\n else if ( count_collisions )\n {\n atomicAdd( &m_collisions, 1 );\n }\n } else {\n const key_type old_key = atomicCAS( &(tmp_it->first), unused_key, x.first );\n if ( m_equal( unused_key, old_key ) ) {\n (m_hashtbl_values+hash_tbl_idx)->second = x.second;\n it = tmp_it;\n }\n else if ( count_collisions )\n {\n atomicAdd( &m_collisions, 1 );\n }\n }\n#else\n \n #pragma omp critical\n {\n if ( m_equal( unused_key, tmp_it->first ) ) {\n hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first, x.second );\n it = tmp_it;\n }\n }\n#endif\n hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;\n }\n \n return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);\n }\n *\/\n \n __forceinline__\n __host__ __device__ const_iterator find(const key_type& k ) const\n {\n size_type key_hash = m_hf( k );\n size_type hash_tbl_idx = key_hash%m_hashtbl_size;\n \n value_type* begin_ptr = 0;\n \n size_type counter = 0;\n while ( 0 == begin_ptr ) {\n value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;\n const key_type tmp_val = tmp_ptr->first;\n if ( m_equal( k, tmp_val ) ) {\n begin_ptr = tmp_ptr;\n break;\n }\n if ( m_equal( unused_key , tmp_val ) || counter > m_hashtbl_size ) {\n begin_ptr = m_hashtbl_values + m_hashtbl_size;\n break;\n }\n hash_tbl_idx = (hash_tbl_idx+1)%m_hashtbl_size;\n ++counter;\n }\n \n return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,begin_ptr);\n }\n \n gdf_error assign_async( const concurrent_unordered_map& other, cudaStream_t stream = 0 )\n {\n m_collisions = other.m_collisions;\n if ( other.m_hashtbl_size <= m_hashtbl_capacity ) {\n m_hashtbl_size = other.m_hashtbl_size;\n } else {\n m_allocator.deallocate( m_hashtbl_values, m_hashtbl_capacity );\n m_hashtbl_capacity = other.m_hashtbl_size;\n 
m_hashtbl_size = other.m_hashtbl_size;\n \n m_hashtbl_values = m_allocator.allocate( m_hashtbl_capacity );\n }\n CUDA_TRY( cudaMemcpyAsync( m_hashtbl_values, other.m_hashtbl_values, m_hashtbl_size*sizeof(value_type), cudaMemcpyDefault, stream ) );\n return GDF_SUCCESS;\n }\n \n void clear_async( cudaStream_t stream = 0 ) \n {\n constexpr int block_size = 128;\n init_hashtbl<<<((m_hashtbl_size-1)\/block_size)+1,block_size,0,stream>>>( m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element );\n if ( count_collisions )\n m_collisions = 0;\n }\n \n unsigned long long get_num_collisions() const\n {\n return m_collisions;\n }\n \n void print()\n {\n for (size_type i = 0; i < m_hashtbl_size; ++i) \n {\n std::cout<\n#include \n#include \n#include \n#include \n\n#include _CUB_INCLUDE(cub\/block\/block_radix_sort.cuh)\n\nnamespace NKernel {\n\n __global__ void WriteCompressedIndexImpl(TCFeature feature, const ui8* bins, ui32 docCount, ui32* cindex) {\n\n cindex += feature.Offset;\n ui32 i = blockIdx.x * blockDim.x + threadIdx.x;\n while (i < docCount) {\n const ui32 bin = (((ui32)bins[i]) & feature.Mask) << feature.Shift;\n cindex[i] = cindex[i] | bin;\n i += blockDim.x * gridDim.x;\n }\n }\n\n void WriteCompressedIndex(TCFeature feature,\n const ui8* bins, ui32 docCount,\n ui32* cindex,\n TCudaStream stream) {\n\n const ui32 blockSize = 256;\n const ui32 numBlocks = (docCount + blockSize - 1) \/ blockSize;\n\n WriteCompressedIndexImpl<< < numBlocks, blockSize, 0, stream >> > (feature, bins, docCount, cindex);\n }\n\n\n\n template \n __launch_bounds__(BLOCK_SIZE, 2)\n __global__ void BinarizeFloatFeatureImpl(TCFeature feature, const float* values, ui32 docCount,\n const float* borders,\n const ui32* gatherIndex, ui32* dst) {\n\n const ui32 i = (blockIdx.x * BLOCK_SIZE * DOCS_PER_THREAD + threadIdx.x);\n\n __shared__ float sharedBorders[256];\n sharedBorders[0] = borders[0];\n __syncthreads();\n const int bordersCount = static_cast(sharedBorders[0]);\n __syncthreads();\n dst += feature.Offset;\n\n if (threadIdx.x < bordersCount) {\n sharedBorders[threadIdx.x] = LdgWithFallback(borders, threadIdx.x + 1);\n }\n __syncthreads();\n\n ui32 index[DOCS_PER_THREAD];\n float featureValues[DOCS_PER_THREAD];\n\n #pragma unroll\n for (int j = 0; j < DOCS_PER_THREAD; ++j) {\n index[j] = 0;\n const int idx = i + j * BLOCK_SIZE;\n\n if (idx < docCount) {\n const ui32 readIdx = gatherIndex ? 
StreamLoad(gatherIndex + idx) : idx;\n featureValues[j] = StreamLoad(values + readIdx);\n }\n }\n\n #pragma unroll\n for (int border = 0; border < bordersCount; ++border)\n {\n const float borderValue = sharedBorders[border];\n #pragma unroll\n for (int j = 0; j < DOCS_PER_THREAD; ++j)\n {\n if (featureValues[j] > borderValue)\n {\n ++index[j];\n }\n }\n }\n\n\n #pragma unroll\n for (int j = 0; j < DOCS_PER_THREAD; ++j)\n {\n const int idx = i + j * BLOCK_SIZE;\n\n if (idx < docCount) {\n\n if (ATOMIC_UPDATE)\n {\n atomicOr(dst + idx, (index[j] & feature.Mask) << feature.Shift);\n } else {\n ui32 bin = dst[idx];\n bin |= (index[j] & feature.Mask) << feature.Shift;\n dst[idx] = bin;\n }\n }\n }\n }\n\n \/\/smth like bootstrap for quantiles estimation\n template \n __global__ void FastGpuBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) {\n\n const int valuesPerThread = 2;\n using BlockRadixSort = cub::BlockRadixSort;\n const int tid = threadIdx.x;\n float vals[valuesPerThread];\n\n if (tid == 0 && blockIdx.x == 0) {\n borders[0] = bordersCount;\n }\n\n ui64 seed = (blockIdx.x * 6364136223846793005 + 1442695040888963407) + (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF;\n\n for (int i = 0; i < valuesPerThread; ++i) {\n const int idx = static_cast(AdvanceSeed(&seed) % size);\n vals[i] = StreamLoad(values + idx);\n }\n\n {\n using TTempStorage = typename BlockRadixSort::TempStorage;\n __shared__ TTempStorage temp;\n BlockRadixSort(temp).Sort(vals);\n }\n\n float sum = 0;\n float weight = 0;\n for (int i = 0; i < valuesPerThread; ++i) {\n sum += vals[i];\n weight += 1.0f;\n }\n\n __shared__ float localBorders[BLOCK_SIZE];\n localBorders[tid] = sum \/ weight;\n __syncthreads();\n\n if (tid < bordersCount) {\n const ui32 offset = static_cast((tid + 1.0f) * BLOCK_SIZE \/ bordersCount - 1e-5f);\n atomicAdd(borders + tid + 1, (localBorders[offset]) * 0.9999 \/ gridDim.x);\n }\n }\n\n __global__ void SortBordersImpl(float* borders, ui32 bordersCount)\n {\n\n using BlockRadixSort = cub::BlockRadixSort;\n ui32 tid = threadIdx.x;\n float val[1];\n val[0] = tid < bordersCount ? borders[tid] : PositiveInfty();\n using TTempStorage = typename BlockRadixSort::TempStorage;\n __shared__ TTempStorage temp;\n BlockRadixSort(temp).Sort(val);\n if (tid < bordersCount) {\n borders[tid] = val[0];\n }\n }\n\n void FastGpuBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) {\n FillBuffer(borders, 0.0f, bordersCount + 1, stream);\n const ui32 blockSize = 1024;\n const ui32 valuesPerBlock = 2 * blockSize;\n const ui32 numBlocks = min(CeilDivide(size, valuesPerBlock), 15);\n FastGpuBordersImpl<<>>(values, size, borders, bordersCount);\n SortBordersImpl<<<1, 256, 0, stream>>>(borders + 1, bordersCount);\n }\n\n __global__ void QuantileBordersImpl(const float* sortedValues, ui32 size, float* borders, ui32 bordersCount) {\n const ui32 tid = threadIdx.x;\n __shared__ float localBorders[256];\n\n if (tid < bordersCount) {\n const ui32 offset = static_cast((tid + 1.0) * size \/ (bordersCount + 1));\n localBorders[tid] = LdgWithFallback(sortedValues, offset);\n }\n __syncthreads();\n\n if (tid <(bordersCount + 1)) {\n borders[tid] = tid == 0 ? 
bordersCount : localBorders[tid - 1];\n }\n }\n\n\n __global__ void UniformBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) {\n\n const ui32 tid = threadIdx.x;\n const int blockSize = 1024;\n\n __shared__ float localMin[blockSize];\n __shared__ float localMax[blockSize];\n\n float minValue = PositiveInfty();\n float maxValue = NegativeInfty();\n\n ui64 seed = (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF;\n\n #pragma unroll 32\n for (int i = 0; i < 32; ++i) {\n const int idx = static_cast(AdvanceSeed(&seed) % size);\n float val = StreamLoad(values + idx);\n minValue = val < minValue ? val : minValue;\n maxValue = val > maxValue ? val : maxValue;\n }\n\n localMin[tid] = minValue * 0.999;\n localMax[tid] = maxValue * 1.001;\n __syncthreads();\n\n for (ui32 s = blockSize >> 1; s > 0; s >>= 1) {\n if (tid < s) {\n localMin[tid] = min(localMin[tid], localMin[tid + s]);\n localMax[tid] = max(localMax[tid], localMax[tid + s]);\n }\n __syncthreads();\n }\n minValue = localMin[0];\n maxValue = localMax[0];\n\n if (tid < (bordersCount + 1)) {\n const float borderIdx = tid * 1.0f \/ bordersCount;\n \/\/emulate ui8 rounding in cpu\n const float val = (minValue + borderIdx * (maxValue - minValue)) * 0.9999;\n borders[tid] = tid == 0 ? bordersCount : val;\n }\n }\n\n void ComputeQuantileBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) {\n QuantileBordersImpl<<< 1, 256, 0, stream >>> (values, size, borders, bordersCount);\n }\n\n void ComputeUniformBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) {\n UniformBordersImpl<<< 1, 1024, 0, stream >>> (values, size, borders, bordersCount);\n }\n\n void BinarizeFloatFeature(const float* values, ui32 docCount,\n const float* borders,\n TCFeature feature,\n ui32* dst,\n const ui32* gatherIndex,\n bool atomicUpdate,\n TCudaStream stream) {\n\n const ui32 blockSize = 1024;\n const ui32 docsPerThread = 8;\n const ui32 numBlocks = (docCount + docsPerThread * blockSize - 1) \/ (docsPerThread * blockSize);\n\n if (atomicUpdate)\n {\n BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount,\n borders, gatherIndex,\n dst);\n } else {\n BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount,\n borders, gatherIndex,\n dst);\n }\n }\n\n\n}\n","avg_line_length":35.9621212121,"max_line_length":139,"alphanum_fraction":0.5603539077} {"size":20993,"ext":"cu","lang":"Cuda","max_stars_count":256.0,"content":"\/*\n * -----------------------------------------------------------------\n * Programmer(s): Cody J. 
Balos @ LLNL\n * -----------------------------------------------------------------\n * SUNDIALS Copyright Start\n * Copyright (c) 2002-2021, Lawrence Livermore National Security\n * and Southern Methodist University.\n * All rights reserved.\n *\n * See the top-level LICENSE and NOTICE files for details.\n *\n * SPDX-License-Identifier: BSD-3-Clause\n * SUNDIALS Copyright End\n * -----------------------------------------------------------------\n * SUNMATRIX_CUSPARSE unit tests.\n * -----------------------------------------------------------------\n *\/\n\n#include \n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"test_sunmatrix.h\"\n#include \"dreadrb.h\"\n\nenum { IDENTITY, RANDOM, RBFILE };\n\n\/* Implementation specific test of SUNMatrix_cuSparse_SetKernelExecPolicy *\/\nint Test_SetKernelExecPolicy(SUNMatrix A, int myid);\n\nclass ATestExecPolicy : public SUNCudaExecPolicy\n{\npublic:\n ATestExecPolicy() : stream_(0) {}\n\n virtual size_t gridSize(size_t numWorkElements = 0, size_t blockDim = 0) const\n {\n return 1;\n }\n\n virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const\n {\n return 1;\n }\n\n virtual const cudaStream_t* stream() const\n {\n return &stream_;\n }\n\n virtual SUNCudaExecPolicy* clone() const\n {\n return static_cast(new ATestExecPolicy());\n }\n\nprivate:\n const cudaStream_t stream_;\n};\n\nstatic SUNContext sunctx;\n\n \/* ----------------------------------------------------------------------\n * Main SUNMatrix Testing Routine\n * --------------------------------------------------------------------*\/\nint main(int argc, char *argv[])\n{\n int fails=0; \/* counter for test failures *\/\n sunindextype M, N; \/* overall matrix dims *\/\n sunindextype blkrows, blkcols; \/* block matrix dims *\/\n int nblocks; \/* number of matrix blocks *\/\n int block_nnz_max; \/* max number of nnz in block *\/\n int mattype; \/* matrix storage type *\/\n N_Vector x, y, d_x, d_y; \/* test vectors *\/\n realtype* vecdata; \/* pointers to vector data *\/\n SUNMatrix A, B, C, D, dA, dB, dI; \/* test matrices *\/\n realtype* matdata; \/* pointer to matrix data *\/\n int print_timing, square;\n int matrix_to_use;\n sunindextype i, j;\n FILE* matrixfp;\n char* filename;\n cusparseStatus_t cusp_status;\n cusparseHandle_t cusp_handle;\n\n if (SUNContext_Create(NULL, &sunctx)) {\n printf(\"ERROR: SUNContext_Create failed\\n\");\n return(-1);\n }\n\n \/* initialize some input variables *\/\n blkrows = 0;\n blkcols = 0;\n nblocks = 0;\n square = 0;\n\n \/* check input *\/\n if (argc < 7) {\n printf(\"ERROR: SIX (6) inputs required: matrix (filename|random|identity), matrix rows, matrix cols, number of blocks, matrix type (CSR\/BCSR), print timing (0\/1)\\n\");\n return(-1);\n }\n\n \/* determine what test matrix to use *\/\n if (!strcmp(argv[1], \"random\")) {\n matrix_to_use = RANDOM;\n } else if (!strcmp(argv[1], \"identity\")) {\n matrix_to_use = IDENTITY;\n } else {\n matrix_to_use = RBFILE;\n filename = argv[1];\n }\n\n \/* if we are not reading from a file, verify that the dimension args are legal *\/\n if (matrix_to_use != RBFILE) {\n blkrows = (sunindextype) atol(argv[2]);\n if (blkrows <= 0) {\n printf(\"ERROR: number of rows must be a positive integer\\n\");\n return(-1);\n }\n\n blkcols = (sunindextype) atol(argv[3]);\n if (blkcols <= 0) {\n printf(\"ERROR: number of cols must be a positive integer\\n\");\n return(-1);\n }\n\n square = (blkrows == blkcols) ? 
1 : 0;\n }\n\n nblocks = (sunindextype) atol(argv[4]);\n if (nblocks < 1) {\n printf(\"ERROR: number of blocks must be a positive integer\\n\");\n return(-1);\n }\n\n if (!strcmp(argv[5], \"CSR\")) {\n mattype = SUNMAT_CUSPARSE_CSR;\n if (nblocks != 1) {\n printf(\"ERROR: the CSR format only supports 1 block\\n\");\n return(-1);\n }\n } else if (!strcmp(argv[5], \"BCSR\")) {\n mattype = SUNMAT_CUSPARSE_BCSR;\n if (matrix_to_use == RBFILE) {\n printf(\"ERROR: cannot read BCSR format from a file\\n\");\n }\n if (!square) {\n printf(\"ERROR: the BCSR format only supports square block matrices\\n\");\n return(-1);\n }\n } else {\n printf(\"ERROR: matrix type must be CSR or BCSR\\n\");\n return(-1);\n }\n\n print_timing = atoi(argv[6]);\n SetTiming(print_timing);\n\n \/* Initialize cuSPARSE *\/\n cusp_status = cusparseCreate(&cusp_handle);\n if (cusp_status != CUSPARSE_STATUS_SUCCESS) {\n printf(\"ERROR: could not create cuSPARSE handle\\n\");\n return(-1);\n }\n\n \/* Initialize vectors and matrices to NULL *\/\n x = NULL;\n y = NULL;\n A = NULL;\n B = NULL;\n C = NULL;\n D = NULL;\n dA = NULL;\n dB = NULL;\n dI = NULL;\n\n if (matrix_to_use == RANDOM) {\n M = blkrows * nblocks;\n N = blkcols * nblocks;\n block_nnz_max = blkrows*blkcols \/ 2;\n\n \/* Create sparsity pattern for a block. *\/\n sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));\n sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));\n for (i=0; i>> FAILED test -- SetKernelExecPolicy \\n\", myid);\n TEST_STATUS(\" After SUNMatClone, B == NULL \\n \\n\", myid);\n return(1);\n }\n\n \/* copy data *\/\n if (SUNMatCopy(I, B)) {\n TEST_STATUS(\">>> FAILED test -- SetKernelExecPolicy \\n\", myid);\n TEST_STATUS(\" SUNMatCopy returned nonzero \\n \\n\", myid);\n SUNMatDestroy(B);\n return(1);\n }\n\n \/* set kernel exec policy *\/\n ATestExecPolicy exec_policy;\n SUNMatrix_cuSparse_SetKernelExecPolicy(B, &exec_policy);\n\n \/* try out an operation *\/\n if (SUNMatScaleAddI(RCONST(-1.0), B)) {\n TEST_STATUS(\">>> FAILED test -- SetKernelExecPolicy \\n\", myid);\n TEST_STATUS(\" SUNMatScaleAddI returned nonzero \\n \\n\", myid);\n SUNMatDestroy(B);\n return(1);\n }\n\n \/* check matrix *\/\n if (check_matrix_entry(B, ZERO, tol)) {\n TEST_STATUS(\">>> FAILED test -- SetKernelExecPolicy \\n\", myid);\n TEST_STATUS(\" check_matrix_entry returned nonzero \\n \\n\", myid);\n SUNMatDestroy(B);\n return(1);\n }\n\n TEST_STATUS(\" PASSED test -- SetKernelExecPolicy \\n\", myid);\n\n SUNMatDestroy(B);\n\n return 0;\n}\n\n \/* ----------------------------------------------------------------------\n * Check matrix\n * --------------------------------------------------------------------*\/\n int check_matrix(SUNMatrix dA, SUNMatrix dB, realtype tol)\n {\n int failure = 0;\n SUNMatrix A, B;\n realtype *Adata, *Bdata;\n sunindextype *Aindexptrs, *Bindexptrs;\n sunindextype *Aindexvals, *Bindexvals;\n sunindextype i, ANP, Annz, Bnnz;\n\n \/* copy matrix data to host for the checks *\/\n A = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dA), SUNMatrix_cuSparse_Columns(dA),\n SUNMatrix_cuSparse_NNZ(dA), CSR_MAT, sunctx);\n B = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dB), SUNMatrix_cuSparse_Columns(dB),\n SUNMatrix_cuSparse_NNZ(dB), CSR_MAT, sunctx);\n\n failure = SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A),\n SM_INDEXPTRS_S(A),\n SM_INDEXVALS_S(A));\n failure = SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B),\n SM_INDEXPTRS_S(B),\n SM_INDEXVALS_S(B));\n cudaDeviceSynchronize();\n\n \/* get 
matrix pointers *\/\n Adata = SUNSparseMatrix_Data(A);\n Aindexptrs = SUNSparseMatrix_IndexPointers(A);\n Aindexvals = SUNSparseMatrix_IndexValues(A);\n ANP = SUNSparseMatrix_NP(A);\n Annz = SUNSparseMatrix_NNZ(A);\n\n Bdata = SUNSparseMatrix_Data(B);\n Bindexptrs = SUNSparseMatrix_IndexPointers(B);\n Bindexvals = SUNSparseMatrix_IndexValues(B);\n Bnnz = SUNSparseMatrix_NNZ(B);\n\n \/* matrices must have same sparsetype, shape and actual data lengths *\/\n if (SUNMatGetID(dA) != SUNMatGetID(dB)) {\n printf(\">>> ERROR: check_matrix: Different storage types (%d vs %d)\\n\",\n SUNMatGetID(dA), SUNMatGetID(dB));\n SUNMatDestroy(dA); SUNMatDestroy(dB);\n return(1);\n }\n if (SUNMatrix_cuSparse_SparseType(A) != SUNMatrix_cuSparse_SparseType(B)) {\n printf(\">>> ERROR: check_matrix: Different storage types (%d vs %d)\\n\",\n SUNMatrix_cuSparse_SparseType(A), SUNMatrix_cuSparse_SparseType(B));\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n if (SUNMatrix_cuSparse_Rows(dA) != SUNMatrix_cuSparse_Rows(dB)) {\n printf(\">>> ERROR: check_matrix: Different numbers of rows (%ld vs %ld)\\n\",\n (long int) SUNMatrix_cuSparse_Rows(dA), (long int) SUNMatrix_cuSparse_Rows(dB));\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n if (SUNMatrix_cuSparse_Columns(dA) != SUNMatrix_cuSparse_Columns(dB)) {\n printf(\">>> ERROR: check_matrix: Different numbers of columns (%ld vs %ld)\\n\",\n (long int) SUNMatrix_cuSparse_Columns(dA),\n (long int) SUNMatrix_cuSparse_Columns(dB));\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n if (Annz != Bnnz) {\n printf(\">>> ERROR: check_matrix: Different numbers of nonzeros (%ld vs %ld)\\n\",\n (long int) Annz, (long int) Bnnz);\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n\n \/* compare sparsity patterns *\/\n for (i=0; i ZERO) {\n printf(\">>> ERROR: check_matrix: Different indexptrs \\n\");\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n for (i=0; i ZERO) {\n printf(\">>> ERROR: check_matrix: Different indexvals \\n\");\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n\n \/* compare matrix values *\/\n for(i=0; i ZERO) {\n printf(\">>> ERROR: check_matrix: Different entries \\n\");\n SUNMatDestroy(A); SUNMatDestroy(B);\n return(1);\n }\n\n SUNMatDestroy(A); SUNMatDestroy(B);\n\n return(0);\n }\n\n int check_matrix_entry(SUNMatrix dA, realtype val, realtype tol)\n {\n int failure = 0;\n realtype *Adata;\n sunindextype i;\n\n \/* copy matrix data to host for the checks *\/\n Adata = (realtype*) malloc(SUNMatrix_cuSparse_NNZ(dA)*sizeof(realtype));\n failure = SUNMatrix_cuSparse_CopyFromDevice(dA, Adata, NULL, NULL);\n cudaDeviceSynchronize();\n\n \/* compare data *\/\n for(i=0; i < SUNMatrix_cuSparse_NNZ(dA); i++) {\n failure += SUNRCompareTol(Adata[i], val, tol);\n }\n\n free(Adata);\n\n if (failure > ZERO)\n return(1);\n else\n return(0);\n }\n\n int check_vector(N_Vector expected, N_Vector computed, realtype tol)\n {\n int failure = 0;\n realtype *xdata, *ydata;\n sunindextype xldata, yldata;\n sunindextype i;\n\n \/* get vector data *\/\n xdata = N_VGetHostArrayPointer_Cuda(expected);\n ydata = N_VGetHostArrayPointer_Cuda(computed);\n\n \/* copy data to host *\/\n N_VCopyFromDevice_Cuda(expected);\n N_VCopyFromDevice_Cuda(computed);\n cudaDeviceSynchronize();\n\n \/* check data lengths *\/\n xldata = N_VGetLength_Cuda(expected);\n yldata = N_VGetLength_Cuda(computed);\n\n if (xldata != yldata) {\n printf(\">>> ERROR: check_vector: Different data array lengths \\n\");\n return(1);\n }\n\n \/* check vector data *\/\n for(i=0; i 
< xldata; i++){\n failure += SUNRCompareTol(xdata[i], ydata[i], tol);\n }\n\n if (failure > ZERO)\n return(1);\n else\n return(0);\n }\n\n booleantype has_data(SUNMatrix A)\n {\n realtype *Adata = SUNMatrix_cuSparse_Data(A);\n if (Adata == NULL)\n return SUNFALSE;\n else\n return SUNTRUE;\n }\n\n booleantype is_square(SUNMatrix A)\n {\n if (SUNMatrix_cuSparse_Rows(A) == SUNMatrix_cuSparse_Columns(A))\n return SUNTRUE;\n else\n return SUNFALSE;\n }\n\nvoid sync_device(SUNMatrix A)\n{\n cudaDeviceSynchronize();\n}\n","avg_line_length":30.0759312321,"max_line_length":170,"alphanum_fraction":0.6060591626} {"size":8917,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright (c) 2019, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \"copy_if.cuh\"\n#include \n#include \"table\/device_table.cuh\"\n#include \n#include \n#include \n#include \n \nnamespace {\n\n\/\/ Returns true if the mask is true and valid (non-null) for index i\n\/\/ This is the filter functor for apply_boolean_mask\n\/\/ Note we use a functor here so we can cast to a bitmask_t __restrict__\n\/\/ pointer on the host side, which we can't do with a lambda.\ntemplate \nstruct boolean_mask_filter\n{\n boolean_mask_filter(gdf_column const & boolean_mask) :\n size{boolean_mask.size},\n data{reinterpret_cast(boolean_mask.data)},\n bitmask{reinterpret_cast(boolean_mask.valid)}\n {}\n\n __device__ inline \n bool operator()(gdf_index_type i)\n {\n if (i < size) {\n bool valid = !has_nulls || bit_mask::is_valid(bitmask, i);\n bool is_true = !has_data || (cudf::true_v == data[i]);\n return is_true && valid;\n }\n return false;\n }\n\n gdf_size_type size;\n cudf::bool8 const * __restrict__ data;\n bit_mask_t const * __restrict__ bitmask;\n};\n\n\/\/ Returns true if the valid mask is true for index i\n\/\/ Note we use a functor here so we can cast to a bitmask_t __restrict__\n\/\/ pointer on the host side, which we can't do with a lambda.\nstruct valid_filter\n{\n valid_filter(gdf_column const & column) :\n size{column.size},\n bitmask{reinterpret_cast(column.valid)}\n { CUDF_EXPECTS(nullptr != column.valid, \"Null valid bitmask\");}\n\n __device__ inline \n bool operator()(gdf_index_type i)\n {\n if (i < size) {\n bool valid = bit_mask::is_valid(bitmask, i);\n return valid;\n }\n return false;\n }\n\n gdf_size_type size;\n bit_mask_t const * __restrict__ bitmask;\n};\n\n} \/\/ namespace\n\nnamespace cudf {\n\n\/*\n * Filters a column using a column of boolean values as a mask.\n *\n * calls apply_filter() with the `boolean_mask_filter` functor.\n *\/\ngdf_column apply_boolean_mask(gdf_column const &input,\n gdf_column const &boolean_mask) {\n if (boolean_mask.size == 0 || input.size == 0)\n return cudf::empty_like(input);\n\n \/\/ for non-zero-length masks we expect one of the pointers to be non-null \n CUDF_EXPECTS(boolean_mask.data != nullptr ||\n boolean_mask.valid != nullptr, \"Null boolean_mask\");\n CUDF_EXPECTS(boolean_mask.dtype == GDF_BOOL8, \"Mask must be 
Boolean type\");\n \n \/\/ zero-size inputs are OK, but otherwise input size must match mask size\n CUDF_EXPECTS(input.size == 0 || input.size == boolean_mask.size, \n \"Column size mismatch\");\n\n if (boolean_mask.data == nullptr)\n return detail::copy_if(input, boolean_mask_filter{boolean_mask});\n else if (boolean_mask.valid == nullptr || boolean_mask.null_count == 0)\n return detail::copy_if(input, boolean_mask_filter{boolean_mask});\n else\n return detail::copy_if(input, boolean_mask_filter{boolean_mask});\n}\n\n\/*\n * Filters a column to remove null elements.\n *\/\ngdf_column drop_nulls(gdf_column const &input) {\n if (input.valid != nullptr && input.null_count != 0)\n return detail::copy_if(input, valid_filter{input});\n else { \/\/ no null bitmask, so just copy\n return cudf::copy(input);\n }\n}\n\n\nnamespace detail {\n\n\/*\n * unique_copy copies elements from the range [first, last) to a range beginning\n * with output, except that in a consecutive group of duplicate elements only\n * depending on last argument keep, only the first one is copied, or the last\n * one is copied or neither is copied. The return value is the end of the range\n * to which the elements are copied.\n *\/\ntemplate::type>\n OutputIterator unique_copy(thrust::execution_policy &exec,\n InputIterator first,\n InputIterator last,\n OutputIterator output,\n BinaryPredicate comp,\n const duplicate_keep_option keep)\n{\n IndexType n = (last-first)-1;\n if (keep == duplicate_keep_option::KEEP_FIRST) {\n return thrust::copy_if(exec,\n first,\n last,\n thrust::counting_iterator(0),\n output,\n [first, comp, n] __device__ (const IndexType i) {\n return (i == 0 || !comp(first[i], first[i-1]));\n });\n } else if (keep == duplicate_keep_option::KEEP_LAST) {\n return thrust::copy_if(exec,\n first,\n last,\n thrust::counting_iterator(0),\n output,\n [first, comp, n] __device__ (const IndexType i) {\n return (i == n || !comp(first[i], first[i+1]));\n });\n } else {\n return thrust::copy_if(exec,\n first,\n last,\n thrust::counting_iterator(0),\n output,\n [first, comp, n] __device__ (const IndexType i) {\n return (i == 0 || !comp(first[i], first[i-1])) \n && (i == n || !comp(first[i], first[i+1]));\n });\n }\n}\n\nauto \nget_unique_ordered_indices(const cudf::table& key_columns,\n const duplicate_keep_option keep,\n const bool nulls_are_equal = true,\n cudaStream_t stream=0)\n{\n gdf_size_type ncols = key_columns.num_columns();\n gdf_size_type nrows = key_columns.num_rows();\n\n \/\/ sort only indices\n rmm::device_vector sorted_indices(nrows);\n gdf_context context;\n gdf_column sorted_indices_col;\n CUDF_TRY(gdf_column_view(&sorted_indices_col, (void*)(sorted_indices.data().get()),\n nullptr, nrows, GDF_INT32));\n CUDF_TRY(gdf_order_by(key_columns.begin(),\n nullptr,\n key_columns.num_columns(),\n &sorted_indices_col,\n &context));\n\n \/\/ extract unique indices \n rmm::device_vector unique_indices(nrows);\n auto exec = rmm::exec_policy(stream)->on(stream);\n auto device_input_table = device_table::create(key_columns, stream);\n rmm::device_vector::iterator result_end;\n\n bool nullable = device_input_table->has_nulls();\n if(nullable) {\n auto comp = row_equality_comparator(*device_input_table,\n nulls_are_equal);\n result_end = unique_copy(exec,\n sorted_indices.begin(),\n sorted_indices.end(),\n unique_indices.begin(),\n comp,\n keep);\n } else {\n auto comp = row_equality_comparator(*device_input_table,\n nulls_are_equal);\n result_end = unique_copy(exec,\n sorted_indices.begin(),\n sorted_indices.end(),\n 
unique_indices.begin(),\n                              comp,\n                              keep);\n  }\n  \/\/not resizing vector to avoid copy\n\n  return std::make_pair(unique_indices, \n                        thrust::distance(unique_indices.begin(), result_end));\n}\n} \/\/namespace detail\n\ncudf::table drop_duplicates(const cudf::table& input_table,\n                            const cudf::table& key_columns,\n                            const duplicate_keep_option keep,\n                            const bool nulls_are_equal)\n{\n  CUDF_EXPECTS( input_table.num_rows() == key_columns.num_rows(), \"number of \\\nrows in input table should be equal to number of rows in key columns table\");\n\n  if (0 == input_table.num_rows() || \n      0 == input_table.num_columns() ||\n      0 == key_columns.num_columns() \n     ) {\n    return cudf::empty_like(input_table);\n  }\n  rmm::device_vector unique_indices;\n  gdf_size_type unique_count; \n  std::tie(unique_indices, unique_count) =\n      detail::get_unique_ordered_indices(key_columns, keep, nulls_are_equal);\n  \/\/ Allocate output columns\n  cudf::table destination_table(unique_count, cudf::column_dtypes(input_table), true);\n  \/\/ run gather operation to establish new order\n  cudf::gather(&input_table, unique_indices.data().get(), &destination_table);\n  nvcategory_gather_table(input_table, destination_table);\n  return destination_table;\n}\n} \/\/ namespace cudf\n","avg_line_length":34.6964980545,"max_line_length":86,"alphanum_fraction":0.65896602} {"size":18548,"ext":"cuh","lang":"Cuda","max_stars_count":39.0,"content":"#pragma once\n#ifndef SRC_KERNELS_DECOMPRESSION_RUN_POSITION_CUH\n#define SRC_KERNELS_DECOMPRESSION_RUN_POSITION_CUH\n\n#include \"kernels\/common.cuh\"\n#include \"cuda\/on_device\/primitives\/warp.cuh\"\n#include \"cuda\/on_device\/primitives\/block.cuh\"\n#include \"cuda\/on_device\/math.cuh\"\n#include \"kernels\/reduction\/scan.cuh\"\n#include \"cuda\/on_device\/generic_shuffle.cuh\"\n#include \"cuda\/functors.hpp\"\n\nnamespace cuda {\nnamespace kernels {\nnamespace decompression {\nnamespace run_position_encoding {\n\nusing namespace grid_info::linear;\n\nnamespace detail {\n\n\/\/ Everything a block needs to know in order\n\/\/ to perform decompression, which does not\n\/\/ involve any significant computation and can\n\/\/ be allowed to be computed redundantly by all threads\ntemplate\nstruct segment_decompression_params_t {\n\tuint_t first_run_index; \/\/ of the runs intersecting the segment\n\tsize_type_by_index_size num_runs;\n\tuint_t uncompressed_start_position;\n\tsize_type_by_index_size decompressed_length;\n};\n\nusing namespace grid_info::linear;\n\nnamespace block {\n\ntemplate<\n\tunsigned UncompressedIndexSize,\n\tunsigned UncompressedSize,\n\tunsigned PositionOffsetSize,\n\tbool PositionsAreRelative,\n\tunsigned RunIndexSize\n>\n__forceinline__ __device__ void decompress_a_segment_with_few_runs(\n\tuint_t* __restrict__ decompressed,\n\t\/\/ this may be a pointer to the absolute beginning of the input data,\n\t\/\/ or just the beginning of the segment - depending on whether PositionsAreRelative\n\t\/\/ or not.\n\tconst uint_t* __restrict__ run_start_positions, \/\/ for this segment only\n\tconst uint_t* __restrict__ run_data, \/\/ for this segment only\n\tconst segment_decompression_params_t&\n\t                          params)\n{\n\tusing position_offset_size_type = size_type_by_index_size;\n\tusing run_index_type = uint_t;\n\tusing run_index_size_type = size_type_by_index_size;\n\tusing uncompressed_index_size_type = size_type_by_index_size;\n\tusing uncompressed_type = uint_t;\n\t\t\/\/ Of course the type might actually be different, we only think of it as\n\t\t\/\/ contiguous storage.\n\t\t\/\/\n\t\t\/\/ TODO: 
perhaps we should use an array instead?\n\n\tenum {\n\t\telements_per_thread_write = UncompressedSize >= sizeof(native_word_t) ?\n\t\t\t1 : sizeof(native_word_t) \/ UncompressedSize\n\t};\n\n\t\/\/ TODO: Should we special-case segments which only have a single run,\n\t\/\/ or a couple of runs?\n\n\t\/\/ Note: We have two loops here, the outer one (the block-stride loop)\n\t\/\/ and the inner one (run-iteration loop). Now, because we synch the\n\t\/\/ warp lanes' knowledge of runs they've seen with each iteration of\n\t\/\/ the outer loop, we have that the _maximum_ number of iterations\n\t\/\/ of a lane on the inner loop is the increase in of\n\t\/\/ first_possible_run_for_next_write for the next outer-loop iteration.\n\t\/\/ In the next iteration of the outer loop, the number of possible\n\t\/\/ iterations of the inner loop decreases by that number.\n\t\/\/\n\t\/\/ In other words, the total number of iterations of the inner loop\n\t\/\/ (when we take the maximum over the warp due to predicated execution),\n\t\/\/ over all iterators of the outer loop, is\n\t\/\/\n\t\/\/ I = R + L \/ W = L ( R\/L + 1\/W ) = L ( 1\/A + 1\/W )\n\t\/\/\n\t\/\/ where\n\t\/\/ R = num_runs\n\t\/\/ L = params.decompressed_length\n\t\/\/ A = average run length\n\t\/\/\n\t\/\/ and the number of iterations per element written is\n\t\/\/\n\t\/\/ I \/ (L\/W) = L\/(L\/W) ( 1\/A + 1\/W ) = W ( 1\/A + 1\/W ) = 1 + W\/A\n\t\/\/\n\t\/\/ This is pretty good, even if runs involve memory reads; after all,\n\t\/\/ other warps have likely already read that, so they'll be mostly\n\t\/\/ reads from L1 cache. The L1 cache will be no less than 16K, so\n\t\/\/ with any bit of fairness in warp scheduling we should not need\n\t\/\/ repeated reads from main memory.\n\n\trun_index_size_type first_possible_run_for_next_write = 0;\n\t\t\/\/ this will increase as we advance with our writes beyond further\n\t\t\/\/ and further runs\n\n\tposition_offset_size_type write_position;\n\t\t\/\/ Keeping this out of the lambda below since we'll use it later\n\t\t\/\/ to decide which threads\n\n\tauto locate_and_perform_next_write = [&](position_offset_size_type relative_write_position) {\n\n\t\tusing write_type = typename std::conditional<\n\t\t\t(UncompressedSize >= sizeof(native_word_t)),\n\t\t\tuncompressed_type, native_word_t>::type;\n\n\t\twrite_position = relative_write_position +\n\t\t\t(PositionsAreRelative ? 
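\/\/ Worked instance of the bound above, assuming W = 32 lanes and an average run length A = 8: iterations per element written = 1 + W\/A = 1 + 32\/8 = 5, so each output element costs only a few probes of the run table, and since neighbouring warps touch the same runs those probes mostly hit L1.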
0 : params.uncompressed_start_position);\n\t\t\t\/\/ note that this value is different for each lane; and that\n\t\t\t\/\/ they are spaced 4 bytes - not 4 positions - from each other\n\n\t\trun_index_size_type run_index = first_possible_run_for_next_write + 1;\n\t\t\t\/\/ TODO: Perhaps hoist this out of the function?\n\n\t\tarray thread_write_buffer;\n\t\t\t\/\/ Notes:\n\t\t\t\/\/ * The total size of this variable is a multiple of sizeof(unsigned)\n\t\t\t\/\/ * This should be optimized into registers\n\n\t\trun_index_type run_covering_current_element;\n\n\t\t#pragma unroll\n\t\tfor(unsigned element_within_thread_write = 0;\n\t\t element_within_thread_write < elements_per_thread_write;\n\t\t element_within_thread_write++)\n\t\t{\n\t\t\t#pragma unroll\n\t\t\tfor(; run_index < params.num_runs; run_index++)\n\t\t\t{\n\t\t\t\tif (run_start_positions[run_index] >\n\t\t\t\t (write_position + element_within_thread_write)) { break; }\n\t\t\t}\n\t\t\trun_covering_current_element = run_index - 1;\n\t\t\t\t\/\/ so it may be num_runs_used - 1 - in which case we haven't\n\t\t\t\t\/\/ checked run_start_positions for the next run's start pos\n\t\t\tthread_write_buffer[element_within_thread_write] = run_data[run_covering_current_element];\n\t\t}\n\n\t\t*(reinterpret_cast(decompressed + write_position)) =\n\t\t\treinterpret_cast(thread_write_buffer);\n\n\t\t{\n\t\t\t\/\/ At this point we're done with the current write; but - that\n\t\t\t\/\/ different lanes may have been considering different runs (if the lane\n\t\t\t\/\/ is not wholly covered by a single run) - with the last lane having\n\t\t\t\/\/ examined the farthest run. Since in subsequent runs we will only be\n\t\t\t\/\/ writing to farther elements in the output, it's useless for _any_\n\t\t\t\/\/ lanes to consider any run before this\n\t\t\t\/\/ last-run-considered-by-the-last-lane. So let's get it:\n\n\t\t\tauto next_relative_write_position_for_this_thread =\n\t\t\t\trelative_write_position + grid_info::linear::block::length() * elements_per_thread_write;\n\t\t\tif (next_relative_write_position_for_this_thread < params.decompressed_length) {\n\t\t\t\tfirst_possible_run_for_next_write =\n\t\t\t\t\tprimitives::warp::get_from_last_lane(run_covering_current_element);\n\t\t\t}\n\t\t\t\/\/ No 'else' here, since in the 'else' case that was the last write\n\t\t\t\/\/ for this thread in this segment\n\t\t}\n\t};\n\n\t\/\/ I'd thought of making this a separate function, with or without the lambda we call here,\n\t\/\/ but we use the 'innards' of the lambda below for the slack so it won't be much 'cleaner'\n\n\tauto truncated_decompressed_length =\n\t\tclear_lower_k_bits(params.decompressed_length, log2_constexpr(elements_per_thread_write));\n\t\t\/\/ in fact, this will round down to a multiple of 4 = sizeof(int)\n\tposition_offset_size_type relative_write_position = thread::index_in_block() * elements_per_thread_write;\n\t#pragma unroll\n\tfor(;\n\t relative_write_position < truncated_decompressed_length;\n\t\trelative_write_position += ::grid_info::linear::block::length() * elements_per_thread_write)\n\t{\n\t\tlocate_and_perform_next_write(relative_write_position);\n\t}\n\n\t\/\/\n\t\/\/\n\t\/\/ From here on - just handling the slack\n\t\/\/\n\t\/\/\n\t\/\/ We need to write to the last few elements, which can't be grouped\n\t\/\/ into a single 4-byte write. 
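\/\/ Slack arithmetic for a hypothetical segment, assuming 4-byte native words and 2-byte elements: elements_per_thread_write = 4 \/ 2 = 2, so decompressed_length = 1031 yields truncated_decompressed_length = 1030 for the vectorized loop and leaves num_slack_elements = 1 for the single-element path below.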
We'll do that with some threads\n\t\/\/ performing single-element writes\n\n\tif (elements_per_thread_write > 1) {\n\n\t\tauto num_actually_decompressed = truncated_decompressed_length;\n\t\tauto num_slack_elements = params.decompressed_length - num_actually_decompressed;\n\t\tif (num_slack_elements == 0) { return; }\n\n\t\tif (truncated_decompressed_length == 0) {\n\t\t\tif (thread::index_in_block() >= num_slack_elements) { return; }\n\t\t\tfirst_possible_run_for_next_write = 0;\n\t\t}\n\t\telse {\n\t\t\tauto this_thread_wrote_last_element_so_far =\n\t\t\t\t(relative_write_position == (truncated_decompressed_length - elements_per_thread_write) +\n\t\t\t\t\telements_per_thread_write * grid_info::linear::block::length());\n\t\t\tif (primitives::warp::all_satisfy(not this_thread_wrote_last_element_so_far)) { return; }\n\t\t\tauto lane_which_wrote_the_last_element_so_far =\n\t\t\t\tprimitives::warp::first_lane_satisfying(this_thread_wrote_last_element_so_far);\n\t\t\tauto lane_which_saw_the_farthest_run_so_far = lane_which_wrote_the_last_element_so_far;\n\t\t\tprimitives::warp::update_from_lane(first_possible_run_for_next_write, lane_which_saw_the_farthest_run_so_far);\n\t\t\tif (lane::index() >= num_slack_elements) { return; }\n\t\t}\n\n\t\tauto slack_write_position = num_actually_decompressed + lane::index() +\n\t\t\t(PositionsAreRelative ? 0 : params.uncompressed_start_position);\n\n\t\tauto run_index = first_possible_run_for_next_write + 1;\n\n\t\twhile (run_index < params.num_runs\n\t\t and run_start_positions[run_index] <= slack_write_position)\n\t\t{\n\t\t\trun_index++;\n\t\t}\n\t\tauto run_covering_current_element = run_index - 1;\n\t\tdecompressed[slack_write_position] = run_data[run_covering_current_element];\n\t}\n}\n\ntemplate\n__forceinline__ __device__ void decompress_segment(\n\tuint_t* __restrict__ decompressed,\n\t\/\/ this may be a pointer to the absolute beginning of the input data,\n\t\/\/ or just the beginning of the segment - depending on whether PositionsAreRelative\n\t\/\/ or not.\n\tconst uint_t* __restrict__ run_start_positions, \/\/ for this segment only\n\tconst uint_t* __restrict__ run_data, \/\/ for this segment only\n\tconst segment_decompression_params_t&\n\t params)\n{\n\t\/\/ TODO: Act differently for blocks with high, low and super-low average\n\t\/\/ run length. (Doing that may require additional segment decompression\n\t\/\/ parameters to be computed.)\n\n\tdecompress_a_segment_with_few_runs(\n\t\tdecompressed, run_start_positions, run_data, params);\n}\n\n\n\/\/ TODO: perhaps convert this into a named constructor idiom for\n\/\/ segment_decompression_params_t ?\ntemplate\n__forceinline__ __device__ segment_decompression_params_t\nresolve_segment_decompression_params(\n\tconst uint_t* __restrict__ run_start_positions,\n\tconst uint_t* __restrict__ position_anchors,\n\tsize_type_by_index_size anchoring_period,\n\tsize_type_by_index_size num_anchors,\n\tsize_type_by_index_size num_element_runs,\n\tsize_type_by_index_size total_uncompressed_length)\n{\n\tsegment_decompression_params_t params;\n\t\/\/ Each block is responsible for a certain number of output elements, not of\n\t\/\/ input elements (the latter option runs too much of a risk of block\n\t\/\/ workload imbalance). 
The block's output elements range from one\n\t\/\/ position anchor to the next (or to the end if it's the last one)\n\n\tauto anchor_index = grid_info::linear::block::index();\n\tauto next_anchor_index = grid_info::linear::block::index() + 1;\n\tparams.uncompressed_start_position = anchor_index * anchoring_period;\n\n\tauto is_last_segment = (next_anchor_index == num_anchors);\n\tparams.decompressed_length = is_last_segment ?\n\t\ttotal_uncompressed_length - params.uncompressed_start_position :\n\t\tanchoring_period;\n\n\tparams.first_run_index = position_anchors[anchor_index];\n\tdecltype(num_element_runs) one_past_last_run_index;\n\tif (is_last_segment) { one_past_last_run_index = num_element_runs; }\n\telse {\n\t\t\/\/ Here is the one point where position encoding is less friendly than\n\t\t\/\/ length encoding: The anchors are not necessarily aligned to the\n\t\t\/\/ beginning of the run; but in RLE, the anchors must include an offset\n\t\t\/\/ value into the run, while in RPE that's not necessary. ... except\n\t\t\/\/ that we do need a bit of work to replace that information. This\n\t\t\/\/ is where we do that work, in order to figure out whether the run at\n\t\t\/\/ the next anchor still has some elements in it belonging to this\n\t\t\/\/ segment. It will cost us an extra read :-(\n\t\tauto run_index_at_next_anchor = position_anchors[next_anchor_index];\n\t\tauto next_uncompressed_segment_start_position =\n\t\t\tparams.uncompressed_start_position + anchoring_period;\n\n\t\tone_past_last_run_index = run_index_at_next_anchor +\n\t\t\t(run_start_positions[run_index_at_next_anchor] < next_uncompressed_segment_start_position);\n\t\t\/\/ TODO: Can we avoid this extra read, pretend we do have extras,\n\t\t\/\/ and just notice there's zero of them later on?\n\t\t\/\/ TODO: Would it be worth it, to, say, use one bit of the anchor\n\t\t\/\/ to store a flag indicating whether or not it's aligned\n\t\t\/\/ with the start of a run? It would make our lives easier here...\n\t\t\/\/ although, on the other hand, it would mess up separation of columns.\n\t\t\/\/ Hmm.\n\t}\n\tparams.num_runs = one_past_last_run_index - params.first_run_index;\n\treturn params;\n}\n\n} \/\/ namespace block\n\n} \/\/ namespace detail\n\n\/**\n * Decompress a Run-Position-Encoded (RPE) compressed column,\n * which is made up of consecutive runs of identically-valued\n * elements (typically longer runs), using periodic anchoring\n * information.\n *\n * @note (max anchoring period)\n *\n * @note we assume all input arrays are well-aligned.\n *\n * @todo more thought for uncompressed sizes other than 4\n *\n *\/\ntemplate\n__global__ void decompress(\n\tuint_t* __restrict__ decompressed,\n\tconst uint_t* __restrict__ run_data,\n\tconst uint_t* __restrict__ run_start_positions,\n\tconst uint_t* __restrict__ position_anchors,\n\tsize_type_by_index_size anchoring_period,\n\tsize_type_by_index_size num_anchors,\n\tsize_type_by_index_size num_element_runs, \/\/ = length of the run_* arrays\n\tsize_type_by_index_size uncompressed_length)\n{\n\tstatic_assert(is_power_of_2(UncompressedIndexSize) and UncompressedIndexSize <= 8, \"unsupported UncompressedIndexSize\");\n\tstatic_assert(is_power_of_2(PositionOffsetSize) and PositionOffsetSize <= 8, \"unsupported PositionOffsetSize\");\n\tstatic_assert(PositionsAreRelative or PositionOffsetSize >= UncompressedIndexSize,\n\t\t\"If run positions are in absolute values, their type must be able to cover the entire \"\n\t\t\"potential range of data (i.e. 
their type must be at least as large as the size type\");\n\n\t\/\/ TODO: For large anchoring periods, consider breaking up segments into consecutive pieces,\n\t\/\/ with several warps handling each of them.\n\n\tauto block_params = detail::block::resolve_segment_decompression_params\n\t\t(\n\t\trun_start_positions, position_anchors, anchoring_period,\n\t\tnum_anchors, num_element_runs, uncompressed_length);\n\n\tdetail::block::decompress_segment\n\t\t(\n\t\tPositionsAreRelative ? decompressed + block_params.uncompressed_start_position : decompressed,\n\t\t\t\/\/ TODO: Would it be better to use at_start_of_first_run instead of start_ ?\n\t\trun_start_positions + block_params.first_run_index,\n\t\trun_data + block_params.first_run_index,\n\t\tblock_params); \/\/ Note we don't pass the anchor arrays\n}\n\n\ntemplate\nclass launch_config_resolution_params_t final : public kernels::launch_config_resolution_params_t {\npublic:\n\tusing parent = kernels::launch_config_resolution_params_t;\npublic:\n\tlaunch_config_resolution_params_t(\n\t\tdevice::properties_t device_properties_,\n\t\tsize_t uncompressed_length,\n\t\tsize_t position_anchoring_period,\n\t\toptional dynamic_shared_mem_limit = nullopt) :\n\t\tparent(\n\t\t\tdevice_properties_,\n\t\t\tdevice_function_t(decompress<\n\t\t\t\tUncompressedIndexSize, UncompressedSize, PositionOffsetSize, PositionsAreRelative, RunIndexSize>),\n\t\t\tdynamic_shared_mem_limit\n\t\t)\n\t{\n\t\tenum { elements_per_thread_write =\n\t\t\tUncompressedSize >= sizeof(unsigned) ? 1 : sizeof(unsigned) \/ UncompressedSize };\n\n\t\tauto num_anchored_segments =\n\t\t\tutil::div_rounding_up(uncompressed_length, position_anchoring_period);\n\t\tauto num_thread_writes_per_segment = div_rounding_up(position_anchoring_period, elements_per_thread_write);\n\n\t\tgrid_construction_resolution = block;\n\t\tlength = num_anchored_segments;\n\t\tserialization_option = none;\n\t\tquanta.threads_in_block = warp_size;\n\t\tupper_limits.warps_in_block = 2;\n\t\t\t\/\/ This should probably be architecture-specific, and perhaps\n\t\t\t\/\/ even depend on the data distribution if that's known.\n\t\t\t\/\/\n\t\t\t\/\/ The point of setting this limit is make the block-length 'jump' between\n\t\t\t\/\/ consecutive writes by the same warp shorter, so that it has less work\n\t\t\t\/\/ looking for the next positions to write to. With an average run length\n\t\t\t\/\/ of 2, the number of iterations over warp_size-long sequences of runs\n\t\t\t\/\/ will on the average be less than 2 (i.e. 
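\/\/ Anchor layout sketch (hypothetical numbers): with anchoring_period = 4 and runs starting at uncompressed positions {0, 3, 5, 9}, the segments starting at positions 0, 4 and 8 get position_anchors = {0, 1, 2} -- each anchor names the last run starting at or before its segment's first element.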
one advance and two checks).\n\t};\n};\n\n} \/\/ namespace run_position_encoding\n} \/\/ namespace decompression\n} \/\/ namespace kernels\n} \/\/ namespace cuda\n\n#endif \/* SRC_KERNELS_DECOMPRESSION_RUN_POSITION_CUH *\/\n","avg_line_length":45.4607843137,"max_line_length":170,"alphanum_fraction":0.7650420531} {"size":7363,"ext":"cu","lang":"Cuda","max_stars_count":6.0,"content":"\/\/ Copyright 2019 Yan Yan\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace spconv {\nnamespace functor {\ntemplate \nstruct CreateConvIndicePairFunctorP1 {\n Index operator()(const tv::GPU &d, tv::TensorView indicesIn,\n tv::TensorView indicesOut,\n tv::TensorView gridsOut,\n tv::TensorView indicePairs,\n tv::TensorView indiceNum,\n tv::TensorView indicePairUnique,\n const tv::SimpleVector kernelSize,\n const tv::SimpleVector stride,\n const tv::SimpleVector padding,\n const tv::SimpleVector dilation,\n const tv::SimpleVector outSpatialShape,\n bool transpose) {\n Index batchSize = gridsOut.dim(0);\n auto numActIn = indicesIn.dim(0);\n if (numActIn == 0) return 0;\n \/\/ auto timer = spconv::CudaContextTimer<>();\n if (transpose)\n prepareDeConvIndicePairsKernel\n <<>>(indicesIn, indicesOut, gridsOut, indicePairs,\n indiceNum, indicePairUnique, kernelSize, stride,\n padding, dilation, outSpatialShape);\n else\n prepareIndicePairsKernel\n <<>>(indicesIn, indicesOut, gridsOut, indicePairs,\n indiceNum, indicePairUnique, kernelSize, stride,\n padding, dilation, outSpatialShape);\n TV_CHECK_CUDA_ERR();\n \/\/ std::cout << \"p1 gene time \" << timer.report() \/ 1000.0 << std::endl;\n return 1;\n }\n};\n\ntemplate \nstruct CreateConvIndicePairFunctorP2 {\n Index operator()(const tv::GPU &d, tv::TensorView indicesIn,\n tv::TensorView indicesOut,\n tv::TensorView gridsOut,\n tv::TensorView indicePairs,\n tv::TensorView indiceNum,\n tv::TensorView indicePairUnique,\n const tv::SimpleVector outSpatialShape,\n bool transpose, bool resetGrid) {\n Index batchSize = gridsOut.dim(0);\n auto kernelVolume = indicePairs.dim(0);\n auto numActIn = indicesIn.dim(0);\n if (numActIn == 0) return 0;\n Index numAct = indicePairUnique.dim(0) - 1;\n assignGridAndIndiceOutKernel\n <<>>(indicesOut, gridsOut, numAct, indicePairs,\n indicePairUnique, outSpatialShape, batchSize);\n TV_CHECK_CUDA_ERR();\n assignIndicePairsKernel\n <<>>(indicesOut, gridsOut, numActIn, indicePairs,\n indicePairUnique, outSpatialShape);\n TV_CHECK_CUDA_ERR();\n if (resetGrid) {\n resetGridKernel\n <<>>(indicePairUnique.data(), gridsOut, numAct);\n TV_CHECK_CUDA_ERR();\n }\n return numAct;\n }\n};\n\ntemplate \nstruct CreateSubMIndicePairFunctor {\n Index operator()(const tv::GPU &d, tv::TensorView indicesIn,\n tv::TensorView gridsOut,\n tv::TensorView indicePairs,\n tv::TensorView indiceNum,\n const tv::SimpleVector kernelSize,\n const tv::SimpleVector stride,\n const tv::SimpleVector padding,\n 
const tv::SimpleVector dilation,\n const tv::SimpleVector outSpatialShape,\n bool transpose, bool resetGrid) {\n auto numActIn = indicesIn.dim(0);\n if (numActIn == 0) return 0;\n \/\/ auto timer = spconv::CudaContextTimer<>();\n prepareSubMGridKernel\n <<>>(indicesIn, gridsOut, outSpatialShape);\n TV_CHECK_CUDA_ERR();\n getSubMIndicePairsKernel\n <<>>(indicesIn, gridsOut, indicePairs, indiceNum,\n kernelSize, stride, padding, dilation,\n outSpatialShape);\n TV_CHECK_CUDA_ERR();\n \/\/ std::cout << \"subm gene time \" << timer.report() \/ 1000.0 << std::endl;\n if (resetGrid) {\n resetGridSubMKernel\n <<>>(indicesIn.data(), gridsOut, outSpatialShape,\n numActIn);\n TV_CHECK_CUDA_ERR();\n }\n return numActIn;\n }\n};\n} \/\/ namespace functor\n\n#define DECLARE_GPU_SPECS_INDEX_NDIM(Index, NDIM) \\\n template struct functor::CreateConvIndicePairFunctor; \\\n template struct functor::CreateConvIndicePairFunctorP1; \\\n template struct functor::CreateConvIndicePairFunctorP2; \\\n template struct functor::CreateSubMIndicePairFunctor;\n\n#define DECLARE_GPU_INDEX(Index) \\\n DECLARE_GPU_SPECS_INDEX_NDIM(Index, 1); \\\n DECLARE_GPU_SPECS_INDEX_NDIM(Index, 2); \\\n DECLARE_GPU_SPECS_INDEX_NDIM(Index, 3); \\\n DECLARE_GPU_SPECS_INDEX_NDIM(Index, 4);\n\nDECLARE_GPU_INDEX(int);\n\n#undef DECLARE_GPU_INDEX\n#undef DECLARE_GPU_SPECS_INDEX_NDIM\n} \/\/ namespace spconv\n","avg_line_length":46.3081761006,"max_line_length":79,"alphanum_fraction":0.6144234687} {"size":5275,"ext":"cu","lang":"Cuda","max_stars_count":21.0,"content":"\/*\n -- MAGMA (version 1.6.2) --\n Univ. of Tennessee, Knoxville\n Univ. of California, Berkeley\n Univ. of Colorado, Denver\n @date May 2015\n\n @precisions normal z -> c d s\n\n*\/\n\n#include \"common_magma.h\"\n\n\/\/ 512 is maximum number of threads for CUDA capability 1.x\n#define BLOCK_SIZE 512\n#define BLOCK_SIZEx 32\n#define BLOCK_SIZEy 16\n\n#define PRECISION_z\n\n\n\/\/ copied from dznrm2.cu in trunk\/magmablas\n\/\/ ----------------------------------------\n\/\/ Does sum reduction of array x, leaving total in x[0].\n\/\/ Contents of x are destroyed in the process.\n\/\/ With k threads, can reduce array up to 2*k in size.\n\/\/ Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)\n\/\/ Having n as template parameter allows compiler to evaluate some conditions at compile time.\ntemplate< int n >\n__device__ void sum_reduce( \/*int n,*\/ int i, magmaDouble_ptr x )\n{\n __syncthreads();\n if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }\n if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }\n if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }\n if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }\n if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }\n if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }\n \/\/ probably don't need __syncthreads for < 16 threads\n \/\/ because of implicit warp level synchronization.\n if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }\n if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }\n if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }\n if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }\n if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } 
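\/\/ Typical call pattern for this tree reduction, mirroring the commented-out kernel further down -- a sketch, not part of the original file:
\/\/   __shared__ double sum[BLOCK_SIZE];
\/\/   sum[threadIdx.x] = partial_value;           \/\/ one partial per thread
\/\/   sum_reduce< BLOCK_SIZE >(threadIdx.x, sum); \/\/ block total lands in sum[0]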
__syncthreads(); }\n}\n\/\/ end sum_reduce\n\n\n\n__global__ void\nmagma_zlobpcg_res_kernel( \n magma_int_t num_rows, \n magma_int_t num_vecs, \n magmaDouble_ptr evals, \n magmaDoubleComplex * X, \n magmaDoubleComplex * R,\n magmaDouble_ptr res)\n{\n\n int row = blockIdx.x * blockDim.x + threadIdx.x; \/\/ global row index\n\n if( row( i, sum );\n \n if (i==0)\n res[blockIdx.x] = sqrt(sum[0]);\n}\n*\/\n\n\n\n\/**\n Purpose\n -------\n \n This routine computes for Block-LOBPCG, the set of residuals. \n R = Ax - x evalues\n It replaces:\n for(int i=0; i < n; i++){\n magma_zaxpy(m, MAGMA_Z_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);\n }\n The memory layout of x is:\n\n \/ x1[0] x2[0] x3[0] \\\n | x1[1] x2[1] x3[1] |\n x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .\n | x1[3] x2[3] x3[3] |\n \\ x1[4] x2[4] x3[4] \/\n \n Arguments\n ---------\n\n @param[in]\n num_rows magma_int_t\n number of rows\n\n @param[in]\n num_vecs magma_int_t\n number of vectors\n \n @param[in]\n evalues magmaDouble_ptr \n array of eigenvalues\/approximations\n\n @param[in]\n X magmaDoubleComplex_ptr \n block of eigenvector approximations\n \n @param[in]\n R magmaDoubleComplex_ptr \n block of residuals\n\n @param[in]\n res magmaDouble_ptr \n array of residuals\n\n @param[in]\n queue magma_queue_t\n Queue to execute in.\n\n @ingroup magmasparse_zaux\n ********************************************************************\/\n\nextern \"C\" magma_int_t\nmagma_zlobpcg_res(\n magma_int_t num_rows,\n magma_int_t num_vecs, \n magmaDouble_ptr evalues, \n magmaDoubleComplex_ptr X,\n magmaDoubleComplex_ptr R, \n magmaDouble_ptr res,\n magma_queue_t queue )\n{\n \/\/ every thread handles one row\n\n magma_int_t block_size = BLOCK_SIZE;\n \n dim3 threads( block_size );\n dim3 grid( magma_ceildiv( num_rows, block_size ) );\n\n magma_zlobpcg_res_kernel<<< grid, threads, 0, queue >>>\n ( num_rows, num_vecs, evalues, X, R, res );\n\n\n return MAGMA_SUCCESS;\n}\n\n\n\n","avg_line_length":27.9100529101,"max_line_length":96,"alphanum_fraction":0.5143127962} {"size":9061,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#ifndef THC_GENERIC_FILE\n#define THC_GENERIC_FILE \"THCUNN\/generic\/SpatialClassNLLCriterion.cu\"\n#else\n\nvoid THNN_(SpatialClassNLLCriterion_shapeCheck)(\n THCState *state,\n THCTensor *input,\n THCIndexTensor *target,\n THCTensor *weights)\n{\n AT_CHECK(!target->is_empty() && target->dim() == 3, 1,\n \"only batches of spatial targets supported (non-empty 3D tensors)\" \\\n \" but got targets of size: : \", target->sizes());\n AT_CHECK(!input->is_empty() && input->dim() == 4, 2,\n \"only batches of spatial inputs supported (non-empty 4D tensors), \" \\\n \"but got input of size: \", input->sizes());\n if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||\n THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||\n THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {\n THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);\n THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);\n THError(\"input and target batch or spatial sizes don't match: target %s, input %s\",\n target_size.str, input_size.str);\n }\n\n if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {\n THError(\"weight tensor should be defined either for all or no classes\");\n }\n}\n\nstatic void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(\n THCState *state,\n THCTensor 
*gradOutput,\n THCIndexTensor *target)\n{\n AT_CHECK(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2,\n \"Expected non-empty dimension 3 but got gradOutput of size: \", gradOutput->sizes());\n if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) ||\n THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) ||\n THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) {\n THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput);\n THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);\n THError(\"gradOutput sizes don't match target sizes: target %s, gradOutput %s\",\n target_size.str, gradOutput_size.str);\n }\n}\n\nvoid THNN_(SpatialClassNLLCriterion_updateOutput)(\n THCState *state,\n THCTensor *input,\n THCIndexTensor *target,\n THCTensor *output,\n int64_t reduction,\n THCTensor *weights,\n THCTensor *total_weight,\n int64_t ignore_index)\n{\n THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);\n THCTensor_(resize1d)(state, output, 1);\n THCTensor_(resize1d)(state, total_weight, 1);\n ignore_index -= TH_INDEX_BASE;\n\n if (weights)\n THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);\n else\n THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);\n\n if (reduction == Reduction::None) {\n int64_t batch_size = THCTensor_(size)(state, input, 0);\n int64_t H = THCTensor_(size)(state, input, 2);\n int64_t W = THCTensor_(size)(state, input, 3);\n\n THCTensor_(resize3d)(state, output, batch_size, H, W);\n\n if (weights) {\n weights = THCTensor_(newContiguous)(state, weights);\n }\n\n int64_t count = batch_size * H * W;\n SpatialClassNLLCriterion_updateOutput_no_reduce_kernel\n <<>>(\n count,\n toDeviceTensor(state, input),\n toDeviceTensor(state, target),\n toDeviceTensor(state, output),\n weights ? THCTensor_(data)(state, weights) : NULL,\n ignore_index);\n\n if (weights) {\n THCTensor_(free)(state, weights);\n }\n return;\n }\n\n input = THCTensor_(newContiguous)(state, input);\n weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;\n target = THCIndexTensor_(newContiguous)(state, target);\n\n scalar_t *input_data = THCTensor_(data)(state, input);\n scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;\n THCIndex_t *target_data = THCIndexTensor_(data)(state, target);\n scalar_t *output_data = THCTensor_(data)(state, output);\n scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);\n\n THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);\n THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) \/ batch_size;\n int blocks_per_sample = GET_BLOCKS(map_nelem) \/ 128;\n blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample;\n int total_blocks = blocks_per_sample * batch_size;\n\n THCTensor_(fill)(state, output, ScalarConvert::to(0));\n THCTensor_(fill)(state, total_weight, ScalarConvert::to(0));\n\n cunn_SpatialClassNLLCriterion_updateOutput_kernel\n <<>>(\n output_data,\n total_weight_data,\n input_data,\n target_data,\n weights_data,\n reduction == Reduction::Mean,\n THCTensor_(size)(state, input, 0),\n THCTensor_(size)(state, input, 1),\n THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),\n blocks_per_sample,\n ignore_index\n );\n THCudaCheck(cudaGetLastError());\n if (reduction == Reduction::Mean) {\n cunn_SpatialClassNLLCriterion_sizeAverage_kernel<<<1, 1, 0, THCState_getCurrentStream(state)>>>(\n output_data, total_weight_data\n );\n THCudaCheck(cudaGetLastError());\n }\n\n if (weights)\n THCTensor_(free)(state, weights);\n THCIndexTensor_(free)(state, target);\n THCTensor_(free)(state, input);\n}\n\nvoid THNN_(SpatialClassNLLCriterion_updateGradInput)(\n THCState *state,\n THCTensor *input,\n THCIndexTensor *target,\n THCTensor *gradOutput,\n THCTensor *gradInput,\n int64_t reduction,\n THCTensor *weights,\n THCTensor *total_weight,\n int64_t ignore_index)\n{\n THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);\n THCTensor_(resizeAs)(state, gradInput, input);\n THCTensor_(zero)(state, gradInput);\n THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,\n \"gradInput must be contiguous\");\n ignore_index -= TH_INDEX_BASE;\n\n if (weights)\n THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);\n else\n THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);\n\n if (reduction == Reduction::None) {\n THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(\n state,\n gradOutput,\n target);\n\n int64_t batch_size = THCTensor_(size)(state, input, 0);\n int64_t H = THCTensor_(size)(state, input, 2);\n int64_t W = THCTensor_(size)(state, input, 3);\n\n if (weights) {\n weights = THCTensor_(newContiguous)(state, weights);\n }\n\n int64_t count = batch_size * H * W;\n SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel\n <<>>(\n count,\n toDeviceTensor(state, target),\n toDeviceTensor(state, gradOutput),\n toDeviceTensor(state, gradInput),\n weights ? THCTensor_(data)(state, weights) : NULL,\n ignore_index);\n\n if (weights) {\n THCTensor_(free)(state, weights);\n }\n return;\n }\n\n input = THCTensor_(newContiguous)(state, input);\n weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;\n target = THCIndexTensor_(newContiguous)(state, target);\n\n scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);\n scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;\n scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);\n THCIndex_t *target_data = THCIndexTensor_(data)(state, target);\n scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);\n\n THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);\n THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) \/ batch_size;\n int blocks_per_sample = GET_BLOCKS(map_nelem) \/ 128;\n blocks_per_sample = (blocks_per_sample == 0) ? 
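\/\/ Launch-sizing example for the clamp here (hypothetical shape, and assuming GET_BLOCKS uses 1024-thread blocks): a 16 x 128 x 128 target has map_nelem = 16384, GET_BLOCKS(16384) = 16, so blocks_per_sample = 16 \/ 128 = 0 is clamped to 1 and total_blocks = 1 * 16 = 16 blocks cover the whole batch.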
1 : blocks_per_sample;\n int total_blocks = blocks_per_sample * batch_size;\n\n cunn_SpatialClassNLLCriterion_updateGradInput_kernel\n <<>>(\n gradInput_data,\n gradOutput_data,\n target_data,\n weights_data,\n total_weight_data,\n reduction == Reduction::Mean,\n THCTensor_(size)(state, input, 0),\n THCTensor_(size)(state, input, 1),\n THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),\n blocks_per_sample,\n ignore_index\n );\n THCudaCheck(cudaGetLastError());\n\n if (weights)\n THCTensor_(free)(state, weights);\n THCIndexTensor_(free)(state, target);\n THCTensor_(free)(state, input);\n}\n\n#endif\n","avg_line_length":38.7222222222,"max_line_length":103,"alphanum_fraction":0.6893278888} {"size":10567,"ext":"cuh","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright (c) 2020-2022, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n#pragma once\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace cugraph {\n\nnamespace detail {\n\ntemplate \nclass edge_partition_device_view_base_t {\n public:\n edge_partition_device_view_base_t(edge_t const* offsets,\n vertex_t const* indices,\n std::optional weights,\n edge_t number_of_edges)\n : offsets_(offsets),\n indices_(indices),\n weights_(weights ? thrust::optional(*weights) : thrust::nullopt),\n number_of_edges_(number_of_edges)\n {\n }\n\n __host__ __device__ edge_t number_of_edges() const { return number_of_edges_; }\n\n __host__ __device__ edge_t const* offsets() const { return offsets_; }\n __host__ __device__ vertex_t const* indices() const { return indices_; }\n __host__ __device__ thrust::optional weights() const { return weights_; }\n\n \/\/ major_idx == major offset if CSR\/CSC, major_offset != major_idx if DCSR\/DCSC\n __device__ thrust::tuple, edge_t> local_edges(\n vertex_t major_idx) const noexcept\n {\n auto edge_offset = *(offsets_ + major_idx);\n auto local_degree = *(offsets_ + (major_idx + 1)) - edge_offset;\n auto indices = indices_ + edge_offset;\n auto weights =\n weights_ ? 
thrust::optional{*weights_ + edge_offset} : thrust::nullopt;\n    return thrust::make_tuple(indices, weights, local_degree);\n  }\n\n  \/\/ major_idx == major offset if CSR\/CSC, major_offset != major_idx if DCSR\/DCSC\n  __device__ edge_t local_degree(vertex_t major_idx) const noexcept\n  {\n    return *(offsets_ + (major_idx + 1)) - *(offsets_ + major_idx);\n  }\n\n  \/\/ major_idx == major offset if CSR\/CSC, major_offset != major_idx if DCSR\/DCSC\n  __device__ edge_t local_offset(vertex_t major_idx) const noexcept\n  {\n    return *(offsets_ + major_idx);\n  }\n\n private:\n  \/\/ should be trivially copyable to device\n  edge_t const* offsets_{nullptr};\n  vertex_t const* indices_{nullptr};\n  thrust::optional weights_{thrust::nullopt};\n  edge_t number_of_edges_{0};\n};\n\n}  \/\/ namespace detail\n\ntemplate \nclass edge_partition_device_view_t;\n\n\/\/ multi-GPU version\ntemplate \nclass edge_partition_device_view_t>\n  : public detail::edge_partition_device_view_base_t {\n public:\n  edge_partition_device_view_t(edge_partition_view_t view)\n    : detail::edge_partition_device_view_base_t(\n        view.offsets(), view.indices(), view.weights(), view.number_of_edges()),\n      dcs_nzd_vertices_(view.dcs_nzd_vertices()\n                          ? thrust::optional{*(view.dcs_nzd_vertices())}\n                          : thrust::nullopt),\n      dcs_nzd_vertex_count_(view.dcs_nzd_vertex_count()\n                              ? thrust::optional{*(view.dcs_nzd_vertex_count())}\n                              : thrust::nullopt),\n      major_range_first_(view.major_range_first()),\n      major_range_last_(view.major_range_last()),\n      minor_range_first_(view.minor_range_first()),\n      minor_range_last_(view.minor_range_last()),\n      major_value_start_offset_(view.major_value_start_offset())\n  {\n  }\n\n  __host__ __device__ vertex_t major_range_first() const noexcept { return major_range_first_; }\n\n  __host__ __device__ vertex_t major_range_last() const noexcept { return major_range_last_; }\n\n  __host__ __device__ vertex_t major_range_size() const noexcept\n  {\n    return major_range_last_ - major_range_first_;\n  }\n\n  __host__ __device__ vertex_t minor_range_first() const noexcept { return minor_range_first_; }\n\n  __host__ __device__ vertex_t minor_range_last() const noexcept { return minor_range_last_; }\n\n  __host__ __device__ vertex_t minor_range_size() const noexcept\n  {\n    return minor_range_last_ - minor_range_first_;\n  }\n\n  __host__ __device__ vertex_t major_offset_from_major_nocheck(vertex_t major) const noexcept\n  {\n    return major - major_range_first_;\n  }\n\n  __host__ __device__ vertex_t minor_offset_from_minor_nocheck(vertex_t minor) const noexcept\n  {\n    return minor - minor_range_first_;\n  }\n\n  __host__ __device__ vertex_t major_from_major_offset_nocheck(vertex_t major_offset) const noexcept\n  {\n    return major_range_first_ + major_offset;\n  }\n\n  \/\/ major_hypersparse_idx: index within the hypersparse segment\n  __host__ __device__ thrust::optional major_hypersparse_idx_from_major_nocheck(\n    vertex_t major) const noexcept\n  {\n    if (dcs_nzd_vertices_) {\n      \/\/ we can avoid binary search (and potentially improve performance) if we add an auxiliary\n      \/\/ array or cuco::static_map (at the expense of additional memory)\n      auto it = thrust::lower_bound(\n        thrust::seq, *dcs_nzd_vertices_, *dcs_nzd_vertices_ + *dcs_nzd_vertex_count_, major);\n      return it != *dcs_nzd_vertices_ + *dcs_nzd_vertex_count_\n               ? (*it == major ? 
thrust::optional{static_cast(\n thrust::distance(*dcs_nzd_vertices_, it))}\n : thrust::nullopt)\n : thrust::nullopt;\n } else {\n return thrust::nullopt;\n }\n }\n\n \/\/ major_hypersparse_idx: index within the hypersparse segment\n __host__ __device__ thrust::optional major_from_major_hypersparse_idx_nocheck(\n vertex_t major_hypersparse_idx) const noexcept\n {\n return dcs_nzd_vertices_\n ? thrust::optional{(*dcs_nzd_vertices_)[major_hypersparse_idx]}\n : thrust::nullopt;\n }\n\n __host__ __device__ vertex_t minor_from_minor_offset_nocheck(vertex_t minor_offset) const noexcept\n {\n return minor_range_first_ + minor_offset;\n }\n\n __host__ __device__ vertex_t major_value_start_offset() const\n {\n return major_value_start_offset_;\n }\n\n __host__ __device__ thrust::optional dcs_nzd_vertices() const\n {\n return dcs_nzd_vertices_;\n }\n __host__ __device__ thrust::optional dcs_nzd_vertex_count() const\n {\n return dcs_nzd_vertex_count_;\n }\n\n private:\n \/\/ should be trivially copyable to device\n\n thrust::optional dcs_nzd_vertices_{nullptr};\n thrust::optional dcs_nzd_vertex_count_{0};\n\n vertex_t major_range_first_{0};\n vertex_t major_range_last_{0};\n vertex_t minor_range_first_{0};\n vertex_t minor_range_last_{0};\n\n vertex_t major_value_start_offset_{0};\n};\n\n\/\/ single-GPU version\ntemplate \nclass edge_partition_device_view_t>\n : public detail::edge_partition_device_view_base_t {\n public:\n edge_partition_device_view_t(edge_partition_view_t view)\n : detail::edge_partition_device_view_base_t(\n view.offsets(), view.indices(), view.weights(), view.number_of_edges()),\n number_of_vertices_(view.major_range_last())\n {\n }\n\n __host__ __device__ vertex_t major_value_start_offset() const { return vertex_t{0}; }\n\n __host__ __device__ constexpr vertex_t major_range_first() const noexcept { return vertex_t{0}; }\n\n __host__ __device__ vertex_t major_range_last() const noexcept { return number_of_vertices_; }\n\n __host__ __device__ vertex_t major_range_size() const noexcept { return number_of_vertices_; }\n\n __host__ __device__ constexpr vertex_t minor_range_first() const noexcept { return vertex_t{0}; }\n\n __host__ __device__ vertex_t minor_range_last() const noexcept { return number_of_vertices_; }\n\n __host__ __device__ vertex_t minor_range_size() const noexcept { return number_of_vertices_; }\n\n __host__ __device__ vertex_t major_offset_from_major_nocheck(vertex_t major) const noexcept\n {\n return major;\n }\n\n __host__ __device__ vertex_t minor_offset_from_minor_nocheck(vertex_t minor) const noexcept\n {\n return minor;\n }\n\n __host__ __device__ vertex_t major_from_major_offset_nocheck(vertex_t major_offset) const noexcept\n {\n return major_offset;\n }\n\n \/\/ major_hypersparse_idx: index within the hypersparse segment\n __host__ __device__ thrust::optional major_hypersparse_idx_from_major_nocheck(\n vertex_t major) const noexcept\n {\n assert(false);\n return thrust::nullopt;\n }\n\n \/\/ major_hypersparse_idx: index within the hypersparse segment\n __host__ __device__ thrust::optional major_from_major_hypersparse_idx_nocheck(\n vertex_t major_hypersparse_idx) const noexcept\n {\n assert(false);\n return thrust::nullopt;\n }\n\n __host__ __device__ vertex_t minor_from_minor_offset_nocheck(vertex_t minor_offset) const noexcept\n {\n return minor_offset;\n }\n\n __host__ __device__ thrust::optional dcs_nzd_vertices() const\n {\n return thrust::nullopt;\n }\n\n __host__ __device__ thrust::optional dcs_nzd_vertex_count() const\n {\n return thrust::nullopt;\n 
}\n\n private:\n  vertex_t number_of_vertices_;\n};\n\n}  \/\/ namespace cugraph\n","avg_line_length":35.5791245791,"max_line_length":100,"alphanum_fraction":0.7148670389} {"size":7114,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Software License Agreement (BSD License)\n *\n * Point Cloud Library (PCL) - www.pointclouds.org\n * Copyright (c) 2011, Willow Garage, Inc.\n *\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following\n * disclaimer in the documentation and\/or other materials provided\n * with the distribution.\n * * Neither the name of Willow Garage, Inc. nor the names of its\n * contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n *\/\n\n#include \"device.hpp\"\n\/\/#include \n\/\/#include \n\/\/#include \n\nnamespace pcl {\nnamespace device {\ntemplate struct TransformEstimator {\n    enum {\n        CTA_SIZE_X = 32,\n        CTA_SIZE_Y = 8,\n        CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y\n    };\n\n    struct plus {\n        __forceinline__ __device__ T operator()(const T &lhs,\n                                                const volatile T &rhs) const {\n            return lhs + rhs;\n        }\n    };\n\n    PtrStep v_dst;\n    PtrStep n_dst;\n    PtrStep v_src;\n    PtrStepSz coresp;\n\n    mutable PtrStep gbuf;\n\n    __device__ __forceinline__ void operator()() const {\n        int x = threadIdx.x + blockIdx.x * blockDim.x;\n        int y = threadIdx.y + blockIdx.y * blockDim.y;\n\n        float row[7];\n        row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f;\n\n        if (x < coresp.cols && y < coresp.rows) {\n            short2 ukr = coresp.ptr(y)[x];\n\n            if (ukr.x != -1) {\n                float3 n;\n                n.x = n_dst.ptr(ukr.y)[ukr.x];\n                n.y = n_dst.ptr(ukr.y + coresp.rows)[ukr.x];\n                n.z = n_dst.ptr(ukr.y + 2 * coresp.rows)[ukr.x];\n\n                float3 d;\n                d.x = v_dst.ptr(ukr.y)[ukr.x];\n                d.y = v_dst.ptr(ukr.y + coresp.rows)[ukr.x];\n                d.z = v_dst.ptr(ukr.y + 2 * coresp.rows)[ukr.x];\n\n                float3 s;\n                s.x = v_src.ptr(y)[x];\n                s.y = v_src.ptr(y + coresp.rows)[x];\n                s.z = v_src.ptr(y + 2 * coresp.rows)[x];\n\n                float b = dot(n, d - s);\n\n                *(float3 *)&row[0] = cross(s, n);\n                *(float3 *)&row[3] = n;\n                row[6] = b;\n            }\n        }\n\n        __shared__ T smem[CTA_SIZE];\n        int tid = Block::flattenedThreadId();\n\n        int shift = 0;\n        for (int i = 0; i < 6; ++i) \/\/ rows\n        {\n#pragma unroll\n            for (int j = i; j < 7; ++j) \/\/ cols + b\n            {\n                __syncthreads();\n                smem[tid] = row[i] * row[j];\n                
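\/\/ one block-wide reduction per (i, j) entry of the 6x7 system [A | b]: 21 upper-triangular A terms + 6 b terms = 27 values in gbuf; sync first so every partial product is visible\n                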
__syncthreads();\n\n Block::reduce(smem, plus());\n\n if (tid == 0)\n gbuf.ptr(shift++)[blockIdx.x + gridDim.x * blockIdx.y] =\n smem[0];\n }\n }\n }\n};\n\ntemplate struct TranformReduction {\n enum {\n CTA_SIZE = 512,\n STRIDE = CTA_SIZE,\n\n B = 6,\n COLS = 6,\n ROWS = 6,\n DIAG = 6,\n UPPER_DIAG_MAT = (COLS * ROWS - DIAG) \/ 2 + DIAG,\n TOTAL = UPPER_DIAG_MAT + B,\n\n GRID_X = TOTAL\n };\n\n PtrStep gbuf;\n int length;\n mutable T *output;\n\n __device__ __forceinline__ void operator()() const {\n const T *beg = gbuf.ptr(blockIdx.x);\n const T *end = beg + length;\n\n int tid = threadIdx.x;\n\n T sum = 0.f;\n for (const T *t = beg + tid; t < end; t += STRIDE)\n sum += *t;\n\n __shared__ T smem[CTA_SIZE];\n\n smem[tid] = sum;\n __syncthreads();\n\n Block::reduce(smem, TransformEstimator::plus());\n\n if (tid == 0)\n output[blockIdx.x] = smem[0];\n }\n};\n\n__global__ void TransformEstimatorKernel1(const TransformEstimator te) {\n te();\n}\n__global__ void TransformEstimatorKernel2(const TranformReduction tr) {\n tr();\n}\n} \/\/ namespace device\n} \/\/ namespace pcl\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nvoid pcl::device::estimateTransform(const MapArr &v_dst, const MapArr &n_dst,\n const MapArr &v_src,\n const PtrStepSz &coresp,\n DeviceArray2D &gbuf,\n DeviceArray &mbuf,\n float *matrixA_host, float *vectorB_host) {\n typedef TransformEstimator TEst;\n typedef TranformReduction TRed;\n\n dim3 block(TEst::CTA_SIZE_X, TEst::CTA_SIZE_Y);\n dim3 grid(1, 1, 1);\n grid.x = divUp(coresp.cols, block.x);\n grid.y = divUp(coresp.rows, block.y);\n\n mbuf.create(TRed::TOTAL);\n if (gbuf.rows() != TRed::TOTAL || gbuf.cols() < (int)(grid.x * grid.y))\n gbuf.create(TRed::TOTAL, grid.x * grid.y);\n\n TEst te;\n te.n_dst = n_dst;\n te.v_dst = v_dst;\n te.v_src = v_src;\n te.coresp = coresp;\n te.gbuf = gbuf;\n\n TransformEstimatorKernel1<<>>(te);\n cudaSafeCall(cudaGetLastError());\n \/\/ cudaSafeCall(cudaDeviceSynchronize());\n\n TRed tr;\n tr.gbuf = gbuf;\n tr.length = grid.x * grid.y;\n tr.output = mbuf;\n\n TransformEstimatorKernel2<<>>(tr);\n\n cudaSafeCall(cudaGetLastError());\n cudaSafeCall(cudaDeviceSynchronize());\n\n float host_data[TRed::TOTAL];\n mbuf.download(host_data);\n\n int shift = 0;\n for (int i = 0; i < 6; ++i) \/\/ rows\n for (int j = i; j < 7; ++j) \/\/ cols + b\n {\n float value = host_data[shift++];\n if (j == 6) \/\/ vector b\n vectorB_host[i] = value;\n else\n matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value;\n }\n}\n","avg_line_length":31.3392070485,"max_line_length":114,"alphanum_fraction":0.5514478493} {"size":27550,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/******************************************************************************\r\n * Copyright (c) 2011, Duane Merrill. All rights reserved.\r\n * Copyright (c) 2011-2014, NVIDIA CORPORATION. 
All rights reserved.\r\n *\r\n * Redistribution and use in source and binary forms, with or without\r\n * modification, are permitted provided that the following conditions are met:\r\n * * Redistributions of source code must retain the above copyright\r\n * notice, this list of conditions and the following disclaimer.\r\n * * Redistributions in binary form must reproduce the above copyright\r\n * notice, this list of conditions and the following disclaimer in the\r\n * documentation and\/or other materials provided with the distribution.\r\n * * Neither the name of the NVIDIA CORPORATION nor the\r\n * names of its contributors may be used to endorse or promote products\r\n * derived from this software without specific prior written permission.\r\n *\r\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\r\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\r\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n *\r\n ******************************************************************************\/\r\n\r\n\/******************************************************************************\r\n * Test of DeviceSelect::Flagged and DevicePartition::Flagged utilities\r\n ******************************************************************************\/\r\n\r\n\/\/ Ensure printing of CUDA runtime errors to console\r\n#define CUB_STDERR\r\n\r\n#include \r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\n#include \"test_util.h\"\r\n\r\nusing namespace cub;\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ Globals, constants and typedefs\r\n\/\/---------------------------------------------------------------------\r\n\r\nbool g_verbose = false;\r\nint g_timing_iterations = 0;\r\nint g_repeat = 0;\r\nCachingDeviceAllocator g_allocator(true);\r\n\r\n\/\/ Dispatch types\r\nenum Backend\r\n{\r\n CUB, \/\/ CUB method\r\n THRUST, \/\/ Thrust method\r\n CDP, \/\/ GPU-based (dynamic parallelism) dispatch to CUB method\r\n};\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ Dispatch to different CUB DeviceSelect entrypoints\r\n\/\/---------------------------------------------------------------------\r\n\r\n\r\n\/**\r\n * Dispatch to select flagged entrypoint\r\n *\/\r\ntemplate \r\nCUB_RUNTIME_FUNCTION __forceinline__\r\ncudaError_t Dispatch(\r\n Int2Type dispatch_to,\r\n Int2Type partition,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t &temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n cudaStream_t stream,\r\n bool debug_synchronous)\r\n{\r\n cudaError_t error = cudaSuccess;\r\n for (int i = 0; i < 
timing_timing_iterations; ++i)\r\n {\r\n error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);\r\n }\r\n return error;\r\n}\r\n\r\n\r\n\/**\r\n * Dispatch to partition flagged entrypoint\r\n *\/\r\ntemplate \r\nCUB_RUNTIME_FUNCTION __forceinline__\r\ncudaError_t Dispatch(\r\n Int2Type dispatch_to,\r\n Int2Type partition,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t &temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n cudaStream_t stream,\r\n bool debug_synchronous)\r\n{\r\n cudaError_t error = cudaSuccess;\r\n for (int i = 0; i < timing_timing_iterations; ++i)\r\n {\r\n error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);\r\n }\r\n return error;\r\n}\r\n\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ Dispatch to different Thrust entrypoints\r\n\/\/---------------------------------------------------------------------\r\n\r\n\/**\r\n * Dispatch to select flagged entrypoint\r\n *\/\r\ntemplate \r\n__host__ __forceinline__\r\ncudaError_t Dispatch(\r\n Int2Type dispatch_to,\r\n Int2Type partition,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t &temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n cudaStream_t stream,\r\n bool debug_synchronous)\r\n{\r\n typedef typename std::iterator_traits::value_type T;\r\n typedef typename std::iterator_traits::value_type FlagT;\r\n\r\n\r\n if (d_temp_storage == 0)\r\n {\r\n temp_storage_bytes = 1;\r\n }\r\n else\r\n {\r\n thrust::device_ptr d_out_wrapper_end;\r\n\r\n thrust::device_ptr d_in_wrapper(d_in);\r\n thrust::device_ptr d_out_wrapper(d_out);\r\n thrust::device_ptr d_flags_wrapper(d_flags);\r\n\r\n for (int i = 0; i < timing_timing_iterations; ++i)\r\n {\r\n d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, Cast());\r\n }\r\n\r\n OffsetT num_selected = d_out_wrapper_end - d_out_wrapper;\r\n CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));\r\n }\r\n\r\n return cudaSuccess;\r\n}\r\n\r\n\r\n\/**\r\n * Dispatch to partition entrypoint\r\n *\/\r\ntemplate \r\n__host__ __forceinline__\r\ncudaError_t Dispatch(\r\n Int2Type dispatch_to,\r\n Int2Type partition,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t &temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n cudaStream_t stream,\r\n bool debug_synchronous)\r\n{\r\n typedef typename std::iterator_traits::value_type T;\r\n typedef typename std::iterator_traits::value_type FlagT;\r\n\r\n typedef thrust::reverse_iterator > ReverseOutputIteratorT;\r\n\r\n if (d_temp_storage == 0)\r\n {\r\n temp_storage_bytes = 1;\r\n }\r\n else\r\n {\r\n thrust::pair, ReverseOutputIteratorT> d_out_wrapper_end;\r\n\r\n thrust::device_ptr d_in_wrapper(d_in);\r\n 
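\/\/ wrapping the raw device pointers as thrust::device_ptr makes thrust::copy_if run on the device\r\n        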
thrust::device_ptr d_out_wrapper(d_out);\r\n thrust::device_ptr d_flags_wrapper(d_flags);\r\n ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);\r\n\r\n for (int i = 0; i < timing_timing_iterations; ++i)\r\n {\r\n d_out_wrapper_end = thrust::partition_copy(\r\n d_in_wrapper,\r\n d_in_wrapper + num_items,\r\n d_flags_wrapper,\r\n d_out_wrapper,\r\n d_out_unselected,\r\n Cast());\r\n }\r\n\r\n OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper;\r\n CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));\r\n }\r\n\r\n return cudaSuccess;\r\n}\r\n\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ CUDA Nested Parallelism Test Kernel\r\n\/\/---------------------------------------------------------------------\r\n\r\n\/**\r\n * Simple wrapper kernel to invoke DeviceSelect\r\n *\/\r\ntemplate \r\n__global__ void CnpDispatchKernel(\r\n PartitionTag partition_tag,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n bool debug_synchronous)\r\n{\r\n\r\n#ifndef CUB_CDP\r\n *d_cdp_error = cudaErrorNotSupported;\r\n#else\r\n *d_cdp_error = Dispatch(Int2Type(), partition_tag, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,\r\n d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, 0, debug_synchronous);\r\n *d_temp_storage_bytes = temp_storage_bytes;\r\n#endif\r\n}\r\n\r\n\r\n\/**\r\n * Dispatch to CDP kernel\r\n *\/\r\ntemplate \r\ncudaError_t Dispatch(\r\n Int2Type dispatch_to,\r\n PartitionTag partition_tag,\r\n int timing_timing_iterations,\r\n size_t *d_temp_storage_bytes,\r\n cudaError_t *d_cdp_error,\r\n\r\n void *d_temp_storage,\r\n size_t &temp_storage_bytes,\r\n InputIteratorT d_in,\r\n FlagIterator d_flags,\r\n OutputIteratorT d_out,\r\n NumSelectedIteratorT d_num_selected_out,\r\n OffsetT num_items,\r\n cudaStream_t stream,\r\n bool debug_synchronous)\r\n{\r\n \/\/ Invoke kernel to invoke device-side dispatch\r\n CnpDispatchKernel<<<1,1>>>(partition_tag, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,\r\n d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, debug_synchronous);\r\n\r\n \/\/ Copy out temp_storage_bytes\r\n CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));\r\n\r\n \/\/ Copy out error\r\n cudaError_t retval;\r\n CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));\r\n return retval;\r\n}\r\n\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ Test generation\r\n\/\/---------------------------------------------------------------------\r\n\r\n\r\n\/**\r\n * Initialize problem\r\n *\/\r\ntemplate \r\nvoid Initialize(\r\n T *h_in,\r\n int num_items)\r\n{\r\n for (int i = 0; i < num_items; ++i)\r\n {\r\n \/\/ Initialize each item to a randomly selected value from [0..126]\r\n unsigned int value;\r\n RandomBits(value, 0, 0, 7);\r\n if (value == 127)\r\n value = 126;\r\n InitValue(INTEGER_SEED, h_in[i], value);\r\n }\r\n\r\n if (g_verbose)\r\n {\r\n printf(\"Input:\\n\");\r\n DisplayResults(h_in, num_items);\r\n printf(\"\\n\\n\");\r\n }\r\n}\r\n\r\n\r\n\/**\r\n * Solve selection 
problem (select if less than compare)\r\n *\/\r\ntemplate <\r\n typename InputIteratorT,\r\n typename FlagT,\r\n typename T>\r\nint Solve(\r\n InputIteratorT h_in,\r\n FlagT *h_flags,\r\n T *h_reference,\r\n T compare,\r\n int num_items)\r\n{\r\n int num_selected = 0;\r\n for (int i = 0; i < num_items; ++i)\r\n {\r\n if (h_in[i] < compare)\r\n {\r\n h_flags[i] = 1;\r\n h_reference[num_selected] = h_in[i];\r\n num_selected++;\r\n }\r\n else\r\n {\r\n h_flags[i] = 0;\r\n h_reference[num_items - (i - num_selected) - 1] = h_in[i];\r\n }\r\n }\r\n\r\n return num_selected;\r\n}\r\n\r\n\r\n\r\n\/**\r\n * Test DeviceSelect for a given problem input\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n bool PARTITION,\r\n typename DeviceInputIteratorT,\r\n typename DeviceFlagIterator,\r\n typename T>\r\nvoid Test(\r\n DeviceInputIteratorT d_in,\r\n DeviceFlagIterator d_flags,\r\n T *h_reference,\r\n int num_selected,\r\n int num_items,\r\n char* type_string)\r\n{\r\n typedef typename std::iterator_traits::value_type FlagT;\r\n\r\n \/\/ Allocate device output array and num selected\r\n T *d_out = NULL;\r\n int *d_num_selected_out = NULL;\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));\r\n\r\n \/\/ Allocate CDP device arrays\r\n size_t *d_temp_storage_bytes = NULL;\r\n cudaError_t *d_cdp_error = NULL;\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));\r\n\r\n \/\/ Allocate temporary storage\r\n void *d_temp_storage = NULL;\r\n size_t temp_storage_bytes = 0;\r\n CubDebugExit(Dispatch(Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error,\r\n d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, 0, true));\r\n CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));\r\n\r\n \/\/ Clear device output array\r\n CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items));\r\n CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int)));\r\n\r\n \/\/ Run warmup\/correctness iteration\r\n CubDebugExit(Dispatch(Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error,\r\n d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, 0, true));\r\n\r\n \/\/ Check for correctness (and display results, if specified)\r\n int compare1 = (PARTITION) ?\r\n CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :\r\n CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);\r\n printf(\"\\t Data %s\\n\", compare1 ? \"FAIL\" : \"PASS\");\r\n\r\n int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);\r\n printf(\"\\t Count %s\\n\", compare2 ? 
\"FAIL\" : \"PASS\");\r\n\r\n \/\/ Flush any stdout\/stderr\r\n fflush(stdout);\r\n fflush(stderr);\r\n\r\n \/\/ Performance\r\n GpuTimer gpu_timer;\r\n gpu_timer.Start();\r\n CubDebugExit(Dispatch(Int2Type(), Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,\r\n d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, 0, false));\r\n gpu_timer.Stop();\r\n float elapsed_millis = gpu_timer.ElapsedMillis();\r\n\r\n \/\/ Display performance\r\n if (g_timing_iterations > 0)\r\n {\r\n float avg_millis = elapsed_millis \/ g_timing_iterations;\r\n float grate = float(num_items) \/ avg_millis \/ 1000.0 \/ 1000.0;\r\n int output_items = (PARTITION) ? num_items : num_selected;\r\n float gbandwidth = float(((num_items + output_items) * sizeof(T)) + (num_items * sizeof(FlagT))) \/ avg_millis \/ 1000.0 \/ 1000.0;\r\n printf(\", %.3f avg ms, %.3f billion items\/s, %.3f logical GB\/s\", avg_millis, grate, gbandwidth);\r\n }\r\n printf(\"\\n\\n\");\r\n\r\n \/\/ Flush any stdout\/stderr\r\n fflush(stdout);\r\n fflush(stderr);\r\n\r\n \/\/ Cleanup\r\n if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));\r\n if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));\r\n if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));\r\n if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));\r\n if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));\r\n\r\n \/\/ Correctness asserts\r\n AssertEquals(0, compare1 | compare2);\r\n}\r\n\r\n\r\n\/**\r\n * Test DeviceSelect on pointer type\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n bool PARTITION,\r\n typename T>\r\nvoid TestPointer(\r\n int num_items,\r\n float select_ratio,\r\n char* type_string)\r\n{\r\n typedef char FlagT;\r\n\r\n \/\/ Allocate host arrays\r\n T *h_in = new T[num_items];\r\n T *h_reference = new T[num_items];\r\n FlagT *h_flags = new FlagT[num_items];\r\n\r\n \/\/ Initialize input\r\n Initialize(h_in, num_items);\r\n\r\n \/\/ Select a comparison value that is select_ratio through the space of [0,127]\r\n T compare;\r\n if (select_ratio <= 0.0)\r\n InitValue(INTEGER_SEED, compare, 0); \/\/ select none\r\n else if (select_ratio >= 1.0)\r\n InitValue(INTEGER_SEED, compare, 127); \/\/ select all\r\n else\r\n InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));\r\n\r\n \/\/ Set flags and solve\r\n int num_selected = Solve(h_in, h_flags, h_reference, compare, num_items);\r\n\r\n printf(\"\\nPointer %s cub::%s::Flagged %d items, %d selected (select ratio %.3f), %s %d-byte elements\\n\",\r\n (PARTITION) ? \"DevicePartition\" : \"DeviceSelect\",\r\n (BACKEND == CDP) ? \"CDP CUB\" : (BACKEND == THRUST) ? 
\"Thrust\" : \"CUB\",\r\n num_items, num_selected, float(num_selected) \/ num_items, type_string, (int) sizeof(T));\r\n fflush(stdout);\r\n\r\n \/\/ Allocate problem device arrays\r\n T *d_in = NULL;\r\n FlagT *d_flags = NULL;\r\n\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));\r\n CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items));\r\n\r\n \/\/ Initialize device input\r\n CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));\r\n CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, cudaMemcpyHostToDevice));\r\n\r\n \/\/ Run Test\r\n Test(d_in, d_flags, h_reference, num_selected, num_items, type_string);\r\n\r\n \/\/ Cleanup\r\n if (h_in) delete[] h_in;\r\n if (h_reference) delete[] h_reference;\r\n if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));\r\n if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));\r\n}\r\n\r\n\r\n\/**\r\n * Test DeviceSelect on iterator type\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n bool PARTITION,\r\n typename T>\r\nvoid TestIterator(\r\n int num_items,\r\n char* type_string,\r\n Int2Type is_number)\r\n{\r\n typedef char FlagT;\r\n\r\n \/\/ Use a counting iterator as the input and a constant iterator as flags\r\n CountingInputIterator h_in(0);\r\n ConstantInputIterator h_flags(1);\r\n\r\n \/\/ Allocate host arrays\r\n T* h_reference = new T[num_items];\r\n\r\n \/\/ Initialize solution\r\n int num_selected = num_items;\r\n for (int i = 0; i < num_items; ++i)\r\n h_reference[i] = h_in[i];\r\n\r\n printf(\"\\nIterator %s cub::DeviceSelect::Flagged %d items, %d selected, %s %d-byte elements\\n\",\r\n (BACKEND == CDP) ? \"CDP CUB\" : (BACKEND == THRUST) ? \"Thrust\" : \"CUB\",\r\n num_items, num_selected, type_string, (int) sizeof(T));\r\n fflush(stdout);\r\n\r\n \/\/ Run Test\r\n Test(h_in, h_flags, h_reference, num_selected, num_items, type_string);\r\n\r\n \/\/ Cleanup\r\n if (h_reference) delete[] h_reference;\r\n}\r\n\r\n\r\n\/**\r\n * Test DeviceSelect on iterator type\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n bool PARTITION,\r\n typename T>\r\nvoid TestIterator(\r\n int num_items,\r\n char* type_string,\r\n Int2Type is_number)\r\n{}\r\n\r\n\r\n\/**\r\n * Test different selection ratios\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n bool PARTITION,\r\n typename T>\r\nvoid Test(\r\n int num_items,\r\n char* type_string)\r\n{\r\n for (float select_ratio = 0; select_ratio <= 1.0; select_ratio += 0.2)\r\n {\r\n TestPointer(num_items, select_ratio, type_string);\r\n }\r\n\r\n \/\/ Iterator test always keeps all items\r\n TestIterator(num_items, type_string, Int2Type::CATEGORY != NOT_A_NUMBER>());\r\n}\r\n\r\n\/**\r\n * Test select vs. 
partition\r\n *\/\r\ntemplate <\r\n Backend BACKEND,\r\n typename T>\r\nvoid TestMethod(\r\n int num_items,\r\n char* type_string)\r\n{\r\n Test(num_items, type_string);\r\n\r\n \/\/ mooch\r\n\/\/ Test(num_items, type_string);\r\n}\r\n\r\n\r\n\/**\r\n * Test different dispatch\r\n *\/\r\ntemplate <\r\n typename T>\r\nvoid TestOp(\r\n int num_items,\r\n char* type_string)\r\n{\r\n TestMethod(num_items, type_string);\r\n#ifdef CUB_CDP\r\n TestMethod(num_items, type_string);\r\n#endif\r\n}\r\n\r\n\r\n\/**\r\n * Test different input sizes\r\n *\/\r\ntemplate \r\nvoid Test(\r\n int num_items,\r\n char* type_string)\r\n{\r\n if (num_items < 0)\r\n {\r\n TestOp(1, type_string);\r\n TestOp(100, type_string);\r\n TestOp(10000, type_string);\r\n TestOp(1000000, type_string);\r\n }\r\n else\r\n {\r\n TestOp(num_items, type_string);\r\n }\r\n}\r\n\r\n\r\n\/**\r\n * Test select\/partition on pointer types\r\n *\/\r\ntemplate \r\nvoid ComparePointer(\r\n int num_items,\r\n float select_ratio,\r\n char* type_string)\r\n{\r\n printf(\"-- Select ----------------------------\\n\");\r\n TestPointer(num_items, select_ratio, type_string);\r\n TestPointer(num_items, select_ratio, type_string);\r\n\/* mooch\r\n printf(\"-- Partition ----------------------------\\n\");\r\n TestPointer(num_items, select_ratio, type_string);\r\n TestPointer(num_items, select_ratio, type_string);\r\n*\/\r\n}\r\n\r\n\r\n\/\/---------------------------------------------------------------------\r\n\/\/ Main\r\n\/\/---------------------------------------------------------------------\r\n\r\n\/**\r\n * Main\r\n *\/\r\nint main(int argc, char** argv)\r\n{\r\n int num_items = -1;\r\n float select_ratio = 0.5;\r\n\r\n \/\/ Initialize command line\r\n CommandLineArgs args(argc, argv);\r\n g_verbose = args.CheckCmdLineFlag(\"v\");\r\n args.GetCmdLineArgument(\"n\", num_items);\r\n args.GetCmdLineArgument(\"i\", g_timing_iterations);\r\n args.GetCmdLineArgument(\"repeat\", g_repeat);\r\n args.GetCmdLineArgument(\"ratio\", select_ratio);\r\n\r\n \/\/ Print usage\r\n if (args.CheckCmdLineFlag(\"help\"))\r\n {\r\n printf(\"%s \"\r\n \"[--n= \"\r\n \"[--i= \"\r\n \"[--device=] \"\r\n \"[--ratio=] \"\r\n \"[--repeat=] \"\r\n \"[--v] \"\r\n \"[--cdp] \"\r\n \"\\n\", argv[0]);\r\n exit(0);\r\n }\r\n\r\n \/\/ Initialize device\r\n CubDebugExit(args.DeviceInit());\r\n printf(\"\\n\");\r\n\r\n \/\/ Get device ordinal\r\n int device_ordinal;\r\n CubDebugExit(cudaGetDevice(&device_ordinal));\r\n\r\n \/\/ Get device SM version\r\n int sm_version;\r\n CubDebugExit(SmVersion(sm_version, device_ordinal));\r\n\r\n#ifdef QUICK_TEST\r\n\r\n \/\/ Compile\/run quick tests\r\n if (num_items < 0) num_items = 32000000;\r\n\r\n ComparePointer( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio, CUB_TYPE_STRING(char));\r\n ComparePointer( num_items * ((sm_version <= 130) ? 
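\/* newer SMs get a proportionally larger workload *\/ 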
1 : 2), select_ratio, CUB_TYPE_STRING(short));\r\n ComparePointer( num_items, select_ratio, CUB_TYPE_STRING(int));\r\n ComparePointer( num_items \/ 2, select_ratio, CUB_TYPE_STRING(long long));\r\n ComparePointer( num_items \/ 4, select_ratio, CUB_TYPE_STRING(TestFoo));\r\n\r\n#else\r\n\r\n \/\/ Compile\/run thorough tests\r\n for (int i = 0; i <= g_repeat; ++i)\r\n {\r\n \/\/ Test different input types\r\n Test(num_items, CUB_TYPE_STRING(unsigned char));\r\n Test(num_items, CUB_TYPE_STRING(unsigned short));\r\n Test(num_items, CUB_TYPE_STRING(unsigned int));\r\n Test(num_items, CUB_TYPE_STRING(unsigned long long));\r\n\r\n Test(num_items, CUB_TYPE_STRING(uchar2));\r\n Test(num_items, CUB_TYPE_STRING(ushort2));\r\n Test(num_items, CUB_TYPE_STRING(uint2));\r\n Test(num_items, CUB_TYPE_STRING(ulonglong2));\r\n\r\n Test(num_items, CUB_TYPE_STRING(uchar4));\r\n Test(num_items, CUB_TYPE_STRING(ushort4));\r\n Test(num_items, CUB_TYPE_STRING(uint4));\r\n Test(num_items, CUB_TYPE_STRING(ulonglong4));\r\n\r\n Test(num_items, CUB_TYPE_STRING(TestFoo));\r\n Test(num_items, CUB_TYPE_STRING(TestBar));\r\n }\r\n\r\n#endif\r\n\r\n return 0;\r\n}\r\n\r\n\r\n\r\n","avg_line_length":35.0508905852,"max_line_length":158,"alphanum_fraction":0.584137931} {"size":35663,"ext":"cuh","lang":"Cuda","max_stars_count":5168.0,"content":"\/******************************************************************************\n * Copyright (c) 2011, Duane Merrill. All rights reserved.\n * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and\/or other materials provided with the distribution.\n * * Neither the name of the NVIDIA CORPORATION nor the\n * names of its contributors may be used to endorse or promote products\n * derived from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n ******************************************************************************\/\n\n\/**\n * \\file\n * cub::AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode.\n *\/\n\n#pragma once\n\n#include \n\n#include \"single_pass_scan_operators.cuh\"\n#include \"..\/block\/block_load.cuh\"\n#include \"..\/block\/block_store.cuh\"\n#include \"..\/block\/block_scan.cuh\"\n#include \"..\/block\/block_exchange.cuh\"\n#include \"..\/block\/block_discontinuity.cuh\"\n#include \"..\/grid\/grid_queue.cuh\"\n#include \"..\/iterator\/cache_modified_input_iterator.cuh\"\n#include \"..\/iterator\/constant_input_iterator.cuh\"\n#include \"..\/util_namespace.cuh\"\n\n\/\/\/ Optional outer namespace(s)\nCUB_NS_PREFIX\n\n\/\/\/ CUB namespace\nnamespace cub {\n\n\n\/******************************************************************************\n * Tuning policy types\n ******************************************************************************\/\n\n\/**\n * Parameterizable tuning policy type for AgentRle\n *\/\ntemplate <\n int _BLOCK_THREADS, \/\/\/< Threads per thread block\n int _ITEMS_PER_THREAD, \/\/\/< Items per thread (per tile of input)\n BlockLoadAlgorithm _LOAD_ALGORITHM, \/\/\/< The BlockLoad algorithm to use\n CacheLoadModifier _LOAD_MODIFIER, \/\/\/< Cache load modifier for reading input elements\n bool _STORE_WARP_TIME_SLICING, \/\/\/< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)\n BlockScanAlgorithm _SCAN_ALGORITHM> \/\/\/< The BlockScan algorithm to use\nstruct AgentRlePolicy\n{\n enum\n {\n BLOCK_THREADS = _BLOCK_THREADS, \/\/\/< Threads per thread block\n ITEMS_PER_THREAD = _ITEMS_PER_THREAD, \/\/\/< Items per thread (per tile of input)\n STORE_WARP_TIME_SLICING = _STORE_WARP_TIME_SLICING, \/\/\/< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)\n };\n\n static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; \/\/\/< The BlockLoad algorithm to use\n static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; \/\/\/< Cache load modifier for reading input elements\n static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; \/\/\/< The BlockScan algorithm to use\n};\n\n\n\n\n\n\/******************************************************************************\n * Thread block abstractions\n ******************************************************************************\/\n\n\/**\n * \\brief AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode \n *\/\ntemplate <\n typename AgentRlePolicyT, \/\/\/< Parameterized AgentRlePolicyT tuning policy type\n typename InputIteratorT, \/\/\/< Random-access input iterator type for data\n typename 
OffsetsOutputIteratorT, \/\/\/< Random-access output iterator type for offset values\n typename LengthsOutputIteratorT, \/\/\/< Random-access output iterator type for length values\n typename EqualityOpT, \/\/\/< T equality operator type\n typename OffsetT> \/\/\/< Signed integer type for global offsets\nstruct AgentRle\n{\n \/\/---------------------------------------------------------------------\n \/\/ Types and constants\n \/\/---------------------------------------------------------------------\n\n \/\/\/ The input value type\n typedef typename std::iterator_traits::value_type T;\n\n \/\/\/ The lengths output value type\n typedef typename If<(Equals::value_type, void>::VALUE), \/\/ LengthT = (if output iterator's value type is void) ?\n OffsetT, \/\/ ... then the OffsetT type,\n typename std::iterator_traits::value_type>::Type LengthT; \/\/ ... else the output iterator's value type\n\n \/\/\/ Tuple type for scanning (pairs run-length and run-index)\n typedef KeyValuePair LengthOffsetPair;\n\n \/\/\/ Tile status descriptor interface type\n typedef ReduceByKeyScanTileState ScanTileStateT;\n\n \/\/ Constants\n enum\n {\n WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH),\n BLOCK_THREADS = AgentRlePolicyT::BLOCK_THREADS,\n ITEMS_PER_THREAD = AgentRlePolicyT::ITEMS_PER_THREAD,\n WARP_ITEMS = WARP_THREADS * ITEMS_PER_THREAD,\n TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,\n WARPS = (BLOCK_THREADS + WARP_THREADS - 1) \/ WARP_THREADS,\n\n \/\/\/ Whether or not to sync after loading data\n SYNC_AFTER_LOAD = (AgentRlePolicyT::LOAD_ALGORITHM != BLOCK_LOAD_DIRECT),\n\n \/\/\/ Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage)\n STORE_WARP_TIME_SLICING = AgentRlePolicyT::STORE_WARP_TIME_SLICING,\n ACTIVE_EXCHANGE_WARPS = (STORE_WARP_TIME_SLICING) ? 
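\/* time-slicing shares a single warp's exchange storage across all warps *\/ 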
1 : WARPS,\n    };\n\n\n    \/**\n     * Special operator that signals all out-of-bounds items are not equal to everything else,\n     * forcing both (1) the last item to be tail-flagged and (2) all oob items to be marked\n     * trivial.\n     *\/\n    template \n    struct OobInequalityOp\n    {\n        OffsetT         num_remaining;\n        EqualityOpT     equality_op;\n\n        __device__ __forceinline__ OobInequalityOp(\n            OffsetT     num_remaining,\n            EqualityOpT equality_op)\n        :\n            num_remaining(num_remaining),\n            equality_op(equality_op)\n        {}\n\n        template \n        __host__ __device__ __forceinline__ bool operator()(T first, T second, Index idx)\n        {\n            if (!LAST_TILE || (idx < num_remaining))\n                return !equality_op(first, second);\n            else\n                return true;\n        }\n    };\n\n\n    \/\/ Cache-modified Input iterator wrapper type (for applying cache modifier) for data\n    typedef typename If::VALUE,\n            CacheModifiedInputIterator,     \/\/ Wrap the native input pointer with CacheModifiedInputIterator\n            InputIteratorT>::Type                       \/\/ Directly use the supplied input iterator type\n        WrappedInputIteratorT;\n\n    \/\/ Parameterized BlockLoad type for data\n    typedef BlockLoad<\n            T,\n            AgentRlePolicyT::BLOCK_THREADS,\n            AgentRlePolicyT::ITEMS_PER_THREAD,\n            AgentRlePolicyT::LOAD_ALGORITHM>\n        BlockLoadT;\n\n    \/\/ Parameterized BlockDiscontinuity type for data\n    typedef BlockDiscontinuity BlockDiscontinuityT;\n\n    \/\/ Parameterized WarpScan type\n    typedef WarpScan WarpScanPairs;\n\n    \/\/ Reduce-length-by-run scan operator\n    typedef ReduceBySegmentOp ReduceBySegmentOpT;\n\n    \/\/ Callback type for obtaining tile prefix during block scan\n    typedef TilePrefixCallbackOp<\n            LengthOffsetPair,\n            ReduceBySegmentOpT,\n            ScanTileStateT>\n        TilePrefixCallbackOpT;\n\n    \/\/ Warp exchange types\n    typedef WarpExchange WarpExchangePairs;\n\n    typedef typename If::Type WarpExchangePairsStorage;\n\n    typedef WarpExchange WarpExchangeOffsets;\n    typedef WarpExchange WarpExchangeLengths;\n\n    typedef LengthOffsetPair WarpAggregates[WARPS];\n\n    \/\/ Shared memory type for this thread block\n    struct _TempStorage\n    {\n        \/\/ Aliasable storage layout\n        union Aliasable\n        {\n            struct\n            {\n                typename BlockDiscontinuityT::TempStorage       discontinuity;      \/\/ Smem needed for discontinuity detection\n                typename WarpScanPairs::TempStorage             warp_scan[WARPS];   \/\/ Smem needed for warp-synchronous scans\n                Uninitialized                   warp_aggregates;    \/\/ Smem needed for sharing warp-wide aggregates\n                typename TilePrefixCallbackOpT::TempStorage     prefix;             \/\/ Smem needed for cooperative prefix callback\n            };\n\n            \/\/ Smem needed for input loading\n            typename BlockLoadT::TempStorage                    load;\n\n            \/\/ Aliasable layout needed for two-phase scatter\n            union ScatterAliasable\n            {\n                unsigned long long                              align;\n                WarpExchangePairsStorage                        exchange_pairs[ACTIVE_EXCHANGE_WARPS];\n                typename WarpExchangeOffsets::TempStorage       exchange_offsets[ACTIVE_EXCHANGE_WARPS];\n                typename WarpExchangeLengths::TempStorage       exchange_lengths[ACTIVE_EXCHANGE_WARPS];\n\n            } scatter_aliasable;\n\n        } aliasable;\n\n        OffsetT             tile_idx;                   \/\/ Shared tile index\n        LengthOffsetPair    tile_inclusive;             \/\/ Inclusive tile prefix\n        LengthOffsetPair    tile_exclusive;             \/\/ Exclusive tile prefix\n    };\n\n    \/\/ Alias wrapper allowing storage to be unioned\n    struct TempStorage : Uninitialized<_TempStorage> {};\n\n\n    \/\/---------------------------------------------------------------------\n    \/\/ Per-thread fields\n    \/\/---------------------------------------------------------------------\n\n    _TempStorage&                   temp_storage;       \/\/\/< Reference to temp_storage\n\n    WrappedInputIteratorT           d_in;               \/\/\/< Pointer to input sequence of data 
items\n OffsetsOutputIteratorT d_offsets_out; \/\/\/< Input run offsets\n LengthsOutputIteratorT d_lengths_out; \/\/\/< Output run lengths\n\n EqualityOpT equality_op; \/\/\/< T equality operator\n ReduceBySegmentOpT scan_op; \/\/\/< Reduce-length-by-flag scan operator\n OffsetT num_items; \/\/\/< Total number of input items\n\n\n \/\/---------------------------------------------------------------------\n \/\/ Constructor\n \/\/---------------------------------------------------------------------\n\n \/\/ Constructor\n __device__ __forceinline__\n AgentRle(\n TempStorage &temp_storage, \/\/\/< [in] Reference to temp_storage\n InputIteratorT d_in, \/\/\/< [in] Pointer to input sequence of data items\n OffsetsOutputIteratorT d_offsets_out, \/\/\/< [out] Pointer to output sequence of run offsets\n LengthsOutputIteratorT d_lengths_out, \/\/\/< [out] Pointer to output sequence of run lengths\n EqualityOpT equality_op, \/\/\/< [in] T equality operator\n OffsetT num_items) \/\/\/< [in] Total number of input items\n :\n temp_storage(temp_storage.Alias()),\n d_in(d_in),\n d_offsets_out(d_offsets_out),\n d_lengths_out(d_lengths_out),\n equality_op(equality_op),\n scan_op(cub::Sum()),\n num_items(num_items)\n {}\n\n\n \/\/---------------------------------------------------------------------\n \/\/ Utility methods for initializing the selections\n \/\/---------------------------------------------------------------------\n\n template \n __device__ __forceinline__ void InitializeSelections(\n OffsetT tile_offset,\n OffsetT num_remaining,\n T (&items)[ITEMS_PER_THREAD],\n LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD])\n {\n bool head_flags[ITEMS_PER_THREAD];\n bool tail_flags[ITEMS_PER_THREAD];\n\n OobInequalityOp inequality_op(num_remaining, equality_op);\n\n if (FIRST_TILE && LAST_TILE)\n {\n \/\/ First-and-last-tile always head-flags the first item and tail-flags the last item\n\n BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(\n head_flags, tail_flags, items, inequality_op);\n }\n else if (FIRST_TILE)\n {\n \/\/ First-tile always head-flags the first item\n\n \/\/ Get the first item from the next tile\n T tile_successor_item;\n if (threadIdx.x == BLOCK_THREADS - 1)\n tile_successor_item = d_in[tile_offset + TILE_ITEMS];\n\n BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(\n head_flags, tail_flags, tile_successor_item, items, inequality_op);\n }\n else if (LAST_TILE)\n {\n \/\/ Last-tile always flags the last item\n\n \/\/ Get the last item from the previous tile\n T tile_predecessor_item;\n if (threadIdx.x == 0)\n tile_predecessor_item = d_in[tile_offset - 1];\n\n BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(\n head_flags, tile_predecessor_item, tail_flags, items, inequality_op);\n }\n else\n {\n \/\/ Get the first item from the next tile\n T tile_successor_item;\n if (threadIdx.x == BLOCK_THREADS - 1)\n tile_successor_item = d_in[tile_offset + TILE_ITEMS];\n\n \/\/ Get the last item from the previous tile\n T tile_predecessor_item;\n if (threadIdx.x == 0)\n tile_predecessor_item = d_in[tile_offset - 1];\n\n BlockDiscontinuityT(temp_storage.aliasable.discontinuity).FlagHeadsAndTails(\n head_flags, tile_predecessor_item, tail_flags, tile_successor_item, items, inequality_op);\n }\n\n \/\/ Zip counts and runs\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)\n {\n lengths_and_num_runs[ITEM].key = head_flags[ITEM] && (!tail_flags[ITEM]);\n lengths_and_num_runs[ITEM].value = 
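\/* every item of a non-trivial run contributes 1 to its run's length *\/ 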
((!head_flags[ITEM]) || (!tail_flags[ITEM]));\n }\n }\n\n \/\/---------------------------------------------------------------------\n \/\/ Scan utility methods\n \/\/---------------------------------------------------------------------\n\n \/**\n * Scan of allocations\n *\/\n __device__ __forceinline__ void WarpScanAllocations(\n LengthOffsetPair &tile_aggregate,\n LengthOffsetPair &warp_aggregate,\n LengthOffsetPair &warp_exclusive_in_tile,\n LengthOffsetPair &thread_exclusive_in_warp,\n LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD])\n {\n \/\/ Perform warpscans\n unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x \/ WARP_THREADS);\n int lane_id = LaneId();\n\n LengthOffsetPair identity;\n identity.key = 0;\n identity.value = 0;\n\n LengthOffsetPair thread_inclusive;\n LengthOffsetPair thread_aggregate = internal::ThreadReduce(lengths_and_num_runs, scan_op);\n WarpScanPairs(temp_storage.aliasable.warp_scan[warp_id]).Scan(\n thread_aggregate,\n thread_inclusive,\n thread_exclusive_in_warp,\n identity,\n scan_op);\n\n \/\/ Last lane in each warp shares its warp-aggregate\n if (lane_id == WARP_THREADS - 1)\n temp_storage.aliasable.warp_aggregates.Alias()[warp_id] = thread_inclusive;\n\n CTA_SYNC();\n\n \/\/ Accumulate total selected and the warp-wide prefix\n warp_exclusive_in_tile = identity;\n warp_aggregate = temp_storage.aliasable.warp_aggregates.Alias()[warp_id];\n tile_aggregate = temp_storage.aliasable.warp_aggregates.Alias()[0];\n\n #pragma unroll\n for (int WARP = 1; WARP < WARPS; ++WARP)\n {\n if (warp_id == WARP)\n warp_exclusive_in_tile = tile_aggregate;\n\n tile_aggregate = scan_op(tile_aggregate, temp_storage.aliasable.warp_aggregates.Alias()[WARP]);\n }\n }\n\n\n \/\/---------------------------------------------------------------------\n \/\/ Utility methods for scattering selections\n \/\/---------------------------------------------------------------------\n\n \/**\n * Two-phase scatter, specialized for warp time-slicing\n *\/\n template \n __device__ __forceinline__ void ScatterTwoPhase(\n OffsetT tile_num_runs_exclusive_in_global,\n OffsetT warp_num_runs_aggregate,\n OffsetT warp_num_runs_exclusive_in_tile,\n OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],\n LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD],\n Int2Type is_warp_time_slice)\n {\n unsigned int warp_id = ((WARPS == 1) ? 
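\/* a single-warp block is always warp 0 *\/ 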
0 : threadIdx.x \/ WARP_THREADS);\n int lane_id = LaneId();\n\n \/\/ Locally compact items within the warp (first warp)\n if (warp_id == 0)\n {\n WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]).ScatterToStriped(\n lengths_and_offsets, thread_num_runs_exclusive_in_warp);\n }\n\n \/\/ Locally compact items within the warp (remaining warps)\n #pragma unroll\n for (int SLICE = 1; SLICE < WARPS; ++SLICE)\n {\n CTA_SYNC();\n\n if (warp_id == SLICE)\n {\n WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]).ScatterToStriped(\n lengths_and_offsets, thread_num_runs_exclusive_in_warp);\n }\n }\n\n \/\/ Global scatter\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)\n {\n if ((ITEM * WARP_THREADS) < warp_num_runs_aggregate - lane_id)\n {\n OffsetT item_offset =\n tile_num_runs_exclusive_in_global +\n warp_num_runs_exclusive_in_tile +\n (ITEM * WARP_THREADS) + lane_id;\n\n \/\/ Scatter offset\n d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key;\n\n \/\/ Scatter length if not the first (global) length\n if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0))\n {\n d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value;\n }\n }\n }\n }\n\n\n \/**\n * Two-phase scatter\n *\/\n template \n __device__ __forceinline__ void ScatterTwoPhase(\n OffsetT tile_num_runs_exclusive_in_global,\n OffsetT warp_num_runs_aggregate,\n OffsetT warp_num_runs_exclusive_in_tile,\n OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],\n LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD],\n Int2Type is_warp_time_slice)\n {\n unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x \/ WARP_THREADS);\n int lane_id = LaneId();\n\n \/\/ Unzip\n OffsetT run_offsets[ITEMS_PER_THREAD];\n LengthT run_lengths[ITEMS_PER_THREAD];\n\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)\n {\n run_offsets[ITEM] = lengths_and_offsets[ITEM].key;\n run_lengths[ITEM] = lengths_and_offsets[ITEM].value;\n }\n\n WarpExchangeOffsets(temp_storage.aliasable.scatter_aliasable.exchange_offsets[warp_id]).ScatterToStriped(\n run_offsets, thread_num_runs_exclusive_in_warp);\n\n WARP_SYNC(0xffffffff);\n\n WarpExchangeLengths(temp_storage.aliasable.scatter_aliasable.exchange_lengths[warp_id]).ScatterToStriped(\n run_lengths, thread_num_runs_exclusive_in_warp);\n\n \/\/ Global scatter\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)\n {\n if ((ITEM * WARP_THREADS) + lane_id < warp_num_runs_aggregate)\n {\n OffsetT item_offset =\n tile_num_runs_exclusive_in_global +\n warp_num_runs_exclusive_in_tile +\n (ITEM * WARP_THREADS) + lane_id;\n\n \/\/ Scatter offset\n d_offsets_out[item_offset] = run_offsets[ITEM];\n\n \/\/ Scatter length if not the first (global) length\n if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0))\n {\n d_lengths_out[item_offset - 1] = run_lengths[ITEM];\n }\n }\n }\n }\n\n\n \/**\n * Direct scatter\n *\/\n template \n __device__ __forceinline__ void ScatterDirect(\n OffsetT tile_num_runs_exclusive_in_global,\n OffsetT warp_num_runs_aggregate,\n OffsetT warp_num_runs_exclusive_in_tile,\n OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],\n LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD])\n {\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)\n {\n if (thread_num_runs_exclusive_in_warp[ITEM] < warp_num_runs_aggregate)\n {\n OffsetT item_offset =\n tile_num_runs_exclusive_in_global +\n warp_num_runs_exclusive_in_tile +\n 
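\/\/ global run slot = runs before this tile + runs before this warp + this thread's run index\n                    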
thread_num_runs_exclusive_in_warp[ITEM];\n\n \/\/ Scatter offset\n d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key;\n\n \/\/ Scatter length if not the first (global) length\n if (item_offset >= 1)\n {\n d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value;\n }\n }\n }\n }\n\n\n \/**\n * Scatter\n *\/\n template \n __device__ __forceinline__ void Scatter(\n OffsetT tile_num_runs_aggregate,\n OffsetT tile_num_runs_exclusive_in_global,\n OffsetT warp_num_runs_aggregate,\n OffsetT warp_num_runs_exclusive_in_tile,\n OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD],\n LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD])\n {\n if ((ITEMS_PER_THREAD == 1) || (tile_num_runs_aggregate < BLOCK_THREADS))\n {\n \/\/ Direct scatter if the warp has any items\n if (warp_num_runs_aggregate)\n {\n ScatterDirect(\n tile_num_runs_exclusive_in_global,\n warp_num_runs_aggregate,\n warp_num_runs_exclusive_in_tile,\n thread_num_runs_exclusive_in_warp,\n lengths_and_offsets);\n }\n }\n else\n {\n \/\/ Scatter two phase\n ScatterTwoPhase(\n tile_num_runs_exclusive_in_global,\n warp_num_runs_aggregate,\n warp_num_runs_exclusive_in_tile,\n thread_num_runs_exclusive_in_warp,\n lengths_and_offsets,\n Int2Type());\n }\n }\n\n\n\n \/\/---------------------------------------------------------------------\n \/\/ Cooperatively scan a device-wide sequence of tiles with other CTAs\n \/\/---------------------------------------------------------------------\n\n \/**\n * Process a tile of input (dynamic chained scan)\n *\/\n template <\n bool LAST_TILE>\n __device__ __forceinline__ LengthOffsetPair ConsumeTile(\n OffsetT num_items, \/\/\/< Total number of global input items\n OffsetT num_remaining, \/\/\/< Number of global input items remaining (including this tile)\n int tile_idx, \/\/\/< Tile index\n OffsetT tile_offset, \/\/\/< Tile offset\n ScanTileStateT &tile_status) \/\/\/< Global list of tile status\n {\n if (tile_idx == 0)\n {\n \/\/ First tile\n\n \/\/ Load items\n T items[ITEMS_PER_THREAD];\n if (LAST_TILE)\n BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T());\n else\n BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items);\n\n if (SYNC_AFTER_LOAD)\n CTA_SYNC();\n\n \/\/ Set flags\n LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD];\n\n InitializeSelections(\n tile_offset,\n num_remaining,\n items,\n lengths_and_num_runs);\n\n \/\/ Exclusive scan of lengths and runs\n LengthOffsetPair tile_aggregate;\n LengthOffsetPair warp_aggregate;\n LengthOffsetPair warp_exclusive_in_tile;\n LengthOffsetPair thread_exclusive_in_warp;\n\n WarpScanAllocations(\n tile_aggregate,\n warp_aggregate,\n warp_exclusive_in_tile,\n thread_exclusive_in_warp,\n lengths_and_num_runs);\n\n \/\/ Update tile status if this is not the last tile\n if (!LAST_TILE && (threadIdx.x == 0))\n tile_status.SetInclusive(0, tile_aggregate);\n\n \/\/ Update thread_exclusive_in_warp to fold in warp run-length\n if (thread_exclusive_in_warp.key == 0)\n thread_exclusive_in_warp.value += warp_exclusive_in_tile.value;\n\n LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD];\n OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD];\n LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD];\n\n \/\/ Downsweep scan through lengths_and_num_runs\n internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp);\n\n \/\/ Zip\n\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)\n {\n 
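\/\/ zip scanned run lengths with global input offsets; non-head items get the out-of-range slot WARP_THREADS * ITEMS_PER_THREAD so the scatter step discards them\n                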
lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value;\n lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM;\n thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ?\n lengths_and_num_runs2[ITEM].key : \/\/ keep\n WARP_THREADS * ITEMS_PER_THREAD; \/\/ discard\n }\n\n OffsetT tile_num_runs_aggregate = tile_aggregate.key;\n OffsetT tile_num_runs_exclusive_in_global = 0;\n OffsetT warp_num_runs_aggregate = warp_aggregate.key;\n OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key;\n\n \/\/ Scatter\n Scatter(\n tile_num_runs_aggregate,\n tile_num_runs_exclusive_in_global,\n warp_num_runs_aggregate,\n warp_num_runs_exclusive_in_tile,\n thread_num_runs_exclusive_in_warp,\n lengths_and_offsets);\n\n \/\/ Return running total (inclusive of this tile)\n return tile_aggregate;\n }\n else\n {\n \/\/ Not first tile\n\n \/\/ Load items\n T items[ITEMS_PER_THREAD];\n if (LAST_TILE)\n BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T());\n else\n BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items);\n\n if (SYNC_AFTER_LOAD)\n CTA_SYNC();\n\n \/\/ Set flags\n LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD];\n\n InitializeSelections(\n tile_offset,\n num_remaining,\n items,\n lengths_and_num_runs);\n\n \/\/ Exclusive scan of lengths and runs\n LengthOffsetPair tile_aggregate;\n LengthOffsetPair warp_aggregate;\n LengthOffsetPair warp_exclusive_in_tile;\n LengthOffsetPair thread_exclusive_in_warp;\n\n WarpScanAllocations(\n tile_aggregate,\n warp_aggregate,\n warp_exclusive_in_tile,\n thread_exclusive_in_warp,\n lengths_and_num_runs);\n\n \/\/ First warp computes tile prefix in lane 0\n TilePrefixCallbackOpT prefix_op(tile_status, temp_storage.aliasable.prefix, Sum(), tile_idx);\n unsigned int warp_id = ((WARPS == 1) ? 
0 : threadIdx.x \/ WARP_THREADS);\n if (warp_id == 0)\n {\n prefix_op(tile_aggregate);\n if (threadIdx.x == 0)\n temp_storage.tile_exclusive = prefix_op.exclusive_prefix;\n }\n\n CTA_SYNC();\n\n LengthOffsetPair tile_exclusive_in_global = temp_storage.tile_exclusive;\n\n \/\/ Update thread_exclusive_in_warp to fold in warp and tile run-lengths\n LengthOffsetPair thread_exclusive = scan_op(tile_exclusive_in_global, warp_exclusive_in_tile);\n if (thread_exclusive_in_warp.key == 0)\n thread_exclusive_in_warp.value += thread_exclusive.value;\n\n \/\/ Downsweep scan through lengths_and_num_runs\n LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD];\n LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD];\n OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD];\n\n internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp);\n\n \/\/ Zip\n #pragma unroll\n for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)\n {\n lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value;\n lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM;\n thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ?\n lengths_and_num_runs2[ITEM].key : \/\/ keep\n WARP_THREADS * ITEMS_PER_THREAD; \/\/ discard\n }\n\n OffsetT tile_num_runs_aggregate = tile_aggregate.key;\n OffsetT tile_num_runs_exclusive_in_global = tile_exclusive_in_global.key;\n OffsetT warp_num_runs_aggregate = warp_aggregate.key;\n OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key;\n\n \/\/ Scatter\n Scatter(\n tile_num_runs_aggregate,\n tile_num_runs_exclusive_in_global,\n warp_num_runs_aggregate,\n warp_num_runs_exclusive_in_tile,\n thread_num_runs_exclusive_in_warp,\n lengths_and_offsets);\n\n \/\/ Return running total (inclusive of this tile)\n return prefix_op.inclusive_prefix;\n }\n }\n\n\n \/**\n * Scan tiles of items as part of a dynamic chained scan\n *\/\n template \/\/\/< Output iterator type for recording number of items selected\n __device__ __forceinline__ void ConsumeRange(\n int num_tiles, \/\/\/< Total number of input tiles\n ScanTileStateT& tile_status, \/\/\/< Global list of tile status\n NumRunsIteratorT d_num_runs_out) \/\/\/< Output pointer for total number of runs identified\n {\n \/\/ Blocks are launched in increasing order, so just assign one tile per block\n int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; \/\/ Current tile index\n OffsetT tile_offset = tile_idx * TILE_ITEMS; \/\/ Global offset for the current tile\n OffsetT num_remaining = num_items - tile_offset; \/\/ Remaining items (including this tile)\n\n if (tile_idx < num_tiles - 1)\n {\n \/\/ Not the last tile (full)\n ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status);\n }\n else if (num_remaining > 0)\n {\n \/\/ The last tile (possibly partially-full)\n LengthOffsetPair running_total = ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status);\n\n if (threadIdx.x == 0)\n {\n \/\/ Output the total number of items selected\n *d_num_runs_out = running_total.key;\n\n \/\/ The inclusive prefix contains accumulated length reduction for the last run\n if (running_total.key > 0)\n d_lengths_out[running_total.key - 1] = running_total.value;\n }\n }\n }\n};\n\n\n} \/\/ CUB namespace\nCUB_NS_POSTFIX \/\/ Optional outer namespace(s)\n\n","avg_line_length":42.5572792363,"max_line_length":263,"alphanum_fraction":0.5803493817} 
{"size":7942,"ext":"cu","lang":"Cuda","max_stars_count":202.0,"content":"\ufeff\/\/ Hitogram.cu\n\/\/ \u5b9e\u73b0\u8ba1\u7b97\u56fe\u50cf\u76f4\u65b9\u56fe\u7b97\u6cd5\n\n#include \"Histogram.h\"\n\n#include \nusing namespace std;\n\n#include \"ErrorCode.h\"\n\n\/\/ \u5b8f\uff1aHISTOGRAM_PACK_LEVEL\n\/\/ \u5b9a\u4e49\u4e86\u4e00\u4e2a\u7ebf\u7a0b\u4e2d\u8ba1\u7b97\u7684\u50cf\u7d20\u70b9\u4e2a\u6570\uff0c\u82e5\u8be5\u503c\u4e3a4\uff0c\u5219\u5728\u4e00\u4e2a\u7ebf\u7a0b\u4e2d\u8ba1\u7b97 2 ^ 4 = 16\n\/\/ \u4e2a\u50cf\u7d20\u70b9\u3002\n#define HISTOGRAM_PACK_LEVEL 4\n\n#define HISTOGRAM_PACK_NUM (1 << HISTOGRAM_PACK_LEVEL)\n#define HISTOGRAM_PACK_MASK (HISTOGRAM_PACK_NUM - 1)\n\n#if (HISTOGRAM_PACK_LEVEL < 1 || HISTOGRAM_PACK_LEVEL > 5)\n# error Unsupport HISTOGRAM_PACK_LEVEL Value!!!\n#endif\n\n\/\/ \u5b8f\uff1aDEF_BLOCK_X \u548c DEF_BLOCK_Y\n\/\/ \u5b9a\u4e49\u4e86\u9ed8\u8ba4\u7684\u7ebf\u7a0b\u5757\u7684\u5c3a\u5bf8\u3002\n#define DEF_BLOCK_X 32 \n#define DEF_BLOCK_Y 8 \n\n\/\/ Kernel \u51fd\u6570: _histogramKer\uff08\u8ba1\u7b97\u56fe\u50cf\u7684\u76f4\u65b9\u56fe\uff09\n\/\/ \u6839\u636e\u8f93\u5165\u56fe\u50cf\u6bcf\u4e2a\u50cf\u7d20\u70b9\u7684\u7070\u5ea6\u503c\uff0c\u7d2f\u52a0\u5230\u76f4\u65b9\u56fe\u6570\u7ec4\u4e2d\u7684\u76f8\u5e94\u7684\u4f4d\u7f6e\uff0c\u4ece\u800c\u5f97\u5230\n\/\/ \u8f93\u5165\u56fe\u50cf\u7684\u76f4\u65b9\u56fe\u3002\nstatic __global__ void \/\/ Kernel \u51fd\u6570\u65e0\u8fd4\u56de\u503c\u3002\n_histogramKer(\n ImageCuda inimg, \/\/ \u8f93\u5165\u56fe\u50cf\u3002\n unsigned int *devhist \/\/\u56fe\u50cf\u76f4\u65b9\u56fe\u3002\n);\n\n\/\/ Kernel \u51fd\u6570: _histogramKer\uff08\u8ba1\u7b97\u56fe\u50cf\u7684\u76f4\u65b9\u56fe\uff09\nstatic __global__ void _histogramKer(ImageCuda inimg, unsigned int *devhist)\n{\n \/\/ \u7533\u8bf7\u5927\u5c0f\u4e3a\u7070\u5ea6\u56fe\u50cf\u7070\u5ea6\u7ea7 256 \u7684\u5171\u4eab\u5185\u5b58\uff0c\u5176\u4e2d\u4e0b\u6807\u4ee3\u8868\u56fe\u50cf\u7684\u7070\u5ea6\u503c\uff0c\u6570\n \/\/ \u7ec4\u7528\u6765\u7d2f\u52a0\u7b49\u4e8e\u8be5\u7070\u5ea6\u503c\u7684\u50cf\u7d20\u70b9\u4e2a\u6570\u3002\n __shared__ unsigned int temp[256];\n\n \/\/ \u8ba1\u7b97\u60f3\u6210\u5bf9\u5e94\u7684\u8f93\u51fa\u70b9\u7684\u4f4d\u7f6e\uff0c\u5176\u4e2d c \u548c r \u5206\u522b\u8868\u793a\u7ebf\u7a0b\u5904\u7406\u7684\u50cf\u7d20\u70b9\u7684\n \/\/ \u5750\u6807\u7684 x \u548c y \u5206\u91cf\uff08\u5176\u4e2d\uff0cc \u8868\u793a column\uff1br \u8868\u793a row\uff09\u3002\u7531\u4e8e\u6211\u4eec\u91c7\u7528\u4e86\u5e76\n \/\/ \u884c\u5ea6\u7f29\u51cf\u7684\u7b56\u7565\uff0c\u9ed8\u8ba4\u4ee4\u4e00\u4e2a\u7ebf\u7a0b\u5904\u7406 16 \u4e2a\u8f93\u51fa\u50cf\u7d20\uff0c\u8fd9\u56db\u4e2a\u50cf\u7d20\u4f4d\u4e8e\u7edf\u4e00\u5217\n \/\/ \u7684\u76f8\u90bb 16 \u884c\u4e0a\uff0c\u56e0\u6b64\uff0c\u5bf9\u4e8e r \u9700\u8981\u8fdb\u884c\u53f3\u79fb\u8ba1\u7b97\u3002\n int c = blockIdx.x * blockDim.x + threadIdx.x;\n int r = (blockIdx.y * blockDim.y + threadIdx.y) << HISTOGRAM_PACK_LEVEL;\n \n \/\/ \u8ba1\u7b97\u8be5\u7ebf\u7a0b\u5728\u5757\u5185\u7684\u76f8\u5bf9\u4f4d\u7f6e\u3002\n int inindex = threadIdx.y * blockDim.x + threadIdx.x;\n \n \/\/ \u4e34\u65f6\u53d8\u91cf\uff0ccurgray \u7528\u4e8e\u5b58\u50a8\u5f53\u524d\u70b9\u7684\u50cf\u7d20\u503c\uff0cinptrgray \u5b58\u50a8\u4e0b\u4e00\u4e2a\u70b9\u7684\u50cf\u7d20\u503c\u3002\n \/\/ cursum \u7528\u4e8e\u5b58\u50a8\u5c40\u90e8\u7d2f\u52a0\u548c\u3002\n unsigned int curgray = 0, inptrgray;\n unsigned int curnum = 0;\n \n \/\/ 
\u82e5\u7ebf\u7a0b\u5728\u5757\u5185\u7684\u76f8\u5bf9\u4f4d\u7f6e\u5c0f\u4e8e 256\uff0c\u5373\u7070\u5ea6\u7ea7\u5927\u5c0f\uff0c\u5219\u7528\u6765\u7ed9\u5171\u4eab\u5185\u5b58\u8d4b\u521d\u503c 0\u3002\n if (inindex < 256)\n temp[inindex] = 0;\n \/\/ \u8fdb\u884c\u5757\u5185\u540c\u6b65\uff0c\u4fdd\u8bc1\u6267\u884c\u5230\u6b64\u5904\uff0c\u5171\u4eab\u5185\u5b58\u7684\u6570\u7ec4\u4e2d\u6240\u6709\u5143\u7d20\u7684\u503c\u90fd\u4e3a 0\u3002\n __syncthreads();\n \n do {\n \/\/ \u7ebf\u7a0b\u4e2d\u5904\u7406\u7b2c\u4e00\u4e2a\u70b9\u3002\n \/\/ \u68c0\u67e5\u7b2c\u4e00\u4e2a\u50cf\u7d20\u70b9\u662f\u5426\u8d8a\u754c\uff0c\u5982\u679c\u8d8a\u754c\uff0c\u5219\u4e0d\u8fdb\u884c\u5904\u7406\uff0c\u4e00\u65b9\u9762\u8282\u7701\u8ba1\u7b97\u8d44\n \/\/ \u6e90\uff0c\u4e00\u65b9\u9762\u9632\u6b62\u7531\u4e8e\u6bb5\u9519\u8bef\u5bfc\u81f4\u7684\u7a0b\u5e8f\u5d29\u6e83\u3002\n if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)\n break;\n\t\t\t\n \/\/ \u8ba1\u7b97\u7b2c\u4e00\u4e2a\u8f93\u5165\u5750\u6807\u70b9\u5bf9\u5e94\u7684\u56fe\u50cf\u6570\u636e\u6570\u7ec4\u4e0b\u6807\u3002\n int inidx = r * inimg.pitchBytes + c; \n\n \/\/ \u8bfb\u53d6\u7b2c\u4e00\u4e2a\u8f93\u5165\u5750\u6807\u70b9\u5bf9\u5e94\u7684\u50cf\u7d20\u503c\u3002\n curgray = inimg.imgMeta.imgData[inidx];\n curnum = 1;\n\n \/\/ \u5904\u7406\u7b2c\u4e8c\u4e2a\u70b9\u3002\n \/\/ \u6b64\u540e\u7684\u50cf\u7d20\u70b9\uff0c\u6bcf\u4e2a\u50cf\u7d20\u70b9\u90fd\u5728\u524d\u4e00\u4e2a\u7684\u4e0b\u4e00\u884c\uff0c\u800c x \u5206\u91cf\u4fdd\u6301\u4e0d\u53d8\u3002\u56e0\n \/\/ \u6b64\uff0c\u9700\u8981\u68c0\u67e5\u8fd9\u4e2a\u50cf\u7d20\u70b9\u662f\u5426\u8d8a\u754c\u3002\u68c0\u67e5\u53ea\u9488\u5bf9 y \u5206\u91cf\u5373\u53ef\uff0cx \u5206\u91cf\u5728\u5404\u70b9\n \/\/ \u4e4b\u95f4\u6ca1\u6709\u53d8\u5316\uff0c\u6545\u4e0d\u7528\u68c0\u67e5\u3002\n if (++r >= inimg.imgMeta.height)\n break;\n\n \/\/ \u5f97\u5230\u7b2c\u4e8c\u4e2a\u70b9\u7684\u50cf\u7d20\u503c\u3002\t\t\t\n \/\/ \u6839\u636e\u4e0a\u4e00\u4e2a\u50cf\u7d20\u70b9\uff0c\u8ba1\u7b97\u5f53\u524d\u50cf\u7d20\u70b9\u7684\u5bf9\u5e94\u7684\u8f93\u51fa\u56fe\u50cf\u7684\u4e0b\u6807\u3002\u7531\u4e8e\u53ea\u6709 y\n \/\/ \u5206\u91cf\u589e\u52a0 1\uff0c\u6240\u4ee5\u4e0b\u6807\u53ea\u9700\u8981\u52a0\u4e0a\u4e00\u4e2a pitch \u5373\u53ef\uff0c\u4e0d\u9700\u8981\u5728\u8fdb\u884c\u4e58\u6cd5\u8ba1\u7b97\u3002\n inidx += inimg.pitchBytes;\n inptrgray = inimg.imgMeta.imgData[inidx];\n\t\t\n \/\/ \u82e5\u5f53\u524d\u7b2c\u4e8c\u4e2a\u70b9\u7684\u50cf\u7d20\u503c\u4e0d\u7b49\u4e8e\u524d\u4e00\u4e2a\uff0c\u628a\u5f53\u524d\u4e34\u65f6\u53d8\u91cf cursum \u4e2d\u7684\u7edf\u8ba1\u7ed3\n \/\/ \u679c\u589e\u52a0\u5230\u5171\u4eab\u5185\u5b58\u4e2d\u7684\u76f8\u5e94\u533a\u57df\uff1b\u82e5\u8be5\u503c\u7b49\u4e8e\u524d\u4e00\u4e2a\u70b9\u7684\u50cf\u7d20\u503c\uff0c\u5219\u4e34\u65f6\u53d8\u91cf\n \/\/ cursum \u52a0 1\uff0c\u7ee7\u7eed\u68c0\u67e5\u4e0b\u4e00\u4e2a\u50cf\u7d20\u70b9\u3002\n if (curgray != inptrgray) {\n \/\/ \u4f7f\u7528\u539f\u5b50\u64cd\u4f5c\u628a\u4e34\u65f6\u53d8\u91cf curnum \u7684\u7ed3\u679c\u52a0\u5230\u5171\u4eab\u5185\u5b58\u4e2d\uff0c\u53ef\u4ee5\u9632\u6b62\u591a\u4e2a\n \/\/ \u7ebf\u7a0b\u540c\u65f6\u66f4\u6539\u6570\u636e\u800c\u53d1\u751f\u7684\u5199\u9519\u8bef\u3002\n atomicAdd(&temp[curgray], curnum);\n curgray = inptrgray;\n \/\/curnum = 1;\n } else {\n curnum++;\n }\n \n \/\/ \u5b8f\uff1aHISTOGRAM_KERNEL_MAIN_PHASE\n \/\/ 
\u5b9a\u4e49\u8ba1\u7b97\u4e0b\u4e00\u4e2a\u50cf\u7d20\u70b9\u7684\u7a0b\u5e8f\u7247\u6bb5\u3002\u4f7f\u7528\u8fd9\u4e2a\u5b8f\u53ef\u4ee5\u5b9e\u73b0\u83b7\u53d6\u4e0b\u4e00\u4e2a\u70b9\u7684\u50cf\u7d20\n \/\/ \u503c\uff0c\u5e76\u7d2f\u52a0\u5230\u5171\u4eab\u5185\u5b58\uff0c\u5e76\u4e14\u7b80\u5316\u7f16\u7801\u91cf\u3002\n#define HISTOGRAM_KERNEL_MAIN_PHASE \\\n if (++r >= inimg.imgMeta.height) \\\n break; \\\n inidx += inimg.pitchBytes; \\\n inptrgray = inimg.imgMeta.imgData[inidx]; \\\n if (curgray != inptrgray) { \\\n atomicAdd(&temp[curgray], curnum); \\\n curgray = inptrgray; \\\n curnum = 1; \\\n } else { \\\n curnum++; \\\n }\n\n#define HISTOGRAM_KERNEL_MAIN_PHASEx2 \\\n HISTOGRAM_KERNEL_MAIN_PHASE \\\n HISTOGRAM_KERNEL_MAIN_PHASE\n\n#define HISTOGRAM_KERNEL_MAIN_PHASEx4 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx2 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx2\n\n#define HISTOGRAM_KERNEL_MAIN_PHASEx8 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx4 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx4\n\n#define HISTOGRAM_KERNEL_MAIN_PHASEx16 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx8 \\\n HISTOGRAM_KERNEL_MAIN_PHASEx8\n\n\/\/ \u5bf9\u4e8e\u4e0d\u540c\u7684 HISTOGRAM_PACK_LEVEL \uff0c\u5b9a\u4e49\u4e0d\u540c\u7684\u6267\u884c\u6b21\u6570\uff0c\u4ece\u800c\u4f7f\u4e00\u4e2a\u7ebf\u7a0b\u5185\u90e8\n\/\/ \u5b9e\u73b0\u5bf9\u591a\u4e2a\u70b9\u7684\u50cf\u7d20\u503c\u7684\u7edf\u8ba1\u3002\n#if (HISTOGRAM_PACK_LEVEL >= 2)\n HISTOGRAM_KERNEL_MAIN_PHASEx2\n# if (HISTOGRAM_PACK_LEVEL >= 3)\n HISTOGRAM_KERNEL_MAIN_PHASEx4\n# if (HISTOGRAM_PACK_LEVEL >= 4)\n HISTOGRAM_KERNEL_MAIN_PHASEx8\n# if (HISTOGRAM_PACK_LEVEL >= 5)\n HISTOGRAM_KERNEL_MAIN_PHASEx16\n# endif\n# endif\n# endif\n#endif\n\n\/\/ \u53d6\u6d88\u524d\u9762\u7684\u5b8f\u5b9a\u4e49\u3002\n#undef HISTOGRAM_KERNEL_MAIN_PHASEx16\n#undef HISTOGRAM_KERNEL_MAIN_PHASEx8\n#undef HISTOGRAM_KERNEL_MAIN_PHASEx4\n#undef HISTOGRAM_KERNEL_MAIN_PHASEx2\n#undef HISTOGRAM_KERNEL_MAIN_PHASE\n\n } while (0);\n\n \/\/ \u4f7f\u7528\u539f\u5b50\u64cd\u4f5c\u628a\u4e34\u65f6\u53d8\u91cf curnum \u7684\u7ed3\u679c\u52a0\u5230\u5171\u4eab\u5185\u5b58\u4e2d\uff0c\u53ef\u4ee5\u9632\u6b62\u591a\u4e2a\n \/\/ \u7ebf\u7a0b\u540c\u65f6\u66f4\u6539\u6570\u636e\u800c\u53d1\u751f\u7684\u5199\u9519\u8bef\u3002\n if (curnum != 0)\n atomicAdd(&temp[curgray], curnum);\n \n \/\/ \u5757\u5185\u540c\u6b65\u3002\u6b64\u5904\u4fdd\u8bc1\u56fe\u50cf\u4e2d\u6240\u6709\u70b9\u7684\u50cf\u7d20\u503c\u90fd\u88ab\u7edf\u8ba1\u8fc7\u3002\n __syncthreads();\n \n \/\/ \u7528\u6bcf\u4e00\u4e2a\u5757\u5185\u524d 256 \u4e2a\u7ebf\u7a0b\uff0c\u5c06\u5171\u4eab\u5185\u5b58 temp \u4e2d\u7684\u7ed3\u679c\u4fdd\u5b58\u5230\u8f93\u51fa\u6570\u7ec4\u4e2d\u3002\n if (inindex < 256)\n atomicAdd(&devhist[inindex], temp[inindex]);\n}\n\n\/\/ Host \u6210\u5458\u65b9\u6cd5\uff1ahistogram\uff08\u8ba1\u7b97\u56fe\u50cf\u76f4\u65b9\u56fe\uff09\n__host__ int Histogram::histogram(Image *inimg, \n unsigned int *histogram, bool onhostarray)\n{\n \/\/ \u68c0\u67e5\u56fe\u50cf\u662f\u5426\u4e3a NULL\u3002\n if (inimg == NULL || histogram == NULL)\n return NULL_POINTER;\n\n \/\/ \u8fd9\u4e00\u6bb5\u4ee3\u7801\u8fdb\u884c\u56fe\u50cf\u7684\u9884\u5904\u7406\u5de5\u4f5c\u3002\u56fe\u50cf\u7684\u9884\u5904\u7406\u4e3b\u8981\u5b8c\u6210\u5728 Device \u5185\u5b58\u4e0a\u4e3a\u8f93\n \/\/ \u5165\u548c\u8f93\u51fa\u56fe\u50cf\u51c6\u5907\u5185\u5b58\u7a7a\u95f4\uff0c\u4ee5\u4fbf\u76db\u653e\u6570\u636e\u3002\n int errcode; \/\/ \u5c40\u90e8\u53d8\u91cf\uff0c\u9519\u8bef\u7801\n\n \/\/ \u5c06\u8f93\u5165\u56fe\u50cf\u62f7\u8d1d\u5230 Device \u5185\u5b58\u4e2d\u3002\n 
errcode = ImageBasicOp::copyToCurrentDevice(inimg);\n if (errcode != NO_ERROR)\n return errcode;\n\n \/\/ \u63d0\u53d6\u8f93\u5165\u56fe\u50cf\u7684 ROI \u5b50\u56fe\u50cf\u3002\n ImageCuda insubimgCud;\n errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);\n if (errcode != NO_ERROR)\n return errcode;\n\n \/\/ \u8ba1\u7b97\u8c03\u7528 Kernel \u51fd\u6570\u7684\u7ebf\u7a0b\u5757\u7684\u5c3a\u5bf8\u548c\u7ebf\u7a0b\u5757\u7684\u6570\u91cf\u3002\n dim3 blocksize, gridsize;\n int height = (insubimgCud.imgMeta.height + \n HISTOGRAM_PACK_MASK) \/ HISTOGRAM_PACK_NUM;\n blocksize.x = DEF_BLOCK_X;\n blocksize.y = DEF_BLOCK_Y;\n gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) \/ blocksize.x;\n gridsize.y = (height + blocksize.y - 1) \/ blocksize.y;\n\n \/\/ \u5224\u65ad\u5f53\u524d histogram \u6570\u7ec4\u662f\u5426\u5b58\u50a8\u5728 Host \u7aef\u3002\u82e5\u662f\uff0c\u5219\u9700\u8981\u5728 Device \u7aef\u4e3a\u76f4\n \/\/ \u65b9\u56fe\u7533\u8bf7\u4e00\u6bb5\u7a7a\u95f4\uff1b\u82e5\u8be5\u6570\u7ec4\u662f\u5728 Device\u7aef\uff0c\u5219\u76f4\u63a5\u8c03\u7528\u6838\u51fd\u6570\u3002\n if (onhostarray){\t\n \/\/ \u5728 Device \u4e0a\u5206\u914d\u5b58\u50a8\u4e34\u65f6\u76f4\u65b9\u56fe\u7684\u7a7a\u95f4\u3002\n cudaError_t cudaerrcode;\n unsigned int *devhisto;\n cudaerrcode = cudaMalloc((void**)&devhisto,\n 256 * sizeof (unsigned int));\n if (cudaerrcode != cudaSuccess) {\n return cudaerrcode;\n }\n\n \/\/ \u521d\u59cb\u5316 Device \u4e0a\u7684\u5185\u5b58\u7a7a\u95f4\u3002\n cudaerrcode = cudaMemset(devhisto, 0, 256 * sizeof (unsigned int));\n if (cudaerrcode != cudaSuccess) {\n cudaFree(devhisto);\n return cudaerrcode;\n }\n\n \/\/ \u8c03\u7528\u6838\u51fd\u6570\uff0c\u8ba1\u7b97\u8f93\u5165\u56fe\u50cf\u7684\u76f4\u65b9\u56fe\u3002\n _histogramKer<<>>(insubimgCud, devhisto);\n\tif (cudaGetLastError() != cudaSuccess) {\n cudaFree(devhisto);\n return CUDA_ERROR;\n }\n\n \/\/ \u5c06\u76f4\u65b9\u56fe\u7684\u7ed3\u679c\u62f7\u56de Host \u7aef\u5185\u5b58\u4e2d\u3002\n cudaerrcode = cudaMemcpy(\n histogram, devhisto, 256 * sizeof (unsigned int), \n cudaMemcpyDeviceToHost);\n if (cudaerrcode != cudaSuccess) {\n cudaFree(devhisto);\n return cudaerrcode;\n }\n\n \/\/ \u91ca\u653e Device \u7aef\u7684\u76f4\u65b9\u56fe\u5b58\u50a8\u7a7a\u95f4\u3002\t\n cudaFree(devhisto);\n\n \/\/ \u5982\u679c histogram \u5728 Device \u7aef\uff0c\u76f4\u63a5\u8c03\u7528\u6838\u51fd\u6570\u3002\n } else {\n\t _histogramKer<<>>(insubimgCud, histogram);\n if (cudaGetLastError() != cudaSuccess) {\n return CUDA_ERROR;\n }\n }\n\n return NO_ERROR;\n}\n\n","avg_line_length":31.1450980392,"max_line_length":77,"alphanum_fraction":0.5832284059} {"size":1691,"ext":"cu","lang":"Cuda","max_stars_count":11.0,"content":"\/\/ Filename: saveFunctionsSchemeIncompressible.cu\n\/\/\n\/\/ Copyright (c) 2010-2013, Florencio Balboa Usabiaga\n\/\/\n\/\/ This file is part of Fluam\n\/\/\n\/\/ Fluam is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ Fluam is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Fluam. 
If not, see .\n\n\n\nbool saveFunctionsSchemeIncompressible(int index){\n \/\/Initialize save functions\n \/\/cout << \"INDEX \" << index << endl;\n if(index==0){\n if(!saveSeed()) return 0;\n if(!temperatureFunction(index)) return 0;\n \/\/if(!saveCellsAlongZ(index)) return 0;\n \/\/if(!hydroAnalysisIncompressible(0)) return 0;\n if(!saveTime(index)) return 0;\n }\n \/\/Use save functions\n else if(index==1){\n if(!temperatureFunction(index)) return 0;\n \/\/if(!saveCellsAlongZ(index)) return 0;\n \/\/if(!hydroAnalysisIncompressible(1)) return 0;\n }\n \/\/Close save functions\n else if(index==2){\n if(!saveTime(index)) return 0;\n if(!temperatureFunction(index)) return 0;\n \/\/if(!saveCellsAlongZ(index)) return 0;\n \/\/if(!hydroAnalysisIncompressible(2)) return 0;\n if(!saveFluidFinalConfiguration()) return 0;\n }\n else{\n cout << \"SAVE FUNCTIONS ERROR, INDEX !=0,1,2 \" << endl;\n return 0;\n }\n\n return 1;\n}\n\n\n\n\n\n\n\n\n","avg_line_length":27.7213114754,"max_line_length":71,"alphanum_fraction":0.6924896511} {"size":6420,"ext":"cuh","lang":"Cuda","max_stars_count":null,"content":"\/\/ Copyright (c) 2009-2019 The Regents of the University of Michigan\n\/\/ This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.\n\n#pragma once\n\n#include \"hoomd\/HOOMDMath.h\"\n#include \n\n#include \"IntegratorHPMCMonoGPUDepletants.cuh\"\n\nnamespace hoomd\n {\nnamespace hpmc\n {\nnamespace gpu\n {\n\/\/! Wraps arguments to kernel::hpmc_insert_depletants_phase(n)\n\/*! \\ingroup hpmc_data_structs *\/\nstruct hpmc_auxilliary_args_t\n {\n \/\/! Construct a hpmc_auxilliary_args_t\n hpmc_auxilliary_args_t(const unsigned int* _d_tag,\n const Scalar4* _d_vel,\n const Scalar4* _d_trial_vel,\n const unsigned int _ntrial,\n const unsigned int _nwork_local[],\n const unsigned int _work_offset[],\n const unsigned int* _d_n_depletants_ntrial,\n int* _d_deltaF_int,\n const hipStream_t* _streams_phase1,\n const hipStream_t* _streams_phase2,\n const unsigned int _max_len,\n unsigned int* _d_req_len,\n const bool _add_ghosts,\n const unsigned int _n_ghosts,\n const GPUPartition& _gpu_partition_rank)\n : d_tag(_d_tag), d_vel(_d_vel), d_trial_vel(_d_trial_vel), ntrial(_ntrial),\n nwork_local(_nwork_local), work_offset(_work_offset),\n d_n_depletants_ntrial(_d_n_depletants_ntrial), d_deltaF_int(_d_deltaF_int),\n streams_phase1(_streams_phase1), streams_phase2(_streams_phase2), max_len(_max_len),\n d_req_len(_d_req_len), add_ghosts(_add_ghosts), n_ghosts(_n_ghosts),\n gpu_partition_rank(_gpu_partition_rank) {};\n\n const unsigned int* d_tag; \/\/!< Particle tags\n const Scalar4* d_vel; \/\/!< Particle velocities (.x component is the auxilliary variable)\n const Scalar4* d_trial_vel; \/\/!< Particle velocities after trial move (.x component is the\n \/\/!< auxilliary variable)\n const unsigned int ntrial; \/\/!< Number of trial insertions per depletant\n const unsigned int* nwork_local; \/\/!< Number of insertions this rank handles, per GPU\n const unsigned int* work_offset; \/\/!< Offset into insertions for this rank\n const unsigned int* d_n_depletants_ntrial; \/\/!< Number of depletants per particle, depletant\n \/\/!< type pair and trial insertion\n int* d_deltaF_int; \/\/!< Free energy difference rescaled to integer units\n const hipStream_t* streams_phase1; \/\/!< Stream for this depletant type, phase1 kernel\n const hipStream_t* streams_phase2; \/\/!< Stream for this depletant type, phase2 kernel\n const unsigned int max_len; \/\/!< Max length of dynamically allocated shared 
memory list\n unsigned int* d_req_len; \/\/!< Requested length of shared mem list per group\n const bool add_ghosts; \/\/!< True if we should add the ghosts from the domain decomposition\n const unsigned int n_ghosts; \/\/!< Number of ghost particles\n const GPUPartition& gpu_partition_rank; \/\/!< Split of particles for this rank\n };\n\n\/\/! Driver for kernel::hpmc_insert_depletants_auxilliary_phase2()\ntemplate\nvoid hpmc_depletants_auxilliary_phase2(const hpmc_args_t& args,\n const hpmc_implicit_args_t& implicit_args,\n const hpmc_auxilliary_args_t& auxilliary_args,\n const typename Shape::param_type* params);\n\n\/\/! Driver for kernel::hpmc_insert_depletants_auxilliary_phase1()\ntemplate\nvoid hpmc_depletants_auxilliary_phase1(const hpmc_args_t& args,\n const hpmc_implicit_args_t& implicit_args,\n const hpmc_auxilliary_args_t& auxilliary_args,\n const typename Shape::param_type* params);\n\n\/\/! Driver for kernel::hpmc_depletants_accept\nvoid hpmc_depletants_accept(const uint16_t seed,\n const uint64_t timestep,\n const unsigned int select,\n const unsigned int rank,\n const int* d_deltaF_int,\n const Index2D depletant_idx,\n const unsigned int deltaF_pitch,\n const Scalar* d_fugacity,\n const unsigned int* d_ntrial,\n unsigned* d_reject_out,\n const GPUPartition& gpu_partition,\n const unsigned int block_size);\n\nvoid generate_num_depletants_ntrial(const Scalar4* d_vel,\n const Scalar4* d_trial_vel,\n const unsigned int ntrial,\n const unsigned int depletant_type_a,\n const unsigned int depletant_type_b,\n const Index2D depletant_idx,\n const Scalar* d_lambda,\n const Scalar4* d_postype,\n unsigned int* d_n_depletants,\n const unsigned int N_local,\n const bool add_ghosts,\n const unsigned int n_ghosts,\n const GPUPartition& gpu_partition,\n const unsigned int block_size,\n const hipStream_t* streams);\n\nvoid get_max_num_depletants_ntrial(const unsigned int ntrial,\n unsigned int* d_n_depletants,\n unsigned int* max_n_depletants,\n const bool add_ghosts,\n const unsigned int n_ghosts,\n const hipStream_t* streams,\n const GPUPartition& gpu_partition,\n CachedAllocator& alloc);\n } \/\/ end namespace gpu\n\n } \/\/ end namespace hpmc\n } \/\/ end namespace hoomd\n","avg_line_length":53.9495798319,"max_line_length":100,"alphanum_fraction":0.5602803738} {"size":4035,"ext":"cu","lang":"Cuda","max_stars_count":5.0,"content":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\n\/*!\n * \\file np_elemwise_unary_op_basic.cu\n * \\brief GPU Implementation of numpy unary functions.\n *\/\n#include \"..\/tensor\/elemwise_binary_op.h\"\n\nnamespace mxnet {\nnamespace op {\n\nNNVM_REGISTER_OP(_npx_relu)\n.set_attr(\"FCompute\", UnaryOp::Compute);\n\nNNVM_REGISTER_OP(_npx_sigmoid)\n.set_attr(\"FCompute\", UnaryOp::Compute);\n\nNNVM_REGISTER_OP(_np_copy)\n.set_attr(\"FCompute\", UnaryOp::IdentityCompute);\n\n#define MXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(__name$, __kernel$) \\\nNNVM_REGISTER_OP(__name$) \\\n.set_attr(\"FCompute\", UnaryOp::Compute) \\\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_negative, mshadow_op::negation);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_reciprocal, mshadow_op::reciprocal);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_absolute, mshadow_op::abs);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_sign, mshadow_op::sign);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_rint, mshadow_op::rint);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_ceil, mshadow_op::ceil);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_floor, mshadow_op::floor);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_trunc, mshadow_op::trunc);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_fix, mshadow_op::fix);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_square, mshadow_op::square);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_sqrt, mshadow_op::square_root);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_cbrt, mshadow_op::cube_root);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_exp, mshadow_op::exp);\n\nNNVM_REGISTER_OP(_npi_log)\n.set_attr(\"FCompute\", UnaryOp::Compute);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_log10, mshadow_op::log10);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_log2, mshadow_op::log2);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_log1p, mshadow_op::log1p);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_expm1, mshadow_op::expm1);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_logical_not, mshadow_op::nt);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_sin, mshadow_op::sin);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_cos, mshadow_op::cos);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_tan, mshadow_op::tan);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arcsin, mshadow_op::arcsin);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arccos, mshadow_op::arccos);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arctan, mshadow_op::arctan);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_degrees, mshadow_op::degrees);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_radians, mshadow_op::radians);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_sinh, mshadow_op::sinh);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_cosh, mshadow_op::cosh);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_tanh, mshadow_op::tanh);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arcsinh, mshadow_op::arcsinh);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arccosh, mshadow_op::arccosh);\n\nMXNET_OPERATOR_REGISTER_NUMPY_UNARY_GPU(_npi_arctanh, mshadow_op::arctanh);\n\n} \/\/ namespace op\n} \/\/ namespace mxnet\n","avg_line_length":36.3513513514,"max_line_length":81,"alphanum_fraction":0.8168525403} {"size":1887,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright (c) 2019-2021, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this 
file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \"knn_opg_common.cuh\"\n\nnamespace ML {\nnamespace KNN {\nnamespace opg {\n\nusing namespace knn_common;\n\ntemplate struct KNN_CL_params;\n\nvoid knn_classify(raft::handle_t &handle, std::vector *> *out,\n std::vector> *probas,\n std::vector &idx_data,\n Matrix::PartDescriptor &idx_desc,\n std::vector &query_data,\n Matrix::PartDescriptor &query_desc,\n std::vector> &y,\n std::vector &uniq_labels, std::vector &n_unique,\n bool rowMajorIndex, bool rowMajorQuery, bool probas_only,\n int k, size_t batch_size, bool verbose) {\n knn_operation knn_op =\n probas_only ? knn_operation::class_proba : knn_operation::classification;\n KNN_CL_params params(\n knn_op, &idx_data, &idx_desc, &query_data, &query_desc, rowMajorIndex,\n rowMajorQuery, k, batch_size, verbose, n_unique.size(), &y, &n_unique,\n &uniq_labels, out, probas);\n\n cuda_utils cutils(handle);\n opg_knn(params, cutils);\n}\n}; \/\/ namespace opg\n}; \/\/ namespace KNN\n}; \/\/ namespace ML\n","avg_line_length":37.74,"max_line_length":80,"alphanum_fraction":0.6703762586} {"size":2861,"ext":"cu","lang":"Cuda","max_stars_count":42.0,"content":"#include \n#include \"rama_utils.h\"\n#include \n#include \"test.h\"\n\nvoid test_cc(const std::vector row_offsets, const std::vector col_ids, const int expected_nr_ccs)\n{\n const int nnz = col_ids.size();\n const int num_rows = row_offsets.size()-1;\n\n int* d_row_offsets;\n int* d_col_ids;\n int* d_node_stat_out;\n\n if (cudaSuccess != cudaMalloc((void **)&d_row_offsets, (num_rows + 1) * sizeof(int))) {fprintf(stderr, \"ERROR: could not allocate d_row_offsets\\n\\n\"); exit(-1);}\n if (cudaSuccess != cudaMalloc((void **)&d_col_ids, nnz * sizeof(int))) {fprintf(stderr, \"ERROR: could not allocate d_col_ids\\n\\n\"); exit(-1);}\n if (cudaSuccess != cudaMalloc((void **)&d_node_stat_out, num_rows * sizeof(int))) {fprintf(stderr, \"ERROR: could not allocate d_node_stat_out,\\n\\n\"); exit(-1);}\n\n if (cudaSuccess != cudaMemcpy(d_row_offsets, row_offsets.data(), (num_rows + 1) * sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, \"ERROR: copying to device failed\\n\\n\"); exit(-1);}\n if (cudaSuccess != cudaMemcpy(d_col_ids, col_ids.data(), nnz * sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, \"ERROR: copying to device failed\\n\\n\"); exit(-1);}\n\n computeCC_gpu(num_rows, nnz, d_row_offsets, d_col_ids, d_node_stat_out, get_cuda_device());\n\n std::vector node_stat_out(num_rows, -1);\n if (cudaSuccess != cudaMemcpy(node_stat_out.data(), d_node_stat_out, num_rows * sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, \"ERROR: copying from device failed\\n\\n\"); exit(-1);}\n\n std::set all_cc_ids;\n for (int n = 0; n < num_rows; n++)\n {\n int c_id = node_stat_out[n];\n std::cout<<\"Node: \"<= 0);\n for (int neighbour_index = row_offsets[n]; neighbour_index < row_offsets[n + 1]; neighbour_index++)\n { \n int neighbour = col_ids[neighbour_index];\n std::cout<<\"Node: \"< row_offsets = {0, 1, 2, 4, 5, 6};\n std::vector col_ids = {1, 0, 3, 4, 2, 2};\n\n test_cc(row_offsets, col_ids, 2);\n }\n\n {\n \/\/ 
CSR representation of [1, 2]\n\n std::vector row_offsets = {0, 0, 1, 2};\n std::vector col_ids = {2, 1};\n\n test_cc(row_offsets, col_ids, 2);\n }\n}\n","avg_line_length":40.2957746479,"max_line_length":192,"alphanum_fraction":0.6193638588} {"size":11005,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n * Copyright (c) 2018-2020, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \n#include \n#include \n#include \n#include \"test_utils.h\"\n\nnamespace MLCommon {\nnamespace Matrix {\n\ntemplate \n__global__ void nativePowerKernel(Type *in, Type *out, int len) {\n int idx = threadIdx.x + blockIdx.x * blockDim.x;\n if (idx < len) {\n out[idx] = in[idx] * in[idx];\n }\n}\n\ntemplate \nvoid naivePower(Type *in, Type *out, int len, cudaStream_t stream) {\n static const int TPB = 64;\n int nblks = ceildiv(len, TPB);\n nativePowerKernel<<>>(in, out, len);\n CUDA_CHECK(cudaPeekAtLastError());\n}\n\ntemplate \n__global__ void nativeSqrtKernel(Type *in, Type *out, int len) {\n int idx = threadIdx.x + blockIdx.x * blockDim.x;\n if (idx < len) {\n out[idx] = sqrt(in[idx]);\n }\n}\n\ntemplate \nvoid naiveSqrt(Type *in, Type *out, int len) {\n static const int TPB = 64;\n int nblks = ceildiv(len, TPB);\n nativeSqrtKernel<<>>(in, out, len);\n CUDA_CHECK(cudaPeekAtLastError());\n}\n\ntemplate \n__global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount,\n int colCount) {\n int d_i = blockIdx.x * rowCount;\n int end = d_i + rowCount;\n\n if (blockIdx.x < colCount) {\n Type max = 0.0;\n int max_index = 0;\n for (int i = d_i; i < end; i++) {\n Type val = in[i];\n if (val < 0.0) {\n val = -val;\n }\n if (val > max) {\n max = val;\n max_index = i;\n }\n }\n\n for (int i = d_i; i < end; i++) {\n if (in[max_index] < 0.0) {\n out[i] = -in[i];\n } else {\n out[i] = in[i];\n }\n }\n }\n\n __syncthreads();\n}\n\ntemplate \nvoid naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) {\n naiveSignFlipKernel<<>>(in, out, rowCount, colCount);\n CUDA_CHECK(cudaPeekAtLastError());\n}\n\ntemplate \nstruct MathInputs {\n T tolerance;\n int n_row;\n int n_col;\n int len;\n unsigned long long int seed;\n};\n\ntemplate \n::std::ostream &operator<<(::std::ostream &os, const MathInputs &dims) {\n return os;\n}\n\ntemplate \nclass MathTest : public ::testing::TestWithParam> {\n protected:\n void SetUp() override {\n params = ::testing::TestWithParam>::GetParam();\n Random::Rng r(params.seed);\n int len = params.len;\n\n allocate(in_power, len);\n allocate(out_power_ref, len);\n allocate(in_sqrt, len);\n allocate(out_sqrt_ref, len);\n allocate(in_sign_flip, len);\n allocate(out_sign_flip_ref, len);\n\n cudaStream_t stream;\n CUDA_CHECK(cudaStreamCreate(&stream));\n allocator.reset(new raft::mr::device::default_allocator);\n\n allocate(in_ratio, 4);\n T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};\n updateDevice(in_ratio, in_ratio_h, 4, stream);\n\n allocate(out_ratio_ref, 4);\n T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};\n 
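\/\/ Expected output of ratio(): each element divided by the sum of all inputs (1 + 2 + 2 + 3 = 8).\n    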
updateDevice(out_ratio_ref, out_ratio_ref_h, 4, stream);\n\n r.uniform(in_power, len, T(-1.0), T(1.0), stream);\n r.uniform(in_sqrt, len, T(0.0), T(1.0), stream);\n \/\/ r.uniform(in_ratio, len, T(0.0), T(1.0));\n r.uniform(in_sign_flip, len, T(-100.0), T(100.0), stream);\n\n naivePower(in_power, out_power_ref, len, stream);\n power(in_power, len, stream);\n\n naiveSqrt(in_sqrt, out_sqrt_ref, len);\n seqRoot(in_sqrt, len, stream);\n\n ratio(in_ratio, in_ratio, 4, allocator, stream);\n\n naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col);\n signFlip(in_sign_flip, params.n_row, params.n_col, stream);\n\n allocate(in_recip, 4);\n allocate(in_recip_ref, 4);\n allocate(out_recip, 4);\n \/\/ default threshold is 1e-15\n std::vector in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};\n std::vector in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};\n updateDevice(in_recip, in_recip_h.data(), 4, stream);\n updateDevice(in_recip_ref, in_recip_ref_h.data(), 4, stream);\n T recip_scalar = T(1.0);\n\n \/\/ this `reciprocal()` has to go first bc next one modifies its input\n reciprocal(in_recip, out_recip, recip_scalar, 4, stream);\n\n reciprocal(in_recip, recip_scalar, 4, stream, true);\n\n std::vector in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};\n std::vector in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};\n allocate(in_smallzero, 4);\n allocate(out_smallzero, 4);\n allocate(out_smallzero_ref, 4);\n updateDevice(in_smallzero, in_small_val_zero_h.data(), 4, stream);\n updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4, stream);\n setSmallValuesZero(out_smallzero, in_smallzero, 4, stream);\n setSmallValuesZero(in_smallzero, 4, stream);\n CUDA_CHECK(cudaStreamDestroy(stream));\n }\n\n void TearDown() override {\n CUDA_CHECK(cudaFree(in_power));\n CUDA_CHECK(cudaFree(out_power_ref));\n CUDA_CHECK(cudaFree(in_sqrt));\n CUDA_CHECK(cudaFree(out_sqrt_ref));\n CUDA_CHECK(cudaFree(in_ratio));\n CUDA_CHECK(cudaFree(out_ratio_ref));\n CUDA_CHECK(cudaFree(in_sign_flip));\n CUDA_CHECK(cudaFree(out_sign_flip_ref));\n CUDA_CHECK(cudaFree(in_recip));\n CUDA_CHECK(cudaFree(in_recip_ref));\n CUDA_CHECK(cudaFree(out_recip));\n CUDA_CHECK(cudaFree(in_smallzero));\n CUDA_CHECK(cudaFree(out_smallzero));\n CUDA_CHECK(cudaFree(out_smallzero_ref));\n }\n\n protected:\n MathInputs params;\n T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio,\n *out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip, *in_recip_ref,\n *out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref;\n std::shared_ptr allocator;\n};\n\nconst std::vector> inputsf = {\n {0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};\n\nconst std::vector> inputsd = {\n {0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};\n\ntypedef MathTest MathPowerTestF;\nTEST_P(MathPowerTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathPowerTestD;\nTEST_P(MathPowerTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSqrtTestF;\nTEST_P(MathSqrtTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSqrtTestD;\nTEST_P(MathSqrtTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathRatioTestF;\nTEST_P(MathRatioTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,\n 
CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathRatioTestD;\nTEST_P(MathRatioTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSignFlipTestF;\nTEST_P(MathSignFlipTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSignFlipTestD;\nTEST_P(MathSignFlipTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathReciprocalTestF;\nTEST_P(MathReciprocalTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,\n CompareApprox(params.tolerance)));\n\n \/\/ 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.\n ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathReciprocalTestD;\nTEST_P(MathReciprocalTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,\n CompareApprox(params.tolerance)));\n\n \/\/ 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.\n ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSetSmallZeroTestF;\nTEST_P(MathSetSmallZeroTestF, Result) {\n ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,\n CompareApprox(params.tolerance)));\n\n ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,\n CompareApprox(params.tolerance)));\n}\n\ntypedef MathTest MathSetSmallZeroTestD;\nTEST_P(MathSetSmallZeroTestD, Result) {\n ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,\n CompareApprox(params.tolerance)));\n\n ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,\n CompareApprox(params.tolerance)));\n}\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF,\n ::testing::ValuesIn(inputsf));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD,\n ::testing::ValuesIn(inputsd));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF,\n ::testing::ValuesIn(inputsf));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD,\n ::testing::ValuesIn(inputsd));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF,\n ::testing::ValuesIn(inputsf));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD,\n ::testing::ValuesIn(inputsd));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF,\n ::testing::ValuesIn(inputsf));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD,\n ::testing::ValuesIn(inputsd));\n\nINSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF,\n ::testing::ValuesIn(inputsf));\nINSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD,\n ::testing::ValuesIn(inputsd));\n\n} \/\/ end namespace Matrix\n} \/\/ end namespace MLCommon\n","avg_line_length":33.1475903614,"max_line_length":95,"alphanum_fraction":0.6782371649} {"size":2000,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"#include \"Kernel.h\"\n\n__global__\nvoid cukernel(Real __restrict__ * in, Real __restrict__* out, const int kSize, const int iStride, const int jStride, const int kStride)\n{\n\n int ipos = blockIdx.x * 32 + threadIdx.x;\n int jpos = blockIdx.y * 8 + threadIdx.y;\n\n\n out[ipos*iStride + jpos*jStride] = (pow(in[ipos*iStride + jpos*jStride] *in[ipos*iStride + jpos*jStride], 3.5) +\n 
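\/\/ k = 0 boundary plane: only the level above (k + 1) contributes, so there is no k - 1 term here.\n                                     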
pow(in[ipos*iStride + jpos*jStride + kStride], 2.3)\n );\n for(int k=1; k < kSize-1; ++k)\n {\n out[ipos*iStride + jpos*jStride + k*kStride] = (pow(in[ipos*iStride + jpos*jStride + k*kStride] *in[ipos*iStride + jpos*jStride + k*kStride], 3.5) +\n pow(in[ipos*iStride + jpos*jStride + (k+1)*kStride], 2.3) - \n pow(in[ipos*iStride + jpos*jStride + (k-1)*kStride], 1.3)\n )\n + out[(ipos+1)*iStride + jpos*jStride + k*kStride] + out[(ipos-1)*iStride + jpos*jStride + k*kStride] + \n out[ipos*iStride + (jpos+1)*jStride + k*kStride] + out[ipos*iStride + (jpos-1)*jStride + k*kStride]\n ;\n\n \n }\n out[ipos*iStride + jpos*jStride + (kSize-1)*kStride] = (pow(in[ipos*iStride + jpos*jStride + (kSize-1)*kStride] *in[ipos*iStride + jpos*jStride + (kSize-1)*kStride], 3.5) -\n pow(in[ipos*iStride + jpos*jStride + (kSize-2)*kStride], 1.3)\n );\n\n}\n\nvoid launch_kernel(IJKSize domain, Real __restrict__* in, Real __restrict__* out, cudaStream_t& stream)\n{\n dim3 threads, blocks;\n threads.x = 32;\n threads.y = 8;\n threads.z = 1;\n\n blocks.x = domain.iSize() \/ 32;\n blocks.y = domain.jSize() \/ 8;\n blocks.z = 1;\n if(domain.iSize() % 32 != 0 || domain.jSize() % 8 != 0)\n std::cout << \"ERROR: Domain sizes should be multiple of 32x8\" << std::endl;\n\n const int iStride = 1;\n const int jStride = domain.iSize()+cNumBoundaryLines*2;\n const int kStride = (domain.jSize()+cNumBoundaryLines*2)* jStride;\n cukernel<<>>(in, out, domain.kSize(), iStride, jStride, kStride);\n}\n\n","avg_line_length":39.2156862745,"max_line_length":176,"alphanum_fraction":0.6115} {"size":18348,"ext":"cu","lang":"Cuda","max_stars_count":682.0,"content":"\/\/********************************************************\/\/\n\/\/ CUDA SIFT extractor by M\u00e5rten Bj\u00f6rkman aka Celebrandil \/\/\n\/\/********************************************************\/\/ \n\n#include \n#include \n#include \n#include \n#include \n#include \"cudautils.h\"\n\n#include \"cudaImage.h\"\n#include \"cudaSift.h\"\n#include \"cudaSiftD.h\"\n#include \"cudaSiftH.h\"\n\n#include \"cudaSiftD.cu\"\n\nvoid InitCuda(int devNum)\n{\n int nDevices;\n cudaGetDeviceCount(&nDevices);\n if (!nDevices) {\n std::cerr << \"No CUDA devices available\" << std::endl;\n return;\n }\n devNum = std::min(nDevices-1, devNum);\n deviceInit(devNum); \n cudaDeviceProp prop;\n cudaGetDeviceProperties(&prop, devNum);\n printf(\"Device Number: %d\\n\", devNum);\n printf(\" Device name: %s\\n\", prop.name);\n printf(\" Memory Clock Rate (MHz): %d\\n\", prop.memoryClockRate\/1000);\n printf(\" Memory Bus Width (bits): %d\\n\", prop.memoryBusWidth);\n printf(\" Peak Memory Bandwidth (GB\/s): %.1f\\n\\n\",\n\t 2.0*prop.memoryClockRate*(prop.memoryBusWidth\/8)\/1.0e6);\n}\n\nfloat *AllocSiftTempMemory(int width, int height, int numOctaves, bool scaleUp)\n{\n TimerGPU timer(0);\n const int nd = NUM_SCALES + 3;\n int w = width*(scaleUp ? 2 : 1); \n int h = height*(scaleUp ? 
2 : 1);\n int p = iAlignUp(w, 128);\n int size = h*p; \/\/ image sizes\n int sizeTmp = nd*h*p; \/\/ laplace buffer sizes\n for (int i=0;i1) {\n CudaImage subImg;\n int p = iAlignUp(w\/2, 128);\n subImg.Allocate(w\/2, h\/2, p, false, memorySub); \n ScaleDown(subImg, img, 0.5f);\n float totInitBlur = (float)sqrt(initBlur*initBlur + 0.5f*0.5f) \/ 2.0f;\n ExtractSiftLoop(siftData, subImg, numOctaves-1, totInitBlur, thresh, lowestScale, subsampling*2.0f, memoryTmp, memorySub + (h\/2)*p);\n }\n ExtractSiftOctave(siftData, img, numOctaves, thresh, lowestScale, subsampling, memoryTmp);\n#ifdef VERBOSE\n double totTime = timer.read();\n printf(\"ExtractSift time total = %.2f ms %d\\n\\n\", totTime, numOctaves);\n#endif\n return 0;\n}\n\nvoid ExtractSiftOctave(SiftData &siftData, CudaImage &img, int octave, float thresh, float lowestScale, float subsampling, float *memoryTmp)\n{\n const int nd = NUM_SCALES + 3;\n#ifdef VERBOSE\n unsigned int *d_PointCounterAddr;\n safeCall(cudaGetSymbolAddress((void**)&d_PointCounterAddr, d_PointCounter));\n unsigned int fstPts, totPts;\n safeCall(cudaMemcpy(&fstPts, &d_PointCounterAddr[2*octave-1], sizeof(int), cudaMemcpyDeviceToHost)); \n TimerGPU timer0;\n#endif\n CudaImage diffImg[nd];\n int w = img.width; \n int h = img.height;\n int p = iAlignUp(w, 128);\n for (int i=0;i();\n \/\/ Specify texture object parameters\n struct cudaTextureDesc texDesc;\n memset(&texDesc, 0, sizeof(texDesc));\n texDesc.addressMode[0] = cudaAddressModeClamp;\n texDesc.addressMode[1] = cudaAddressModeClamp;\n texDesc.filterMode = cudaFilterModeLinear;\n texDesc.readMode = cudaReadModeElementType;\n texDesc.normalizedCoords = 0;\n \/\/ Create texture object\n cudaTextureObject_t texObj = 0;\n cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);\n\n#ifdef VERBOSE\n TimerGPU timer1;\n#endif\n float baseBlur = pow(2.0f, -1.0f\/NUM_SCALES);\n float diffScale = pow(2.0f, 1.0f\/NUM_SCALES);\n LaplaceMulti(texObj, img, diffImg, octave); \n FindPointsMulti(diffImg, siftData, thresh, 10.0f, 1.0f\/NUM_SCALES, lowestScale\/subsampling, subsampling, octave);\n#ifdef VERBOSE\n double gpuTimeDoG = timer1.read();\n TimerGPU timer4;\n#endif\n ComputeOrientations(texObj, img, siftData, octave); \n ExtractSiftDescriptors(texObj, siftData, subsampling, octave); \n \/\/OrientAndExtract(texObj, siftData, subsampling, octave); \n \n safeCall(cudaDestroyTextureObject(texObj));\n#ifdef VERBOSE\n double gpuTimeSift = timer4.read();\n double totTime = timer0.read();\n printf(\"GPU time : %.2f ms + %.2f ms + %.2f ms = %.2f ms\\n\", totTime-gpuTimeDoG-gpuTimeSift, gpuTimeDoG, gpuTimeSift, totTime);\n safeCall(cudaMemcpy(&totPts, &d_PointCounterAddr[2*octave+1], sizeof(int), cudaMemcpyDeviceToHost));\n totPts = (totPts0) \n printf(\" %.2f ms \/ DoG, %.4f ms \/ Sift, #Sift = %d\\n\", gpuTimeDoG\/NUM_SCALES, gpuTimeSift\/(totPts-fstPts), totPts-fstPts); \n#endif\n}\n\nvoid InitSiftData(SiftData &data, int num, bool host, bool dev)\n{\n data.numPts = 0;\n data.maxPts = num;\n int sz = sizeof(SiftPoint)*num;\n#ifdef MANAGEDMEM\n safeCall(cudaMallocManaged((void **)&data.m_data, sz));\n#else\n data.h_data = NULL;\n if (host)\n data.h_data = (SiftPoint *)malloc(sz);\n data.d_data = NULL;\n if (dev)\n safeCall(cudaMalloc((void **)&data.d_data, sz));\n#endif\n}\n\nvoid FreeSiftData(SiftData &data)\n{\n#ifdef MANAGEDMEM\n safeCall(cudaFree(data.m_data));\n#else\n if (data.d_data!=NULL)\n safeCall(cudaFree(data.d_data));\n data.d_data = NULL;\n if (data.h_data!=NULL)\n free(data.h_data);\n#endif\n data.numPts = 0;\n 
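\/\/ Reset the capacity as well so a freed SiftData is never mistaken for an allocated one.\n  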
data.maxPts = 0;\n}\n\nvoid PrintSiftData(SiftData &data)\n{\n#ifdef MANAGEDMEM\n SiftPoint *h_data = data.m_data;\n#else\n SiftPoint *h_data = data.h_data;\n if (data.h_data==NULL) {\n h_data = (SiftPoint *)malloc(sizeof(SiftPoint)*data.maxPts);\n safeCall(cudaMemcpy(h_data, data.d_data, sizeof(SiftPoint)*data.numPts, cudaMemcpyDeviceToHost));\n data.h_data = h_data;\n }\n#endif\n for (int i=0;i>>(res.d_data, src.d_data, src.width, src.pitch, src.height, res.pitch);\n#else\n dim3 blocks(iDivUp(src.width, SCALEDOWN_W), iDivUp(src.height, SCALEDOWN_H));\n dim3 threads(SCALEDOWN_W + 4);\n ScaleDown<<>>(res.d_data, src.d_data, src.width, src.pitch, src.height, res.pitch);\n#endif\n checkMsg(\"ScaleDown() execution failed\\n\");\n return 0.0;\n}\n\ndouble ScaleUp(CudaImage &res, CudaImage &src)\n{\n if (res.d_data==NULL || src.d_data==NULL) {\n printf(\"ScaleUp: missing data\\n\");\n return 0.0;\n }\n dim3 blocks(iDivUp(res.width, SCALEUP_W), iDivUp(res.height, SCALEUP_H));\n dim3 threads(SCALEUP_W\/2, SCALEUP_H\/2);\n ScaleUp<<>>(res.d_data, src.d_data, src.width, src.pitch, src.height, res.pitch); \n checkMsg(\"ScaleUp() execution failed\\n\");\n return 0.0;\n} \n\ndouble ComputeOrientations(cudaTextureObject_t texObj, CudaImage &src, SiftData &siftData, int octave)\n{\n dim3 blocks(512); \n#ifdef MANAGEDMEM\n ComputeOrientationsCONST<<>>(texObj, siftData.m_data, octave);\n#else\n#if 1\n dim3 threads(11*11);\n ComputeOrientationsCONST<<>>(texObj, siftData.d_data, octave);\n#else\n dim3 threads(256); \n ComputeOrientationsCONSTNew<<>>(src.d_data, src.width, src.pitch, src.height, siftData.d_data, octave);\n#endif\n#endif\n checkMsg(\"ComputeOrientations() execution failed\\n\");\n return 0.0;\n}\n\ndouble ExtractSiftDescriptors(cudaTextureObject_t texObj, SiftData &siftData, float subsampling, int octave)\n{\n dim3 blocks(512); \n dim3 threads(16, 8);\n#ifdef MANAGEDMEM\n ExtractSiftDescriptorsCONST<<>>(texObj, siftData.m_data, subsampling, octave);\n#else\n ExtractSiftDescriptorsCONSTNew<<>>(texObj, siftData.d_data, subsampling, octave);\n#endif\n checkMsg(\"ExtractSiftDescriptors() execution failed\\n\");\n return 0.0; \n}\n\ndouble OrientAndExtract(cudaTextureObject_t texObj, SiftData &siftData, float subsampling, int octave)\n{\n dim3 blocks(256); \n dim3 threads(128);\n#ifdef MANAGEDMEM\n OrientAndExtractCONST<<>>(texObj, siftData.m_data, subsampling, octave);\n#else\n OrientAndExtractCONST<<>>(texObj, siftData.d_data, subsampling, octave);\n#endif\n checkMsg(\"OrientAndExtract() execution failed\\n\");\n return 0.0;\n}\n\ndouble RescalePositions(SiftData &siftData, float scale)\n{\n dim3 blocks(iDivUp(siftData.numPts, 64));\n dim3 threads(64);\n RescalePositions<<>>(siftData.d_data, siftData.numPts, scale);\n checkMsg(\"RescapePositions() execution failed\\n\");\n return 0.0; \n}\n\ndouble LowPass(CudaImage &res, CudaImage &src, float scale)\n{\n float kernel[2*LOWPASS_R+1];\n static float oldScale = -1.0f;\n if (scale!=oldScale) {\n float kernelSum = 0.0f;\n float ivar2 = 1.0f\/(2.0f*scale*scale);\n for (int j=-LOWPASS_R;j<=LOWPASS_R;j++) {\n kernel[j+LOWPASS_R] = (float)expf(-(double)j*j*ivar2);\n kernelSum += kernel[j+LOWPASS_R]; \n }\n for (int j=-LOWPASS_R;j<=LOWPASS_R;j++) \n kernel[j+LOWPASS_R] \/= kernelSum; \n safeCall(cudaMemcpyToSymbol(d_LowPassKernel, kernel, (2*LOWPASS_R+1)*sizeof(float)));\n oldScale = scale;\n } \n int width = res.width;\n int pitch = res.pitch;\n int height = res.height;\n dim3 blocks(iDivUp(width, LOWPASS_W), iDivUp(height, LOWPASS_H));\n#if 1\n dim3 
threads(LOWPASS_W+2*LOWPASS_R, 4); \n LowPassBlock<<>>(src.d_data, res.d_data, width, pitch, height);\n#else\n dim3 threads(LOWPASS_W+2*LOWPASS_R, LOWPASS_H);\n LowPass<<>>(src.d_data, res.d_data, width, pitch, height);\n#endif\n checkMsg(\"LowPass() execution failed\\n\");\n return 0.0; \n}\n\n\/\/==================== Multi-scale functions ===================\/\/\n\nvoid PrepareLaplaceKernels(int numOctaves, float initBlur, float *kernel)\n{\n if (numOctaves>1) {\n float totInitBlur = (float)sqrt(initBlur*initBlur + 0.5f*0.5f) \/ 2.0f;\n PrepareLaplaceKernels(numOctaves-1, totInitBlur, kernel);\n }\n float scale = pow(2.0f, -1.0f\/NUM_SCALES);\n float diffScale = pow(2.0f, 1.0f\/NUM_SCALES);\n for (int i=0;i>>(baseImage.d_data, results[0].d_data, width, pitch, height, octave);\n#endif\n#if 0\n dim3 threads(LAPLACE_W+2*LAPLACE_R, LAPLACE_S);\n dim3 blocks(iDivUp(width, LAPLACE_W), iDivUp(height, LAPLACE_H));\n LaplaceMultiMemTest<<>>(baseImage.d_data, results[0].d_data, width, pitch, height, octave);\n#endif\n#if 0\n dim3 threads(LAPLACE_W+2*LAPLACE_R, LAPLACE_S);\n dim3 blocks(iDivUp(width, LAPLACE_W), height);\n LaplaceMultiMemOld<<>>(baseImage.d_data, results[0].d_data, width, pitch, height, octave);\n#endif\n#if 0\n dim3 threads(LAPLACE_W+2*LAPLACE_R, LAPLACE_S);\n dim3 blocks(iDivUp(width, LAPLACE_W), height);\n LaplaceMultiTex<<>>(texObj, results[0].d_data, width, pitch, height, octave);\n#endif\n checkMsg(\"LaplaceMulti() execution failed\\n\");\n return 0.0; \n}\n\ndouble FindPointsMulti(CudaImage *sources, SiftData &siftData, float thresh, float edgeLimit, float factor, float lowestScale, float subsampling, int octave)\n{\n if (sources->d_data==NULL) {\n printf(\"FindPointsMulti: missing data\\n\");\n return 0.0;\n }\n int w = sources->width;\n int p = sources->pitch;\n int h = sources->height;\n#if 0\n dim3 blocks(iDivUp(w, MINMAX_W)*NUM_SCALES, iDivUp(h, MINMAX_H));\n dim3 threads(MINMAX_W + 2, MINMAX_H);\n FindPointsMultiTest<<>>(sources->d_data, siftData.d_data, w, p, h, subsampling, lowestScale, thresh, factor, edgeLimit, octave); \n#endif\n#if 1\n dim3 blocks(iDivUp(w, MINMAX_W)*NUM_SCALES, iDivUp(h, MINMAX_H));\n dim3 threads(MINMAX_W + 2); \n#ifdef MANAGEDMEM\n FindPointsMulti<<>>(sources->d_data, siftData.m_data, w, p, h, subsampling, lowestScale, thresh, factor, edgeLimit, octave); \n#else\n FindPointsMultiNew<<>>(sources->d_data, siftData.d_data, w, p, h, subsampling, lowestScale, thresh, factor, edgeLimit, octave);\n#endif\n#endif\n checkMsg(\"FindPointsMulti() execution failed\\n\");\n return 0.0;\n}\n\n","avg_line_length":35.5581395349,"max_line_length":177,"alphanum_fraction":0.6766950076} {"size":1687,"ext":"cuh","lang":"Cuda","max_stars_count":1.0,"content":"#pragma once\n\n#include \"Common.cuh\"\n#include \"Flags.cuh\"\n#include \"Types.h\"\n\n#include \n\nEXTERN_C\n{\n\tEXPORT int _Sum(double& sum, const MemoryBuffer& v);\n\tEXPORT int _DetermineSumCache(MemoryBuffer& cache, const MemoryBuffer& v, const MemoryBuffer& oneElementCache);\n\tEXPORT int _SumWithProvidedCache(double& sum, const MemoryBuffer& v, MemoryBuffer& cache, MemoryBuffer& outputCache);\n\tinline EXPORT int _SumRaw(double& sum, const ptr_t v, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)\n\t{\n\t\treturn _Sum(sum, MemoryBuffer(v, size, memorySpace, mathDomain));\n\t}\n\n\tEXPORT int _Min(double& min, const MemoryBuffer& x);\n\tinline EXPORT int _MinRaw(double& min, const ptr_t x, const unsigned size, const MemorySpace memorySpace, const MathDomain 
mathDomain)\n\t{\n\t\treturn _Min(min, MemoryBuffer(x, size, memorySpace, mathDomain));\n\t}\n\n\tEXPORT int _Max(double& max, const MemoryBuffer& x);\n\tinline EXPORT int _MaxRaw(double& max, const ptr_t x, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)\n\t{\n\t\treturn _Max(max, MemoryBuffer(x, size, memorySpace, mathDomain));\n\t}\n\n\tEXPORT int _AbsMin(double& min, const MemoryBuffer& x);\n\tinline EXPORT int _AbsMinRaw(double& min, const ptr_t x, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)\n\t{\n\t\treturn _AbsMin(min, MemoryBuffer(x, size, memorySpace, mathDomain));\n\t}\n\n\tEXPORT int _AbsMax(double& max, const MemoryBuffer& x);\n\tinline EXPORT int _AbsMaxRaw(double& max, const ptr_t x, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)\n\t{\n\t\treturn _AbsMax(max, MemoryBuffer(x, size, memorySpace, mathDomain));\n\t}\n}","avg_line_length":40.1666666667,"max_line_length":138,"alphanum_fraction":0.7688203912} {"size":29781,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n#include \n#include \"model_gpu_utils.h\"\n\n#include \"mixed_tentusscher_myo_epi_2004_S2_3.h\"\n\nextern \"C\" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) \n{\n\n print_to_stdout_and_file(\"Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\\n\\n\");\n\n \/\/ execution configuration\n const int GRID = (num_volumes + BLOCK_SIZE - 1)\/BLOCK_SIZE;\n\n size_t size = num_volumes*sizeof(real);\n\n check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));\n check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));\n\n \/\/ Get the mapping array\n uint32_t *mapping = NULL;\n uint32_t *mapping_device = NULL;\n if(extra_data) \n {\n mapping = (uint32_t*)extra_data;\n check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));\n check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));\n }\n\n kernel_set_model_inital_conditions <<>>(*sv, mapping_device, num_volumes);\n\n check_cuda_error( cudaPeekAtLastError() );\n cudaDeviceSynchronize();\n \n check_cuda_error(cudaFree(mapping_device));\n\n return pitch_h;\n\n}\n\nextern \"C\" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) \n{\n\n \/\/ execution configuration\n const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)\/BLOCK_SIZE;\n\n size_t stim_currents_size = sizeof(real)*num_cells_to_solve;\n size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;\n\n real *stims_currents_device;\n check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));\n check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));\n\n\n \/\/the array cells to solve is passed when we are using and adapative mesh\n uint32_t *cells_to_solve_device = NULL;\n if(cells_to_solve != NULL) \n {\n check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));\n check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));\n }\n\n \/\/ Get the mapping array\n uint32_t *mapping = NULL;\n uint32_t *mapping_device = NULL;\n if(extra_data) \n {\n mapping = (uint32_t*)extra_data;\n check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));\n check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));\n }\n else \n {\n 
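\/\/ The mask is mandatory here: kernel_set_model_inital_conditions and solve_gpu read\n \/\/ mapping[i] per volume (0 -> myocardium branch, nonzero -> epicardium branch).\n \/\/ A minimal, hypothetical host-side setup would be, e.g.:\n \/\/ uint32_t *mask = (uint32_t*)malloc(num_volumes * sizeof(uint32_t));\n \/\/ for (uint32_t i = 0; i < num_volumes; i++) mask[i] = (i % 2); \/\/ 0=myo, 1=epi\n \/\/ handed to this solver through the extra_data argument.\n 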
print_to_stderr_and_file_and_exit(\"You need to specify a mask function when using a mixed model!\\n\");\n }\n\n solve_gpu <<>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);\n\n check_cuda_error( cudaPeekAtLastError() );\n\n check_cuda_error(cudaFree(stims_currents_device));\n if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));\n if(mapping_device) check_cuda_error(cudaFree(mapping_device));\n\n}\n\n__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) \n{\n int threadID = blockDim.x * blockIdx.x + threadIdx.x;\n\n if (threadID < num_volumes) \n {\n\n \/\/ Initial conditions for TenTusscher 2004 myocardium\n if (mapping[threadID] == 0)\n {\n \/\/ Default initial conditions\n \/*\n *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; \/\/ V; millivolt\n *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; \/\/M\n *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; \/\/H\n *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; \/\/J\n *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; \/\/Xr1\n *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; \/\/Xr2\n *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; \/\/Xs\n *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; \/\/S\n *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; \/\/R\n *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; \/\/D\n *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; \/\/F\n *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; \/\/FCa\n *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; \/\/G\n *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; \/\/Cai\n *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; \/\/CaSR\n *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; \/\/Nai\n *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; \/\/Ki\n *\/\n \/\/ Elnaz's steady-state initial conditions\n real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};\n for (uint32_t i = 0; i < NEQ; i++)\n *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];\n }\n \/\/ Initial conditions for TenTusscher 2004 epicardium\n else\n {\n \/\/ Default initial conditions\n \/*\n *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; \/\/ V; millivolt\n *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; \/\/M\n *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; \/\/H\n *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; \/\/J\n *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; \/\/Xr1\n *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; \/\/Xr2\n *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; \/\/Xs\n *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; \/\/S\n *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; \/\/R\n *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; \/\/D\n *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; \/\/F\n *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; \/\/FCa\n *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; \/\/G\n *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; \/\/Cai\n *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; 
\/\/CaSR\n *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; \/\/Nai\n *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; \/\/Ki\n *\/\n \/\/ Elnaz's steady-state initial conditions\n real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526}; for (uint32_t i = 0; i < NEQ; i++)\n *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];\n }\n }\n}\n\n\/\/ Solving the model for each cell in the tissue matrix ni x nj\n__global__ void solve_gpu(real dt, real *sv, real* stim_currents,\n uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,\n int num_steps)\n{\n int threadID = blockDim.x * blockIdx.x + threadIdx.x;\n int sv_id;\n\n \/\/ Each thread solves one cell model\n if(threadID < num_cells_to_solve) \n {\n if(cells_to_solve)\n sv_id = cells_to_solve[threadID];\n else\n sv_id = threadID;\n\n real rDY[NEQ];\n\n for (int n = 0; n < num_steps; ++n) \n {\n\n if (mapping[sv_id] == 0)\n {\n RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);\n\n for(int i = 0; i < NEQ; i++) \n {\n *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);\n }\n }\n else\n {\n RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);\n\n for (int i = 0; i < NEQ; i++)\n {\n *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);\n }\n }\n \n }\n }\n}\n\ninline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) \n{\n\n \/\/ State variables\n real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);\n real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);\n real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);\n real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);\n real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);\n real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);\n real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);\n real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);\n real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);\n real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);\n real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);\n real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);\n real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);\n real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);\n real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);\n real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);\n real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);\n\n \/\/External concentrations\n real Ko=5.4;\n real Cao=2.0;\n real Nao=140.0;\n\n \/\/Intracellular volumes\n real Vc=0.016404;\n real Vsr=0.001094;\n\n \/\/Calcium dynamics\n real Bufc=0.15f;\n real Kbufc=0.001f;\n real Bufsr=10.f;\n real Kbufsr=0.3f;\n real taufca=2.f;\n real taug=2.f;\n real Vmaxup=0.000425f;\n real Kup=0.00025f;\n\n \/\/Constants\n const real R = 8314.472f;\n const real F = 96485.3415f;\n const real T =310.0f;\n real RTONF =(R*T)\/F;\n\n \/\/Cellular capacitance \n real CAPACITANCE=0.185;\n\n \/\/Parameters for currents\n \/\/Parameters for IKr\n real Gkr=0.096;\n \/\/Parameters for Iks\n real pKNa=0.03;\n\n\/\/ [!] 
Myocardium cell\n real Gks=0.062;\n\/\/Parameters for Ik1\n real GK1=5.405;\n\/\/Parameters for Ito\n\/\/ [!] Myocardium cell\n real Gto=0.294;\n\/\/Parameters for INa\n real GNa=14.838;\n\/\/Parameters for IbNa\n real GbNa=0.00029;\n\/\/Parameters for INaK\n real KmK=1.0;\n real KmNa=40.0;\n real knak=1.362;\n\/\/Parameters for ICaL\n real GCaL=0.000175;\n\/\/Parameters for IbCa\n real GbCa=0.000592;\n\/\/Parameters for INaCa\n real knaca=1000;\n real KmNai=87.5;\n real KmCa=1.38;\n real ksat=0.1;\n real n=0.35;\n\/\/Parameters for IpCa\n real GpCa=0.825;\n real KpCa=0.0005;\n\/\/Parameters for IpK;\n real GpK=0.0146;\n\n\n real IKr;\n real IKs;\n real IK1;\n real Ito;\n real INa;\n real IbNa;\n real ICaL;\n real IbCa;\n real INaCa;\n real IpCa;\n real IpK;\n real INaK;\n real Irel;\n real Ileak;\n\n\n real dNai;\n real dKi;\n real dCai;\n real dCaSR;\n\n real A;\n\/\/ real BufferFactorc;\n\/\/ real BufferFactorsr;\n real SERCA;\n real Caisquare;\n real CaSRsquare;\n real CaCurrent;\n real CaSRCurrent;\n\n\n real fcaold;\n real gold;\n real Ek;\n real Ena;\n real Eks;\n real Eca;\n real CaCSQN;\n real bjsr;\n real cjsr;\n real CaBuf;\n real bc;\n real cc;\n real Ak1;\n real Bk1;\n real rec_iK1;\n real rec_ipK;\n real rec_iNaK;\n real AM;\n real BM;\n real AH_1;\n real BH_1;\n real AH_2;\n real BH_2;\n real AJ_1;\n real BJ_1;\n real AJ_2;\n real BJ_2;\n real M_INF;\n real H_INF;\n real J_INF;\n real TAU_M;\n real TAU_H;\n real TAU_J;\n real axr1;\n real bxr1;\n real axr2;\n real bxr2;\n real Xr1_INF;\n real Xr2_INF;\n real TAU_Xr1;\n real TAU_Xr2;\n real Axs;\n real Bxs;\n real Xs_INF;\n real TAU_Xs;\n real R_INF;\n real TAU_R;\n real S_INF;\n real TAU_S;\n real Ad;\n real Bd;\n real Cd;\n real TAU_D;\n real D_INF;\n real TAU_F;\n real F_INF;\n real FCa_INF;\n real G_INF;\n\n real inverseVcF2=1\/(2*Vc*F);\n real inverseVcF=1.\/(Vc*F);\n real Kupsquare=Kup*Kup;\n\/\/ real BufcKbufc=Bufc*Kbufc;\n\/\/ real Kbufcsquare=Kbufc*Kbufc;\n\/\/ real Kbufc2=2*Kbufc;\n\/\/ real BufsrKbufsr=Bufsr*Kbufsr;\n\/\/ const real Kbufsrsquare=Kbufsr*Kbufsr;\n\/\/ const real Kbufsr2=2*Kbufsr;\n const real exptaufca=exp(-dt\/taufca);\n const real exptaug=exp(-dt\/taug);\n\n real sItot;\n\n \/\/Needed to compute currents\n Ek=RTONF*(log((Ko\/Ki)));\n Ena=RTONF*(log((Nao\/Nai)));\n Eks=RTONF*(log((Ko+pKNa*Nao)\/(Ki+pKNa*Nai)));\n Eca=0.5*RTONF*(log((Cao\/Cai)));\n Ak1=0.1\/(1.+exp(0.06*(svolt-Ek-200)));\n Bk1=(3.*exp(0.0002*(svolt-Ek+100))+\n exp(0.1*(svolt-Ek-10)))\/(1.+exp(-0.5*(svolt-Ek)));\n rec_iK1=Ak1\/(Ak1+Bk1);\n rec_iNaK=(1.\/(1.+0.1245*exp(-0.1*svolt*F\/(R*T))+0.0353*exp(-svolt*F\/(R*T))));\n rec_ipK=1.\/(1.+exp((25-svolt)\/5.98));\n\n\n \/\/Compute currents\n INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);\n ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F\/(R*T))*\n (exp(2*svolt*F\/(R*T))*Cai-0.341*Cao)\/(exp(2*svolt*F\/(R*T))-1.);\n Ito=Gto*sr*ss*(svolt-Ek);\n IKr=Gkr*sqrt(Ko\/5.4)*sxr1*sxr2*(svolt-Ek);\n IKs=Gks*sxs*sxs*(svolt-Eks);\n IK1=GK1*rec_iK1*(svolt-Ek);\n INaCa=knaca*(1.\/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.\/(KmCa+Cao))*\n (1.\/(1+ksat*exp((n-1)*svolt*F\/(R*T))))*\n (exp(n*svolt*F\/(R*T))*Nai*Nai*Nai*Cao-\n exp((n-1)*svolt*F\/(R*T))*Nao*Nao*Nao*Cai*2.5);\n INaK=knak*(Ko\/(Ko+KmK))*(Nai\/(Nai+KmNa))*rec_iNaK;\n IpCa=GpCa*Cai\/(KpCa+Cai);\n IpK=GpK*rec_ipK*(svolt-Ek);\n IbNa=GbNa*(svolt-Ena);\n IbCa=GbCa*(svolt-Eca);\n\n\n \/\/Determine total current\n (sItot) = IKr +\n IKs +\n IK1 +\n Ito +\n INa +\n IbNa +\n ICaL +\n IbCa +\n INaK +\n INaCa +\n IpCa +\n IpK +\n stim_current;\n\n\n \/\/update concentrations\n 
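\/\/ Rapid-buffering update: dCai collects the explicit flux step, then free Ca is\n \/\/ recovered from total Ca by solving the buffer quadratic, Cai = (sqrt(bc*bc+4*cc)-bc)\/2,\n \/\/ and the SR pool does the same with bjsr\/cjsr.\n 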
Caisquare=Cai*Cai;\n CaSRsquare=CaSR*CaSR;\n CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;\n A=0.016464f*CaSRsquare\/(0.0625f+CaSRsquare)+0.008232f;\n Irel=A*sd*sg;\n Ileak=0.00008f*(CaSR-Cai);\n SERCA=Vmaxup\/(1.f+(Kupsquare\/Caisquare));\n CaSRCurrent=SERCA-Irel-Ileak;\n CaCSQN=Bufsr*CaSR\/(CaSR+Kbufsr);\n dCaSR=dt*(Vc\/Vsr)*CaSRCurrent;\n bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;\n cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);\n CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)\/2.;\n CaBuf=Bufc*Cai\/(Cai+Kbufc);\n dCai=dt*(CaCurrent-CaSRCurrent);\n bc=Bufc-CaBuf-dCai-Cai+Kbufc;\n cc=Kbufc*(CaBuf+dCai+Cai);\n Cai=(sqrt(bc*bc+4*cc)-bc)\/2;\n\n\n\n dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;\n Nai+=dt*dNai;\n\n dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;\n Ki+=dt*dKi;\n\n \/\/compute steady state values and time constants\n AM=1.\/(1.+exp((-60.-svolt)\/5.));\n BM=0.1\/(1.+exp((svolt+35.)\/5.))+0.10\/(1.+exp((svolt-50.)\/200.));\n TAU_M=AM*BM;\n M_INF=1.\/((1.+exp((-56.86-svolt)\/9.03))*(1.+exp((-56.86-svolt)\/9.03)));\n if (svolt>=-40.)\n {\n AH_1=0.;\n BH_1=(0.77\/(0.13*(1.+exp(-(svolt+10.66)\/11.1))));\n TAU_H= 1.0\/(AH_1+BH_1);\n }\n else\n {\n AH_2=(0.057*exp(-(svolt+80.)\/6.8));\n BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));\n TAU_H=1.0\/(AH_2+BH_2);\n }\n H_INF=1.\/((1.+exp((svolt+71.55)\/7.43))*(1.+exp((svolt+71.55)\/7.43)));\n if(svolt>=-40.)\n {\n AJ_1=0.;\n BJ_1=(0.6*exp((0.057)*svolt)\/(1.+exp(-0.1*(svolt+32.))));\n TAU_J= 1.0\/(AJ_1+BJ_1);\n }\n else\n {\n AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*\n exp(-0.04391*svolt))*(svolt+37.78)\/\n (1.+exp(0.311*(svolt+79.23))));\n BJ_2=(0.02424*exp(-0.01052*svolt)\/(1.+exp(-0.1378*(svolt+40.14))));\n TAU_J= 1.0\/(AJ_2+BJ_2);\n }\n J_INF=H_INF;\n\n Xr1_INF=1.\/(1.+exp((-26.-svolt)\/7.));\n axr1=450.\/(1.+exp((-45.-svolt)\/10.));\n bxr1=6.\/(1.+exp((svolt-(-30.))\/11.5));\n TAU_Xr1=axr1*bxr1;\n Xr2_INF=1.\/(1.+exp((svolt-(-88.))\/24.));\n axr2=3.\/(1.+exp((-60.-svolt)\/20.));\n bxr2=1.12\/(1.+exp((svolt-60.)\/20.));\n TAU_Xr2=axr2*bxr2;\n\n Xs_INF=1.\/(1.+exp((-5.-svolt)\/14.));\n Axs=1100.\/(sqrt(1.+exp((-10.-svolt)\/6)));\n Bxs=1.\/(1.+exp((svolt-60.)\/20.));\n TAU_Xs=Axs*Bxs;\n\n\/\/ [!] 
Myocardium cell\n R_INF=1.\/(1.+exp((20-svolt)\/6.));\n S_INF=1.\/(1.+exp((svolt+20)\/5.));\n TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)\/1800.)+0.8;\n TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)\/320.)+5.\/(1.+exp((svolt-20.)\/5.))+3.;\n\n\n D_INF=1.\/(1.+exp((-5-svolt)\/7.5));\n Ad=1.4\/(1.+exp((-35-svolt)\/13))+0.25;\n Bd=1.4\/(1.+exp((svolt+5)\/5));\n Cd=1.\/(1.+exp((50-svolt)\/20));\n TAU_D=Ad*Bd+Cd;\n F_INF=1.\/(1.+exp((svolt+20)\/7));\n \/\/TAU_F=1125*exp(-(svolt+27)*(svolt+27)\/300)+80+165\/(1.+exp((25-svolt)\/10));\n TAU_F=1125*exp(-(svolt+27)*(svolt+27)\/240)+80+165\/(1.+exp((25-svolt)\/10)); \/\/ Updated from CellML\n\n\n FCa_INF=(1.\/(1.+pow((Cai\/0.000325),8))+\n 0.1\/(1.+exp((Cai-0.0005)\/0.0001))+\n 0.20\/(1.+exp((Cai-0.00075)\/0.0008))+\n 0.23 )\/1.46;\n if(Cai<0.00035)\n G_INF=1.\/(1.+pow((Cai\/0.00035),6));\n else\n G_INF=1.\/(1.+pow((Cai\/0.00035),16));\n\n \/\/Update gates\n rDY_[1] = M_INF-(M_INF-sm)*exp(-dt\/TAU_M);\n rDY_[2] = H_INF-(H_INF-sh)*exp(-dt\/TAU_H);\n rDY_[3] = J_INF-(J_INF-sj)*exp(-dt\/TAU_J);\n rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt\/TAU_Xr1);\n rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt\/TAU_Xr2);\n rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt\/TAU_Xs);\n rDY_[7] = S_INF-(S_INF-ss)*exp(-dt\/TAU_S);\n rDY_[8] = R_INF-(R_INF-sr)*exp(-dt\/TAU_R);\n rDY_[9] = D_INF-(D_INF-sd)*exp(-dt\/TAU_D);\n rDY_[10] = F_INF-(F_INF-sf)*exp(-dt\/TAU_F);\n fcaold= sfca;\n sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;\n if(sfca>fcaold && (svolt)>-37.0)\n sfca = fcaold;\n gold = sg;\n sg = G_INF-(G_INF-sg)*exptaug;\n\n if(sg>gold && (svolt)>-37.0)\n sg=gold;\n\n \/\/update voltage\n rDY_[0] = svolt + dt*(-sItot);\n rDY_[11] = sfca;\n rDY_[12] = sg;\n rDY_[13] = Cai;\n rDY_[14] = CaSR;\n rDY_[15] = Nai;\n rDY_[16] = Ki;\n\n}\n\ninline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)\n{\n \/\/ State variables\n real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);\n real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);\n real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);\n real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);\n real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);\n real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);\n real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);\n real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);\n real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);\n real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);\n real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);\n real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);\n real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);\n real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);\n real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);\n real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);\n real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);\n\n \/\/External concentrations\n real Ko=5.4;\n real Cao=2.0;\n real Nao=140.0;\n\n \/\/Intracellular volumes\n real Vc=0.016404;\n real Vsr=0.001094;\n\n \/\/Calcium dynamics\n real Bufc=0.15f;\n real Kbufc=0.001f;\n real Bufsr=10.f;\n real Kbufsr=0.3f;\n real taufca=2.f;\n real taug=2.f;\n real Vmaxup=0.000425f;\n real Kup=0.00025f;\n\n \/\/Constants\n const real R = 8314.472f;\n const real F = 96485.3415f;\n const real T =310.0f;\n real RTONF =(R*T)\/F;\n\n \/\/Cellular capacitance \n real CAPACITANCE=0.185;\n\n \/\/Parameters for currents\n \/\/Parameters for IKr\n real Gkr=0.096;\n \/\/Parameters for Iks\n real 
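\/* epicardium variant: same structure as RHS_gpu_myo above, but Gks differs (0.245 vs 0.062) and most conductances are overwritten from the fitted parameters[] block below *\/ 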
pKNa=0.03;\n \/\/ [!] Epicardium cell\n real Gks=0.245;\n \/\/Parameters for Ik1\n real GK1=5.405;\n \/\/Parameters for Ito\n\/\/ [!] Epicardium cell\n real Gto=0.294;\n\/\/Parameters for INa\n real GNa=14.838;\n\/\/Parameters for IbNa\n real GbNa=0.00029;\n\/\/Parameters for INaK\n real KmK=1.0;\n real KmNa=40.0;\n real knak=1.362;\n\/\/Parameters for ICaL\n real GCaL=0.000175;\n\/\/Parameters for IbCa\n real GbCa=0.000592;\n\/\/Parameters for INaCa\n real knaca=1000;\n real KmNai=87.5;\n real KmCa=1.38;\n real ksat=0.1;\n real n=0.35;\n\/\/Parameters for IpCa\n real GpCa=0.825;\n real KpCa=0.0005;\n\/\/Parameters for IpK;\n real GpK=0.0146;\n\n real parameters []={14.5383636643555,0.000359007183612285,0.000154135859579797,0.000217532604523131,0.265156052763393,0.186639850277223,0.149365610424309,3.43320580539409,0.0166941723782826,1.45123160724562,1094.13527370174,0.000494385096732911,0.269171393030809,0.0183256017779276,0.00468024174172971,1.50869252254344e-05};\n GNa=parameters[0];\n GbNa=parameters[1];\n GCaL=parameters[2];\n GbCa=parameters[3];\n Gto=parameters[4];\n Gkr=parameters[5];\n Gks=parameters[6];\n GK1=parameters[7];\n GpK=parameters[8];\n knak=parameters[9];\n knaca=parameters[10];\n Vmaxup=parameters[11];\n GpCa=parameters[12];\n real arel=parameters[13];\n real crel=parameters[14];\n real Vleak=parameters[15];\n\n real IKr;\n real IKs;\n real IK1;\n real Ito;\n real INa;\n real IbNa;\n real ICaL;\n real IbCa;\n real INaCa;\n real IpCa;\n real IpK;\n real INaK;\n real Irel;\n real Ileak;\n\n\n real dNai;\n real dKi;\n real dCai;\n real dCaSR;\n\n real A;\n\/\/ real BufferFactorc;\n\/\/ real BufferFactorsr;\n real SERCA;\n real Caisquare;\n real CaSRsquare;\n real CaCurrent;\n real CaSRCurrent;\n\n\n real fcaold;\n real gold;\n real Ek;\n real Ena;\n real Eks;\n real Eca;\n real CaCSQN;\n real bjsr;\n real cjsr;\n real CaBuf;\n real bc;\n real cc;\n real Ak1;\n real Bk1;\n real rec_iK1;\n real rec_ipK;\n real rec_iNaK;\n real AM;\n real BM;\n real AH_1;\n real BH_1;\n real AH_2;\n real BH_2;\n real AJ_1;\n real BJ_1;\n real AJ_2;\n real BJ_2;\n real M_INF;\n real H_INF;\n real J_INF;\n real TAU_M;\n real TAU_H;\n real TAU_J;\n real axr1;\n real bxr1;\n real axr2;\n real bxr2;\n real Xr1_INF;\n real Xr2_INF;\n real TAU_Xr1;\n real TAU_Xr2;\n real Axs;\n real Bxs;\n real Xs_INF;\n real TAU_Xs;\n real R_INF;\n real TAU_R;\n real S_INF;\n real TAU_S;\n real Ad;\n real Bd;\n real Cd;\n real TAU_D;\n real D_INF;\n real TAU_F;\n real F_INF;\n real FCa_INF;\n real G_INF;\n\n real inverseVcF2=1\/(2*Vc*F);\n real inverseVcF=1.\/(Vc*F);\n real Kupsquare=Kup*Kup;\n\/\/ real BufcKbufc=Bufc*Kbufc;\n\/\/ real Kbufcsquare=Kbufc*Kbufc;\n\/\/ real Kbufc2=2*Kbufc;\n\/\/ real BufsrKbufsr=Bufsr*Kbufsr;\n\/\/ const real Kbufsrsquare=Kbufsr*Kbufsr;\n\/\/ const real Kbufsr2=2*Kbufsr;\n const real exptaufca=exp(-dt\/taufca);\n const real exptaug=exp(-dt\/taug);\n\n real sItot;\n\n \/\/Needed to compute currents\n Ek=RTONF*(log((Ko\/Ki)));\n Ena=RTONF*(log((Nao\/Nai)));\n Eks=RTONF*(log((Ko+pKNa*Nao)\/(Ki+pKNa*Nai)));\n Eca=0.5*RTONF*(log((Cao\/Cai)));\n Ak1=0.1\/(1.+exp(0.06*(svolt-Ek-200)));\n Bk1=(3.*exp(0.0002*(svolt-Ek+100))+\n exp(0.1*(svolt-Ek-10)))\/(1.+exp(-0.5*(svolt-Ek)));\n rec_iK1=Ak1\/(Ak1+Bk1);\n rec_iNaK=(1.\/(1.+0.1245*exp(-0.1*svolt*F\/(R*T))+0.0353*exp(-svolt*F\/(R*T))));\n rec_ipK=1.\/(1.+exp((25-svolt)\/5.98));\n\n\n \/\/Compute currents\n INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);\n ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F\/(R*T))*\n 
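\/* nonlinear (exponential) driving force for ICaL, in contrast to the ohmic (svolt - E) factor used by the other currents in this routine *\/ 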
(exp(2*svolt*F\/(R*T))*Cai-0.341*Cao)\/(exp(2*svolt*F\/(R*T))-1.);\n Ito=Gto*sr*ss*(svolt-Ek);\n IKr=Gkr*sqrt(Ko\/5.4)*sxr1*sxr2*(svolt-Ek);\n IKs=Gks*sxs*sxs*(svolt-Eks);\n IK1=GK1*rec_iK1*(svolt-Ek);\n INaCa=knaca*(1.\/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.\/(KmCa+Cao))*\n (1.\/(1+ksat*exp((n-1)*svolt*F\/(R*T))))*\n (exp(n*svolt*F\/(R*T))*Nai*Nai*Nai*Cao-\n exp((n-1)*svolt*F\/(R*T))*Nao*Nao*Nao*Cai*2.5);\n INaK=knak*(Ko\/(Ko+KmK))*(Nai\/(Nai+KmNa))*rec_iNaK;\n IpCa=GpCa*Cai\/(KpCa+Cai);\n IpK=GpK*rec_ipK*(svolt-Ek);\n IbNa=GbNa*(svolt-Ena);\n IbCa=GbCa*(svolt-Eca);\n\n\n \/\/Determine total current\n (sItot) = IKr +\n IKs +\n IK1 +\n Ito +\n INa +\n IbNa +\n ICaL +\n IbCa +\n INaK +\n INaCa +\n IpCa +\n IpK +\n stim_current;\n\n\n \/\/update concentrations\n Caisquare=Cai*Cai;\n CaSRsquare=CaSR*CaSR;\n CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;\n A=arel*CaSRsquare\/(0.0625f+CaSRsquare)+crel;\n Irel=A*sd*sg;\n Ileak=Vleak*(CaSR-Cai);\n SERCA=Vmaxup\/(1.f+(Kupsquare\/Caisquare));\n CaSRCurrent=SERCA-Irel-Ileak;\n CaCSQN=Bufsr*CaSR\/(CaSR+Kbufsr);\n dCaSR=dt*(Vc\/Vsr)*CaSRCurrent;\n bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;\n cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);\n CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)\/2.;\n CaBuf=Bufc*Cai\/(Cai+Kbufc);\n dCai=dt*(CaCurrent-CaSRCurrent);\n bc=Bufc-CaBuf-dCai-Cai+Kbufc;\n cc=Kbufc*(CaBuf+dCai+Cai);\n Cai=(sqrt(bc*bc+4*cc)-bc)\/2;\n\n\n\n dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;\n Nai+=dt*dNai;\n\n dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;\n Ki+=dt*dKi;\n\n \/\/compute steady state values and time constants\n AM=1.\/(1.+exp((-60.-svolt)\/5.));\n BM=0.1\/(1.+exp((svolt+35.)\/5.))+0.10\/(1.+exp((svolt-50.)\/200.));\n TAU_M=AM*BM;\n M_INF=1.\/((1.+exp((-56.86-svolt)\/9.03))*(1.+exp((-56.86-svolt)\/9.03)));\n if (svolt>=-40.)\n {\n AH_1=0.;\n BH_1=(0.77\/(0.13*(1.+exp(-(svolt+10.66)\/11.1))));\n TAU_H= 1.0\/(AH_1+BH_1);\n }\n else\n {\n AH_2=(0.057*exp(-(svolt+80.)\/6.8));\n BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));\n TAU_H=1.0\/(AH_2+BH_2);\n }\n H_INF=1.\/((1.+exp((svolt+71.55)\/7.43))*(1.+exp((svolt+71.55)\/7.43)));\n if(svolt>=-40.)\n {\n AJ_1=0.;\n BJ_1=(0.6*exp((0.057)*svolt)\/(1.+exp(-0.1*(svolt+32.))));\n TAU_J= 1.0\/(AJ_1+BJ_1);\n }\n else\n {\n AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*\n exp(-0.04391*svolt))*(svolt+37.78)\/\n (1.+exp(0.311*(svolt+79.23))));\n BJ_2=(0.02424*exp(-0.01052*svolt)\/(1.+exp(-0.1378*(svolt+40.14))));\n TAU_J= 1.0\/(AJ_2+BJ_2);\n }\n J_INF=H_INF;\n\n Xr1_INF=1.\/(1.+exp((-26.-svolt)\/7.));\n axr1=450.\/(1.+exp((-45.-svolt)\/10.));\n bxr1=6.\/(1.+exp((svolt-(-30.))\/11.5));\n TAU_Xr1=axr1*bxr1;\n Xr2_INF=1.\/(1.+exp((svolt-(-88.))\/24.));\n axr2=3.\/(1.+exp((-60.-svolt)\/20.));\n bxr2=1.12\/(1.+exp((svolt-60.)\/20.));\n TAU_Xr2=axr2*bxr2;\n\n Xs_INF=1.\/(1.+exp((-5.-svolt)\/14.));\n Axs=1100.\/(sqrt(1.+exp((-10.-svolt)\/6)));\n Bxs=1.\/(1.+exp((svolt-60.)\/20.));\n TAU_Xs=Axs*Bxs;\n\n R_INF=1.\/(1.+exp((20-svolt)\/6.));\n S_INF=1.\/(1.+exp((svolt+20)\/5.));\n TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)\/1800.)+0.8;\n TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)\/320.)+5.\/(1.+exp((svolt-20.)\/5.))+3.;\n\n\n D_INF=1.\/(1.+exp((-5-svolt)\/7.5));\n Ad=1.4\/(1.+exp((-35-svolt)\/13))+0.25;\n Bd=1.4\/(1.+exp((svolt+5)\/5));\n Cd=1.\/(1.+exp((50-svolt)\/20));\n TAU_D=Ad*Bd+Cd;\n F_INF=1.\/(1.+exp((svolt+20)\/7));\n \/\/TAU_F=1125*exp(-(svolt+27)*(svolt+27)\/300)+80+165\/(1.+exp((25-svolt)\/10));\n TAU_F=1125*exp(-(svolt+27)*(svolt+27)\/240)+80+165\/(1.+exp((25-svolt)\/10)); 
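\/* the commented-out line above is the original 300-denominator TAU_F; the active 240 variant is the CellML correction noted here *\/ 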
\/\/ Updated from CellML\n\n\n FCa_INF=(1.\/(1.+pow((Cai\/0.000325),8))+\n 0.1\/(1.+exp((Cai-0.0005)\/0.0001))+\n 0.20\/(1.+exp((Cai-0.00075)\/0.0008))+\n 0.23 )\/1.46;\n if(Cai<0.00035)\n G_INF=1.\/(1.+pow((Cai\/0.00035),6));\n else\n G_INF=1.\/(1.+pow((Cai\/0.00035),16));\n\n \/\/Update gates\n rDY_[1] = M_INF-(M_INF-sm)*exp(-dt\/TAU_M);\n rDY_[2] = H_INF-(H_INF-sh)*exp(-dt\/TAU_H);\n rDY_[3] = J_INF-(J_INF-sj)*exp(-dt\/TAU_J);\n rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt\/TAU_Xr1);\n rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt\/TAU_Xr2);\n rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt\/TAU_Xs);\n rDY_[7] = S_INF-(S_INF-ss)*exp(-dt\/TAU_S);\n rDY_[8] = R_INF-(R_INF-sr)*exp(-dt\/TAU_R);\n rDY_[9] = D_INF-(D_INF-sd)*exp(-dt\/TAU_D);\n rDY_[10] = F_INF-(F_INF-sf)*exp(-dt\/TAU_F);\n fcaold= sfca;\n sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;\n if(sfca>fcaold && (svolt)>-37.0)\n sfca = fcaold;\n gold = sg;\n sg = G_INF-(G_INF-sg)*exptaug;\n\n if(sg>gold && (svolt)>-37.0)\n sg=gold;\n\n \/\/update voltage\n rDY_[0] = svolt + dt*(-sItot);\n rDY_[11] = sfca;\n rDY_[12] = sg;\n rDY_[13] = Cai;\n rDY_[14] = CaSR;\n rDY_[15] = Nai;\n rDY_[16] = Ki; \n\n}\n\n","avg_line_length":31.0866388309,"max_line_length":393,"alphanum_fraction":0.5501494241} {"size":1375,"ext":"cu","lang":"Cuda","max_stars_count":3.0,"content":"\n\/* This is a automatically generated test. Do not modify *\/\n\n#include \n#include \n#include \n\n__global__\nvoid compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {\nfloat tmp_1 = +1.3526E-43f;\ncomp = tmp_1 - (-1.6277E-35f * +0.0f \/ var_1);\nif (comp >= -1.4369E-43f \/ var_2 * +0.0f - -1.1795E-27f - (var_3 + var_4)) {\n comp += (-1.3787E7f + var_5);\n}\nif (comp > (+1.7559E3f \/ var_6 * var_7)) {\n comp += sinhf((var_8 + -0.0f * -1.1070E-30f * var_9 * var_10));\ncomp = (+1.8618E34f \/ (+0.0f - -1.1786E-37f + +1.3612E35f * +1.0141E35f + -1.5017E19f));\n}\n printf(\"%.17g\\n\", comp);\n\n}\n\nfloat* initPointer(float v) {\n float *ret = (float*) malloc(sizeof(float)*10);\n for(int i=0; i < 10; ++i)\n ret[i] = v;\n return ret;\n}\n\nint main(int argc, char** argv) {\n\/* Program variables *\/\n\n float tmp_1 = atof(argv[1]);\n float tmp_2 = atof(argv[2]);\n float tmp_3 = atof(argv[3]);\n float tmp_4 = atof(argv[4]);\n float tmp_5 = atof(argv[5]);\n float tmp_6 = atof(argv[6]);\n float tmp_7 = atof(argv[7]);\n float tmp_8 = atof(argv[8]);\n float tmp_9 = atof(argv[9]);\n float tmp_10 = atof(argv[10]);\n float tmp_11 = atof(argv[11]);\n\n compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);\n cudaDeviceSynchronize();\n\n return 0;\n}\n","avg_line_length":27.5,"max_line_length":148,"alphanum_fraction":0.6327272727} {"size":2487,"ext":"cuh","lang":"Cuda","max_stars_count":5168.0,"content":"\/**\n * \\file dnn\/src\/cuda\/convolution3d\/chanwise\/kern.cuh\n * MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n *\n * Copyright (c) 2014-2021 Megvii Inc. 
All rights reserved.\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n *\/\n#pragma once\n\n#include \"src\/cuda\/utils.cuh\"\n\n#include \n#include \n\n#if MEGDNN_CC_HOST\n#include \"src\/cuda\/convolution3d\/helper.h\"\n#endif\n\nnamespace megdnn {\nnamespace cuda {\nnamespace convolution3d {\nnamespace chanwise {\n\nstruct Param {\n uint32_t batch, src_chl, src_d, src_h, src_w, chl_mul, flt_d, flt_h, flt_w, out_d,\n out_h, out_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d,\n dilation_h, dilation_w;\n#if MEGDNN_CC_HOST\n static Param from_fwd_args(const ForwardSizeArgs& args) {\n#define U(v) static_cast<uint32_t>(v)\n auto&& src = args.src_layout->shape;\n auto&& dst = args.dst_layout->shape;\n auto&& fm = args.filter_meta;\n size_t c_pos, hw_pos;\n if (fm.format == param::Convolution3D::Format::NCDHW) {\n c_pos = 1;\n hw_pos = 2;\n } else { \/\/ NDHWC\n c_pos = 4;\n hw_pos = 1;\n }\n return {\n U(src[0]), U(src[c_pos]), U(src[hw_pos]),\n U(src[hw_pos + 1]), U(src[hw_pos + 2]), U(fm.ocpg),\n U(fm.spatial[0]), U(fm.spatial[1]), U(fm.spatial[2]),\n U(dst[hw_pos]), U(dst[hw_pos + 1]), U(dst[hw_pos + 2]),\n U(fm.padding[0]), U(fm.padding[1]), U(fm.padding[2]),\n U(fm.stride[0]), U(fm.stride[1]), U(fm.stride[2]),\n U(fm.dilation[0]), U(fm.dilation[1]), U(fm.dilation[2]),\n };\n#undef U\n }\n#endif\n};\n\ntemplate <typename T>\nvoid run_fwd(\n T* dst, const T* src, const T* flt, const Param& param, cudaStream_t stream);\n\ntemplate <typename T>\nvoid run_bwd_data(\n T* src_grad, const T* dst_grad, const T* flt, const Param& param,\n cudaStream_t stream);\n\ntemplate <typename T>\nvoid run_bwd_filter(\n T* filter_grad, const T* src, const T* dst_grad, const Param& param,\n cudaStream_t stream);\n\n} \/\/ namespace chanwise\n} \/\/ namespace convolution3d\n} \/\/ namespace cuda\n} \/\/ namespace megdnn\n\n\/\/ vim: ft=cpp syntax=cpp.doxygen\n","avg_line_length":31.4810126582,"max_line_length":89,"alphanum_fraction":0.6099718536} {"size":5657,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/* NiuTrans.Tensor - an open-source tensor library\n * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.\n * All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/*\n * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-11-27\n *\/\n\n#include \"Gather.cuh\"\n#include \"CopyBlocksSelected.cuh\"\n#include \"..\/..\/XDevice.h\"\n#include \"..\/..\/XUtility.h\"\n\nnamespace nts { \/\/ namespace nts(NiuTrans.Tensor)\n\n#ifdef USE_CUDA\n\n\/*\ngather indexed sub-tensors(cuda version)\n\n>> sData - the data pointer of the source tensor\n>> tData - the data pointer of the target tensor\n>> sIndex - the index of the source tensor\n>> indexSize - the size of the srcIndex\n>> stride - stride of a data block\n*\/\n__global__\nvoid KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, 
int indexSize, int stride)\n{\n __shared__ DTYPE * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK];\n __shared__ DTYPE * tp[MAX_CUDA_THREAD_NUM_PER_BLOCK];\n\n \/* block id *\/\n int i = blockDim.x * blockIdx.x + threadIdx.x;\n\n \/* offset in each block *\/\n int offset = blockDim.y * blockIdx.y + threadIdx.y;\n\n if(i >= indexSize || offset >= stride)\n return;\n\n if(threadIdx.y == 0){\n sp[threadIdx.x] = sData + sIndex[i] * stride;\n tp[threadIdx.x] = tData + i * stride;\n }\n\n __syncthreads();\n\n DTYPE * s = sp[threadIdx.x];\n DTYPE * t = tp[threadIdx.x];\n\n t[offset] = s[offset];\n}\n\n\/*\ngather indexed sub-tensors(cuda version)\n\n>> sData - the data pointer of the source tensor\n>> tData - the data pointer of the target tensor\n>> sIndex - the index of the source tensor\n>> indexSize - the size of the srcIndex\n>> stride - stride of a data block\n>> strideNum - strideNum of a data block\n>> blockNum - block size of data\n*\/\n__global__\nvoid KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, int stride, int strideNum, int blockNum)\n{\n int idx = blockDim.x * blockIdx.x + threadIdx.x;\n int idy = blockDim.y * blockIdx.y + threadIdx.y;\n int blockIndex = idy \/ stride;\n int offsetInBlock = idy % stride;\n\n int size = stride * strideNum * blockNum; \n\n#pragma unroll\n for (int i = idx * stride + stride * strideNum * blockIndex + offsetInBlock;\n i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum && i < size;\n i += stride * blockDim.x) {\n tData[i] = sData[sIndex[i]];\n }\n}\n\n\/*\ngather indexed sub-tensors(cuda version)\n\n>> s - the source tensor\n>> t - the target tensor\n>> srcIndex - the tensor to save the index of the source tensor\n*\/\nvoid _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex)\n{\n int devID = s->devID;\n XMem * mem = s->mem;\n\n int stride = s->GetDim(1);\n int indexSize = srcIndex->unitNum;\n\n int cudaGrids[3];\n int cudaBlocks[3];\n\n int devIDBackup;\n ProtectCudaDev(devID, devIDBackup);\n\n GDevs.GetCudaThread2D(devID, indexSize, stride, MAX_INT, cudaGrids, cudaBlocks);\n\n dim3 blocks(cudaGrids[0], cudaGrids[1]);\n dim3 threads(cudaBlocks[0], cudaBlocks[1]);\n\n DTYPE * sData = (DTYPE*)s->data;\n DTYPE * tData = (DTYPE*)t->data;\n\n int * sIndex = NULL;\n \n if (srcIndex->devID < 0) {\n sIndex = mem != NULL ? 
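\/* stage the index array on the device: pooled buffer when an XMem pool is attached, raw allocation otherwise (note both branches read mem->devID) *\/ 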
\n (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) : \n (int*)XMemAlloc(mem->devID, sizeof(int) * indexSize);\n XMemCopy(sIndex, devID, srcIndex, -1, sizeof(int) * indexSize);\n }\n else\n sIndex = (int *)srcIndex->data;\n\n KernelGather<<>>(sData, tData, sIndex, indexSize, stride);\n\n if (srcIndex->devID < 0) {\n if(mem != NULL)\n mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize);\n else\n XMemFree(mem->devID, sIndex);\n }\n\n BacktoCudaDev(devID, devIDBackup);\n}\n\n\/*\ngather indexed sub-tensors(cuda version)\n\n>> s - the source tensor\n>> t - the target tensor\n>> srcIndex - the tensor to save the index of the source tensor\n>> dim - the leading dimension to define \"sub-tensors\"\n*\/\nvoid _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex, int dim)\n{\n int devID = srcIndex->devID;\n XMem * mem = s->mem;\n\n int stride = 1;\n int blockNum = 1;\n int indexSize = srcIndex->unitNum;\n int strideNum = srcIndex->dimSize[dim];\n for (int i = 0; i < dim; i++)\n blockNum *= srcIndex->dimSize[i];\n for (int i = dim + 1; i < srcIndex->order; i++)\n stride *= srcIndex->dimSize[i];\n\n int * sIndex = NULL;\n if (srcIndex->devID < 0) {\n sIndex = mem != NULL ?\n (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) :\n (int*)XMemAlloc(mem->devID, sizeof(int) * indexSize);\n XMemCopy(sIndex, devID, srcIndex, -1, sizeof(int) * indexSize);\n }\n else\n sIndex = (int *)srcIndex->data;\n\n int cudaGrids[3];\n int cudaBlocks[3];\n GDevs.GetCudaThread2D(devID, max(32, strideNum), stride*blockNum, MAX_INT, cudaGrids, cudaBlocks);\n\n KernelGather << > > ((DTYPE *)s->data, (DTYPE *)t->data, sIndex, stride, strideNum, blockNum);\n}\n#endif \/\/ USE_CUDA\n\n} \/\/ namespace nts(NiuTrans.Tensor)","avg_line_length":29.9312169312,"max_line_length":169,"alphanum_fraction":0.6452183136} {"size":3463,"ext":"cu","lang":"Cuda","max_stars_count":130.0,"content":"\/*\n * Copyright (c) 2021, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \"HugeCTR\/include\/embeddings\/sparse_embedding_functors.hpp\"\n\nnamespace HugeCTR {\n\nnamespace {\n\n\/\/ for one-hot, the value_index mapping is linear (no need to use hashtable)\ntemplate \n__global__ void hash_key_value_index_mapping_kernel(size_t nnz, int slot_num,\n const uint32_t *mapping_offsets,\n const TypeKey *hash_key,\n size_t *hash_value_index) {\n size_t gid = blockIdx.x * blockDim.x + threadIdx.x;\n if (gid < nnz) {\n int slot_id = gid % slot_num;\n hash_value_index[gid] = hash_key[gid] - mapping_offsets[slot_id];\n }\n}\n\n} \/\/ namespace\n\n\/**\n * forward propagation on each GPU for LocalizedSlotSparseEmbeddingOneHot.\n * Because there is no hashtable in this class, so there must be a mapping table\n * between input valud_index and local value_index.\n * @param batch_size batch size for the current mini-batch computation.\n * @param slot_num the number of slots for current GPU\n * @param row_offset row_offset (CSR format of input sparse tensors)\n * @param hash_key value (CSR 
format of input sparse tensors)\n * @param nnz non-zero feature number per batch\n * @param mapping_offsets the mapping between input value_index and local value_index\n * @param hash_value_index hash table value_index(row index of embedding)\n * @param stream cuda stream\n *\/\ntemplate \nvoid SparseEmbeddingFunctors::forward_mapping_per_gpu(size_t batch_size, size_t slot_num,\n const Tensor2 &hash_key,\n size_t nnz,\n const Tensor2 &mapping_offsets,\n Tensor2 &hash_value_index,\n cudaStream_t stream) {\n \/\/ remove hashtable get_insert(), and do linear mapping between key and value_index\n if (nnz > 0) {\n hash_key_value_index_mapping_kernel<<<(nnz + 255) \/ 256, 256, 0, stream>>>(\n nnz, slot_num, mapping_offsets.get_ptr(), hash_key.get_ptr(), hash_value_index.get_ptr());\n }\n\n return;\n}\n\ntemplate void SparseEmbeddingFunctors::forward_mapping_per_gpu(\n size_t batch_size, size_t slot_num, const Tensor2 &hash_key, size_t nnz,\n const Tensor2 &mapping_offsets, Tensor2 &hash_value_index,\n cudaStream_t stream);\n\ntemplate void SparseEmbeddingFunctors::forward_mapping_per_gpu(\n size_t batch_size, size_t slot_num, const Tensor2 &hash_key, size_t nnz,\n const Tensor2 &mapping_offsets, Tensor2 &hash_value_index,\n cudaStream_t stream);\n\n} \/\/ namespace HugeCTR\n","avg_line_length":44.3974358974,"max_line_length":98,"alphanum_fraction":0.6699393589} {"size":1803,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \"plugin\/device\/gpu\/kernel\/cuda_impl\/uniform_candidate_sampler_impl.cuh\"\n\ntemplate \n__global__ void AssignToOutput(const int64_t size, const S prob_val, S *output_array) {\n for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {\n output_array[pos] = prob_val;\n }\n}\n\ntemplate \nvoid CalUniformCandidateSampler(const int64_t true_size, const int64_t num_sampled, const S prob_val,\n S *true_expected_count, S *sampled_expected_count, cudaStream_t cuda_stream) {\n AssignToOutput<<>>(true_size, prob_val, true_expected_count);\n AssignToOutput<<>>(num_sampled, prob_val,\n sampled_expected_count);\n}\n\ntemplate void CalUniformCandidateSampler(const int64_t true_size, const int64_t num_sampled,\n const float prob_val, float *true_expected_count,\n float *sampled_expected_count, cudaStream_t cuda_stream);\n","avg_line_length":48.7297297297,"max_line_length":115,"alphanum_fraction":0.6882972823} {"size":7159,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. *\/\n\n#include \"paddle\/fluid\/operators\/elementwise\/elementwise_op_broadcast.cu.h\"\n#include \"paddle\/fluid\/operators\/elementwise\/elementwise_sub_op.h\"\n#include \"paddle\/fluid\/operators\/reduce_ops\/reduce_op.cu.h\"\n#include \"paddle\/fluid\/platform\/complex.h\"\n#include \"paddle\/fluid\/platform\/float16.h\"\n\nnamespace ops = paddle::operators;\nnamespace plat = paddle::platform;\n\nnamespace paddle {\nnamespace operators {\n\ntemplate \nstatic __global__ void SimpleElemwiseSubGradCUDAKernel(const T* dout,\n int64_t size, T* dx,\n T* dy) {\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n while (col < size) {\n if (dx != nullptr) {\n dx[col] = dout[col];\n }\n dy[col] = -dout[col];\n col += blockDim.x * gridDim.x;\n }\n}\n\ntemplate \ntypename std::enable_if<\n std::is_same::value>::type\ndefault_elementwise_sub_grad(const framework::ExecutionContext& ctx,\n const framework::Tensor* x,\n const framework::Tensor* y,\n const framework::Tensor* out,\n const framework::Tensor* dout,\n framework::Tensor* dx, framework::Tensor* dy) {\n int axis = ctx.Attr(\"axis\");\n auto* dout_data = dout->data();\n \/\/ dx\n if (dx != nullptr) {\n auto* dx_data = dx->mutable_data(ctx.GetPlace());\n if (dx->dims() == dout->dims()) {\n if (dx_data != dout_data) {\n framework::TensorCopy(\n *dout, ctx.GetPlace(),\n ctx.template device_context(), dx);\n }\n } else {\n \/\/ For inplace strategy, dx will be stored in addr of dout, which makes\n \/\/ the result of dy wrong.\n if (dx->IsSharedBufferWith(*dout)) {\n dx->clear();\n dx->mutable_data(x->dims(), ctx.GetPlace());\n }\n std::vector reduce_dims = GetReduceDim(x->dims(), out->dims(), axis);\n gpuStream_t stream = ctx.cuda_device_context().stream();\n TensorReduceFunctorImpl>(\n *dout, dx, kps::IdentityFunctor(), reduce_dims, stream);\n }\n }\n \/\/ dy\n if (dy != nullptr) {\n auto* dy_data = dy->mutable_data(ctx.GetPlace());\n if (dy->dims() == dout->dims()) {\n if (dy_data != dout_data) {\n dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1);\n auto size = dy->numel();\n dim3 grid_size = dim3(\n (size + ELEMENTWISE_BLOCK_SIZE - 1) \/ ELEMENTWISE_BLOCK_SIZE, 1);\n SimpleElemwiseSubGradCUDAKernel<<<\n grid_size, block_size, 0,\n ctx.template device_context().stream()>>>(\n dout->data(), size, nullptr,\n dy->mutable_data(ctx.GetPlace()));\n }\n } else {\n std::vector reduce_dims = GetReduceDim(y->dims(), out->dims(), axis);\n gpuStream_t stream = ctx.cuda_device_context().stream();\n TensorReduceFunctorImpl>(\n *dout, dy, kps::InverseFunctor(), reduce_dims, stream);\n }\n }\n}\n\ntemplate \ntypename std::enable_if<\n std::is_same::value>::type\nelementwise_sub_grad(const framework::ExecutionContext& ctx,\n const framework::Tensor* x, const framework::Tensor* y,\n const framework::Tensor* out,\n const framework::Tensor* dout, framework::Tensor* dx,\n framework::Tensor* dy) {\n dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1);\n auto size = x->numel();\n dim3 grid_size =\n dim3((size + 
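\/* ceil(size \/ ELEMENTWISE_BLOCK_SIZE), in integer arithmetic *\/ 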
ELEMENTWISE_BLOCK_SIZE - 1) \/ ELEMENTWISE_BLOCK_SIZE, 1);\n SimpleElemwiseSubGradCUDAKernel<\n T><<().stream()>>>(\n dout->data(), size, dx->mutable_data(ctx.GetPlace()),\n dy->mutable_data(ctx.GetPlace()));\n}\n\n} \/\/ namespace operators\n} \/\/ namespace paddle\n\nREGISTER_OP_CUDA_KERNEL(\n elementwise_sub,\n ops::ElementwiseSubKernel,\n ops::ElementwiseSubKernel,\n ops::ElementwiseSubKernel,\n ops::ElementwiseSubKernel,\n ops::ElementwiseSubKernel,\n ops::ElementwiseSubKernel>,\n ops::ElementwiseSubKernel>);\nREGISTER_OP_CUDA_KERNEL(\n elementwise_sub_grad,\n ops::ElementwiseSubGradKernel,\n ops::ElementwiseSubGradKernel,\n ops::ElementwiseSubGradKernel,\n ops::ElementwiseSubGradKernel,\n ops::ElementwiseSubGradKernel,\n ops::ElementwiseSubGradKernel>,\n ops::ElementwiseSubGradKernel>);\nREGISTER_OP_CUDA_KERNEL(\n elementwise_sub_grad_grad,\n ops::ElementwiseSubDoubleGradKernel,\n ops::ElementwiseSubDoubleGradKernel,\n ops::ElementwiseSubDoubleGradKernel,\n ops::ElementwiseSubDoubleGradKernel,\n ops::ElementwiseSubDoubleGradKernel>,\n ops::ElementwiseSubDoubleGradKernel>);\n","avg_line_length":45.0251572327,"max_line_length":80,"alphanum_fraction":0.6365414164} {"size":1635,"ext":"cu","lang":"Cuda","max_stars_count":1.0,"content":"\/**\n * @brief SSSP test program\n * @file\n *\/\n#include \"Static\/ShortestPath\/SSSP.cuh\"\n#include \n#include \n#include \n#include \n\nint exec(int argc, char* argv[]) {\n using namespace timer;\n using namespace hornets_nest;\n\n graph::GraphStd graph;\n CommandLineParam cmd(graph, argc, argv,false);\n\n auto h_weights = new weight_t[graph.nE()];\n host::generate_randoms(h_weights, graph.nE(), 0, 100, 1);\n\n HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),\n graph.csr_out_edges());\n hornet_init.insertEdgeData(h_weights);\n\n HornetGraph hornet_graph(hornet_init);\n\n vid_t root = 0;\n if(argc==3) \n root = atoi(argv[2]);\n\n SSSP sssp(hornet_graph);\n sssp.set_parameters(root);\n\n Timer TM;\n TM.start();\n\n sssp.run();\n\n TM.stop();\n TM.print(\"SSSP\");\n return 1;\n}\n\nint main(int argc, char* argv[]) {\n int ret = 0;\n hornets_nest::gpu::initializeRMMPoolAllocation();\/\/update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.\n {\/\/scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.\n\n ret = exec(argc, argv);\n\n }\/\/scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.\n hornets_nest::gpu::finalizeRMMPoolAllocation();\n\n return ret;\n}\n\n","avg_line_length":28.6842105263,"max_line_length":246,"alphanum_fraction":0.6911314985} {"size":305,"ext":"cu","lang":"Cuda","max_stars_count":25.0,"content":"\n#include \"cuda_sort.h\"\n\n#include \n\n#include \n\n#include \"..\/modern_gpu\/include\/kernels_ext\/segmentedsort_ext.cuh\"\n\nnamespace Mochimazui {\n\n\tvoid cuda_seg_sort_int_by_int(int* key,int* data,int n,int* segs,int nsegs){\n\t\tmgpu_ext::SegSortPairsFromIndices(key, data, n, segs,nsegs);\n\t}\n\n}\n","avg_line_length":17.9411764706,"max_line_length":77,"alphanum_fraction":0.7475409836} {"size":3795,"ext":"cu","lang":"Cuda","max_stars_count":58.0,"content":"#include \n#include \n#include \n#include \n\n#define p_IJWID 6\n#define p_JID 4\n#define p_JWID 5\n#define p_Np 512\n#define p_Nq 8\n#define p_Nvgeo 12\n#define p_RXID 
0\n#define p_RYID 1\n#define p_RZID 7\n#define p_SXID 2\n#define p_SYID 3\n#define p_SZID 8\n#define p_TXID 9\n#define p_TYID 10\n#define p_TZID 11\n#define p_cubNp 4096\n#define p_cubNq 16\n\n\/\/ kernel\n#include \"adv.cu\"\n\ndfloat *drandAlloc(int N){\n dfloat *v = (dfloat*) calloc(N, sizeof(dfloat));\n for(int n = 0; n < N; ++n) v[n] = drand48();\n return v;\n}\n\nint main(int argc, char **argv) {\n\n if (argc < 4) {\n printf(\"Usage: .\/adv N cubN numElements [nRepetitions]\\n\");\n exit(-1);\n }\n\n const int N = atoi(argv[1]);\n const int cubN = atoi(argv[2]);\n const dlong Nelements = atoi(argv[3]);\n int Ntests = 1;\n\n if(argc >= 5) Ntests = atoi(argv[4]);\n\n const int Nq = N+1;\n const int cubNq = cubN+1;\n const int Np = Nq*Nq*Nq;\n const int cubNp = cubNq*cubNq*cubNq;\n const dlong offset = Nelements*Np;\n\n printf(\"Data type in bytes: %zu\\n\", sizeof(dfloat));\n\n srand48(123);\n dfloat *vgeo = drandAlloc(Np*Nelements*p_Nvgeo);\n dfloat *cubvgeo = drandAlloc(cubNp*Nelements*p_Nvgeo);\n dfloat *cubDiffInterpT = drandAlloc(3*cubNp*Nelements);\n dfloat *cubInterpT = drandAlloc(Np*cubNp);\n dfloat *u = drandAlloc(3*Np*Nelements);\n dfloat *adv = drandAlloc(3*Np*Nelements);\n\n dfloat *d_vgeo, *d_cubvgeo, *d_cubDiffInterpT, *d_cubInterpT, *d_u, *d_adv;\n hipMalloc((void**)&d_vgeo, Np*Nelements*p_Nvgeo*sizeof(dfloat));\n hipMalloc((void**)&d_cubvgeo, cubNp*Nelements*p_Nvgeo*sizeof(dfloat));\n hipMalloc((void**)&d_cubDiffInterpT,3*cubNp*Nelements*sizeof(dfloat));\n hipMalloc((void**)&d_cubInterpT, Np*cubNp*sizeof(dfloat));\n hipMalloc((void**)&d_u, 3*Np*Nelements*sizeof(dfloat));\n hipMalloc((void**)&d_adv, 3*Np*Nelements*sizeof(dfloat));\n\n hipMemcpy(d_vgeo, vgeo, Np*Nelements*p_Nvgeo*sizeof(dfloat), hipMemcpyHostToDevice);\n hipMemcpy(d_cubvgeo, cubvgeo, cubNp*Nelements*p_Nvgeo*sizeof(dfloat), hipMemcpyHostToDevice);\n hipMemcpy(d_cubDiffInterpT, cubDiffInterpT, 3*cubNp*Nelements*sizeof(dfloat), hipMemcpyHostToDevice);\n hipMemcpy(d_cubInterpT, cubInterpT, Np*cubNp*sizeof(dfloat), hipMemcpyHostToDevice);\n hipMemcpy(d_u, u, 3*Np*Nelements*sizeof(dfloat), hipMemcpyHostToDevice);\n hipMemcpy(d_adv, adv, 3*Np*Nelements*sizeof(dfloat), hipMemcpyHostToDevice);\n\n hipDeviceSynchronize();\n auto start = std::chrono::high_resolution_clock::now();\n\n \/\/ run kernel\n for(int test=0;test(end - start).count() \/ Ntests;\n\n hipMemcpy(adv, d_adv, 3*Np*Nelements*sizeof(dfloat), hipMemcpyDeviceToHost);\n\n hipFree(d_vgeo);\n hipFree(d_cubvgeo);\n hipFree(d_cubDiffInterpT);\n hipFree(d_cubInterpT);\n hipFree(d_u);\n hipFree(d_adv);\n\n\n#ifdef OUTPUT\n for (int i = 0; i < 3*Np*Nelements; i++)\n std::cout << adv[i] << \"\\n\";\n#endif\n\n \/\/ statistics\n const dfloat GDOFPerSecond = (N*N*N)*Nelements\/elapsed;\n std::cout << \" NRepetitions=\" << Ntests\n << \" N=\" << N\n << \" cubN=\" << cubN\n << \" Nelements=\" << Nelements\n << \" elapsed time=\" << elapsed\n << \" GDOF\/s=\" << GDOFPerSecond\n << \"\\n\";\n\n free(vgeo );\n free(cubvgeo );\n free(cubDiffInterpT);\n free(cubInterpT );\n free(u );\n free(adv );\n return 0;\n}\n\n","avg_line_length":28.9694656489,"max_line_length":108,"alphanum_fraction":0.6534914361} {"size":866585,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"#include \n#define CU_NUM 60\n\n__device__ __forceinline__ bool is_first_thread() {\n return threadIdx.x == 0;\n}\n\n__device__ __forceinline__ unsigned int get_cu_id() {\n return blockIdx.x % CU_NUM;\n}\n\n__device__ __forceinline__ dim3 get_3d_idx(int idx, dim3 dim) {\n dim3 result;\n result.x = 
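\/* row-major decode, x fastest: x = idx mod dim.x, y = (idx \/ dim.x) mod dim.y, z = idx \/ (dim.x * dim.y) *\/ 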
idx % dim.x;\n result.y = idx \/ dim.x % dim.y;\n result.z = idx \/ (dim.x * dim.y);\n return result;\n}\n\n__device__ void fused_nn_dense_add_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_add, float* __restrict__ placeholder2){\n float T_dense_rf[1];\n float red_buf0[1];\n __shared__ float T_dense[1];\n T_dense_rf[(0)] = 0.000000e+00f;\n for (int k_outer = 0; k_outer < 32; ++k_outer) {\n T_dense_rf[(0)] = __ocml_fma_f32(placeholder[(((k_outer * 64) + ((int)threadIdx.x)))], placeholder1[((((((int)task_idx.x) * 2048) + (k_outer * 64)) + ((int)threadIdx.x)))], T_dense_rf[(0)]);\n }\n unsigned int mask[1];\n float t0[1];\n red_buf0[(0)] = T_dense_rf[(0)];\n ((int*)mask)[(0)] = 0;\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 32) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 32)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 16) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 16)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 8) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 8)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 4) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 4)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 2) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 2)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 1) >= 64) ? 
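\/* butterfly reduction over a 64-lane wavefront: partner = lane + offset for offsets 32,16,8,4,2,1 (clamped to self past the wavefront edge), then a broadcast of lane 0; __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) is the lane id *\/ 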
__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 1)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);\n red_buf0[(0)] = __hip_ds_bpermute(((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & (~63)) << 2), red_buf0[(0)]);\n if (((int)threadIdx.x) == 0) {\n T_dense[(0)] = red_buf0[(0)];\n }\n if (((int)threadIdx.x) == 0) {\n T_add[(((int)task_idx.x))] = (T_dense[(0)] + placeholder2[(((int)task_idx.x))]);\n }\n}\n\n__device__ void fused_nn_conv2d_add_2_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_add, float* __restrict__ placeholder2){\n float compute[1];\n __shared__ float pad_temp_shared[2048];\n __shared__ float placeholder_shared[2048];\n compute[(0)] = 0.000000e+00f;\n pad_temp_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 3))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 7))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + 
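\/* cooperative staging: each thread copies 8 consecutive floats of the 2048-float weight tile, mirroring the input-tile loads above *\/ 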
(((int)threadIdx.x) * 8)))] = placeholder1[(((((((int)task_idx.z) * 2048) + (((int)threadIdx.z) * 128)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))];\n for (int j = 1; j < 8; ++j) {\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + j))] = placeholder1[((((((((int)task_idx.z) * 2048) + (((int)threadIdx.z) * 128)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + j))];\n }\n __syncthreads();\n \/\/ Dot product over the 128 staged input channels; k = 126 and 127 follow unrolled, exactly as the code generator emitted them.\n for (int k = 0; k < 126; ++k) {\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + (k * 16)))], placeholder_shared[(((((int)threadIdx.z) * 128) + k))], compute[(0)]);\n }\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))], placeholder_shared[(((((int)threadIdx.z) * 128) + 126))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))], placeholder_shared[(((((int)threadIdx.z) * 128) + 127))], compute[(0)]);\n T_add[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = (compute[(0)] + placeholder2[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))]);\n}\n\n__device__ void fused_nn_conv2d_2_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ compute){\n float compute_local[4];\n __shared__ float pad_temp_shared[784];\n __shared__ float placeholder_shared[512];\n compute_local[(0)] = 0.000000e+00f;\n compute_local[(2)] = 0.000000e+00f;\n compute_local[(1)] = 0.000000e+00f;\n compute_local[(3)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + (((((int)threadIdx.y) * 13) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + ((((int)threadIdx.y) * 13) % 7)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 1))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 1) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 1) % 7)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 2))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 2) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 2) % 7)))];\n if (((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 3) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 781) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 46) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 3))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + ((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 3) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 4) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 780) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 45) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 4))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + 
((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 4) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 5) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 779) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 44) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 5))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + ((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 5) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 6) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 6) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 778) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 43) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 6))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 6) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 6) % 7)))];\n }\n }\n }\n }\n }\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) >> 4) * 512)) + (rc_outer * 16)) + (((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) & 15)))];\n __syncthreads();\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(0)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(2)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + 
(((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(1)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(3)]);\n \/\/ Channels rc = 1..13 of this 16-deep slice; each thread keeps two output columns (+0 and +2) for two output channels (+0 and +256) in flight.\n for (int rc = 1; rc < 14; ++rc) {\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + (rc * 49)))], placeholder_shared[(((((int)threadIdx.z) * 16) + rc))], compute_local[(0)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + (rc * 49)))], placeholder_shared[(((((int)threadIdx.z) * 16) + (256 + rc)))], compute_local[(2)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + ((rc * 49) + 2)))], placeholder_shared[(((((int)threadIdx.z) * 16) + rc))], compute_local[(1)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + ((rc * 49) + 2)))], placeholder_shared[(((((int)threadIdx.z) * 16) + (256 + rc)))], compute_local[(3)]);\n }\n 
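\/\/ rc = 14 and rc = 15 complete the slice; they are left unrolled exactly as the code generator emitted them.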
compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 686))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(0)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 686))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(2)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 688))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(1)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 688))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(3)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 735))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(0)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 735))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(2)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 737))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(1)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 737))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(3)]);\n }\n compute[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];\n compute[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 256))] = compute_local[(2)];\n compute[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];\n compute[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 257))] = compute_local[(3)];\n}\n\n__device__ void fused_nn_conv2d_add_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_add, float* __restrict__ placeholder2){\n float compute[2];\n __shared__ float pad_temp_shared[128];\n __shared__ float placeholder_shared[2048];\n for (int xx_init = 0; xx_init < 2; ++xx_init) {\n compute[(xx_init)] = 0.000000e+00f;\n }\n for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[(((((int)threadIdx.z) * 2) + ((int)threadIdx.y)))] = placeholder[((((rc_outer * 128) + (((int)threadIdx.z) * 2)) + ((int)threadIdx.y)))];\n for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 16; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 512)) + (rc_outer * 32)) + (((int)threadIdx.y) * 16)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))];\n }\n __syncthreads();\n for (int rc_inner = 0; rc_inner < 32; ++rc_inner) {\n for (int xx = 0; xx < 2; ++xx) {\n compute[(xx)] = __ocml_fma_f32(pad_temp_shared[((((rc_inner * 4) + 
(((int)threadIdx.y) * 2)) + xx))], placeholder_shared[(((((int)threadIdx.z) * 32) + rc_inner))], compute[(xx)]);\n }\n }\n }\n for (int ax3_inner_inner_inner = 0; ax3_inner_inner_inner < 2; ++ax3_inner_inner_inner) {\n T_add[(((((((int)task_idx.z) * 256) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ax3_inner_inner_inner))] = (compute[(ax3_inner_inner_inner)] + placeholder2[(((((((int)task_idx.z) * 256) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ax3_inner_inner_inner))]);\n }\n}\n\n__device__ void fused_nn_conv2d_add_multiply_add_nn_relu_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2, float* __restrict__ placeholder3, float* __restrict__ placeholder4){\n float compute[2];\n __shared__ float pad_temp_shared[128];\n __shared__ float placeholder_shared[2048];\n for (int xx_init = 0; xx_init < 2; ++xx_init) {\n compute[(xx_init)] = 0.000000e+00f;\n }\n for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[(((((int)threadIdx.z) * 2) + ((int)threadIdx.y)))] = placeholder[((((rc_outer * 128) + (((int)threadIdx.z) * 2)) + ((int)threadIdx.y)))];\n for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 16; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 512)) + (rc_outer * 32)) + (((int)threadIdx.y) * 16)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))];\n }\n __syncthreads();\n for (int rc_inner = 0; rc_inner < 32; ++rc_inner) {\n for (int xx = 0; xx < 2; ++xx) {\n compute[(xx)] = __ocml_fma_f32(pad_temp_shared[((((rc_inner * 4) + (((int)threadIdx.y) * 2)) + xx))], placeholder_shared[(((((int)threadIdx.z) * 32) + rc_inner))], compute[(xx)]);\n }\n }\n }\n for (int ax3_inner_inner_inner = 0; ax3_inner_inner_inner < 2; ++ax3_inner_inner_inner) {\n T_relu[(((((((int)task_idx.z) * 256) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ax3_inner_inner_inner))] = max(__ocml_fma_f32((compute[(ax3_inner_inner_inner)] + placeholder2[(((((((int)task_idx.z) * 256) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ax3_inner_inner_inner))]), placeholder3[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))], placeholder4[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))]), 0.000000e+00f);\n }\n}\n\n__device__ void fused_nn_max_pool2d_add_nn_relu_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ T_relu, float* __restrict__ placeholder1){\n float tensor[1];\n tensor[(0)] = -3.402823e+38f;\n for (int dh = 0; dh < 3; ++dh) {\n for (int dw = 0; dw < 3; ++dw) {\n tensor[(0)] = max(tensor[(0)], (((1 <= (((((int)threadIdx.x) >> 4) * 2) + dh)) && (1 <= (((((int)threadIdx.x) & 15) * 2) + dw))) ? 
placeholder[(((((((((int)task_idx.x) * 1024) + ((((int)threadIdx.x) >> 4) * 64)) + (dh * 32)) + ((((int)threadIdx.x) & 15) * 2)) + dw) - 33))] : -3.402823e+38f));\n }\n }\n T_relu[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = max((tensor[(0)] + placeholder1[(((int)task_idx.x))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_3_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[4];\n __shared__ float pad_temp_shared[288];\n __shared__ float placeholder_shared[2304];\n compute[(0)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {\n __syncthreads();\n if (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) < 288) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[(((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)))] = (((((6 <= (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 36)) && ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 36) < 30)) && (1 <= (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 6))) && ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 6) < 5)) ? placeholder[((((((rc_outer * 128) + ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) \/ 36) * 16)) + (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 36) \/ 6) * 4)) + (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) % 6)) - 5))] : 0.000000e+00f);\n }\n }\n if (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) < 287) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1))] = (((((6 <= ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 36)) && (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 36) < 30)) && (1 <= ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 6))) && (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 6) < 5)) ? placeholder[((((((rc_outer * 128) + (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) \/ 36) * 16)) + ((((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 36) \/ 6) * 4)) + ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 1) % 6)) - 5))] : 0.000000e+00f);\n }\n }\n if (((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) < 286) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2))] = (((((6 <= ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 36)) && (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 36) < 30)) && (1 <= ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 6))) && (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 6) < 5)) ? 
placeholder[((((((rc_outer * 128) + (((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) \/ 36) * 16)) + ((((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 36) \/ 6) * 4)) + ((((((int)threadIdx.z) * 9) + (((int)threadIdx.x) * 3)) + 2) % 6)) - 5))] : 0.000000e+00f);\n }\n }\n \/\/ Cooperative load of this slice's 3x3 filter weights: 18 consecutive values per thread.\n for (int j = 0; j < 18; ++j) {\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.x) * 18)) + j))] = placeholder1[((((((((int)task_idx.z) * 73728) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (((int)threadIdx.x) * 18)) + j))];\n }\n __syncthreads();\n \/\/ 3x3 taps of the first staged input channel; each thread keeps four output pixels (i) in flight. The remaining channels of the slice continue below, unrolled as emitted.\n for (int ky = 0; ky < 3; ++ky) {\n for (int kx = 0; kx < 3; ++kx) {\n for (int i = 0; i < 4; ++i) {\n compute[(i)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x) + (((ky * 6) + kx) + (i * 6)))], placeholder_shared[(((((int)threadIdx.z) * 72) + ((ky * 3) + kx)))], compute[(i)]);\n }\n }\n }\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 42))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 54))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 37))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 43))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 49))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(2)]);\n compute[(3)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 55))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 38))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 50))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 42))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 54))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 43))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 49))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 55))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 61))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 50))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 62))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 54))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 66))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 49))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 55))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 61))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], 
compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 67))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 50))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 62))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 68))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 72))], placeholder_shared[(((((int)threadIdx.z) * 72) + 18))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 78))], placeholder_shared[(((((int)threadIdx.z) * 72) + 18))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 84))], placeholder_shared[(((((int)threadIdx.z) * 72) + 18))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 90))], placeholder_shared[(((((int)threadIdx.z) * 72) + 18))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 73))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 79))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 85))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 91))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 74))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 80))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 86))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 92))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 78))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 84))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 90))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 96))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 79))], placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 85))], placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 91))], 
placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 97))], placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 80))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 86))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 92))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 98))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 84))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 90))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 96))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 102))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 85))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 91))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 97))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 103))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 86))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 92))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 98))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 104))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 108))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 114))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 120))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 126))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 109))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 115))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(1)]);\n compute[(2)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 121))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 127))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 110))], placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 116))], placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 122))], placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 128))], placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 114))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 120))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 126))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 132))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 115))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 121))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 127))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 133))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 116))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 122))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 128))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 134))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 120))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 126))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 132))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 138))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 121))], placeholder_shared[(((((int)threadIdx.z) * 72) + 34))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 127))], placeholder_shared[(((((int)threadIdx.z) * 
72) + 34))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 133))], placeholder_shared[(((((int)threadIdx.z) * 72) + 34))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 139))], placeholder_shared[(((((int)threadIdx.z) * 72) + 34))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 122))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 128))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 134))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 140))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 144))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 150))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 156))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 162))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 145))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 151))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 157))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 163))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 146))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 152))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 158))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 164))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 150))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 156))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 162))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 168))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 151))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 
157))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 163))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 169))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 152))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 158))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 164))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 170))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 156))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 162))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 168))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 174))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 157))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 163))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 169))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 175))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 158))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 164))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 170))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 176))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 180))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 186))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 192))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 198))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 181))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(0)]);\n compute[(1)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 187))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 193))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 199))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 182))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 188))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 194))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 200))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 186))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 192))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 198))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 204))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 187))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 193))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 199))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 205))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 188))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 194))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 200))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 206))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 192))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 198))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 204))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 210))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 193))], placeholder_shared[(((((int)threadIdx.z) * 
72) + 52))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 199))], placeholder_shared[(((((int)threadIdx.z) * 72) + 52))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 205))], placeholder_shared[(((((int)threadIdx.z) * 72) + 52))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 211))], placeholder_shared[(((((int)threadIdx.z) * 72) + 52))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 194))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 200))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 206))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 212))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 216))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 222))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 228))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 234))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 217))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 223))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 229))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 235))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 218))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 224))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 230))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 236))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 222))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 228))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 234))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 240))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 
223))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 229))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 235))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 241))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 224))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 230))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 236))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 242))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 228))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 234))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 240))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 246))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 229))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 235))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 241))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 247))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 230))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 236))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 242))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 248))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 252))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 258))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 264))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 270))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], compute[(3)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 253))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 259))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 265))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 271))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 254))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 260))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 266))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 272))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 258))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 264))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 270))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 276))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 259))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 265))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 271))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 277))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 260))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 266))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 272))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 278))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 264))], placeholder_shared[(((((int)threadIdx.z) * 72) + 69))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 270))], placeholder_shared[(((((int)threadIdx.z) * 72) + 69))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 276))], placeholder_shared[(((((int)threadIdx.z) * 72) + 69))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 282))], placeholder_shared[(((((int)threadIdx.z) * 
72) + 69))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 265))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 271))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 277))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 283))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 266))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 272))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(1)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 278))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(2)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 284))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(3)]);\n }\n T_relu[((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + ((int)threadIdx.x)) + 4))] = max((compute[(1)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + ((int)threadIdx.x)) + 8))] = max((compute[(2)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + ((int)threadIdx.x)) + 12))] = max((compute[(3)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_1_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[2];\n __shared__ float pad_temp_shared[64];\n __shared__ float placeholder_shared[256];\n compute[(0)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 128; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))] = placeholder[(((((rc_outer * 64) + (((int)threadIdx.z) * 8)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = placeholder[((((((rc_outer * 64) + (((int)threadIdx.z) * 8)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))];\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 1))];\n 
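\/\/ remaining cooperative loads of this rc_outer weight tile into LDS (8 consecutive floats per thread)\n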
placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 3))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 4))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 5))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 6))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 7))] = placeholder1[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 4096)) + (((int)threadIdx.y) * 2048)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 7))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 2) + ((int)threadIdx.x)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 2) + ((int)threadIdx.x)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 128))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 129))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 130))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 131))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 132))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + 
((int)threadIdx.x)) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 133))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 134))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 135))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 136))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 137))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 138))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 139))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 140))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 141))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 142))], compute[(1)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 143))], compute[(1)]);\n }\n \/\/ epilogue: add the per-channel bias (placeholder2) and apply ReLU\n T_relu[(((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 16) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) + 32))] = max((compute[(1)] + placeholder2[((((((int)task_idx.z) * 16) + ((int)threadIdx.z)) + 8))]), 0.000000e+00f);\n}\n\n\/\/ elementwise bias-add followed by ReLU; one output element per thread\n__device__ void fused_add_nn_relu_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ T_relu, float* __restrict__ placeholder, float* __restrict__ placeholder1){\n T_relu[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = max((placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] + placeholder1[(((((int)task_idx.x) * 64) + (((int)threadIdx.x) >> 2)))]), 0.000000e+00f);\n}\n\n\/\/ softmax over 1000 logits: wave-wide max reduction, exponentiation, wave-wide sum, then normalization\n__device__ void fused_nn_softmax_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ T_softmax_norm){\n float normal_reduce_temp0[1];\n float red_buf0[1];\n float T_softmax_exp[16];\n float normal_reduce_temp01[1];\n float red_buf01[1];\n normal_reduce_temp0[(0)] = -3.402823e+38f;\n \/\/ per-thread partial max over 16 logits (guarded for the 1000-element tail)\n for (int k_inner = 0; k_inner < 16; ++k_inner) {\n if (((((int)threadIdx.x) * 16) + k_inner) < 1000) {\n normal_reduce_temp0[(0)] = max(normal_reduce_temp0[(0)], placeholder[(((((int)threadIdx.x) * 16) + k_inner))]);\n }\n }\n unsigned int mask[1];\n float t0[1];\n red_buf0[(0)] = normal_reduce_temp0[(0)];\n ((int*)mask)[(0)] = 0;\n \/\/ wavefront-wide max reduction: ds_bpermute shuffles with halving offsets (64-lane wave, lane id via mbcnt)\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 32) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 32)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 16) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 16)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 8) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 8)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 4) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 4)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 2) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 2)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n t0[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 1) >= 64) ? 
__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 1)) << 2), red_buf0[(0)]);\n red_buf0[(0)] = max(red_buf0[(0)], t0[(0)]);\n \/\/ broadcast the wave-wide max from lane 0\n red_buf0[(0)] = __hip_ds_bpermute(((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & (~63)) << 2), red_buf0[(0)]);\n \/\/ subtract the max and exponentiate (numerically stable softmax)\n for (int i1_inner_outer = 0; i1_inner_outer < 4; ++i1_inner_outer) {\n for (int i1_inner_inner_s = 0; i1_inner_inner_s < 4; ++i1_inner_inner_s) {\n if ((((((int)threadIdx.x) * 16) + (i1_inner_outer * 4)) + i1_inner_inner_s) < 1000) {\n T_softmax_exp[(((i1_inner_outer * 4) + i1_inner_inner_s))] = __ocml_exp_f32((placeholder[((((((int)threadIdx.x) * 16) + (i1_inner_outer * 4)) + i1_inner_inner_s))] - red_buf0[(0)]));\n }\n }\n }\n \/\/ per-thread partial sum of the exponentials\n normal_reduce_temp01[(0)] = 0.000000e+00f;\n for (int k_inner1 = 0; k_inner1 < 16; ++k_inner1) {\n if (((((int)threadIdx.x) * 16) + k_inner1) < 1000) {\n normal_reduce_temp01[(0)] = (normal_reduce_temp01[(0)] + __hip_ds_bpermute(((((int)threadIdx.x) + (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & (~63))) << 2), T_softmax_exp[(k_inner1)]));\n }\n }\n unsigned int mask1[1];\n float t01[1];\n red_buf01[(0)] = normal_reduce_temp01[(0)];\n ((int*)mask1)[(0)] = 0;\n \/\/ wavefront-wide sum reduction, same shuffle pattern as the max reduction above\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 32) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 32)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 16) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 16)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 8) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 8)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 4) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 4)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 2) >= 64) ? __mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 2)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n t01[(0)] = __hip_ds_bpermute((((((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & 63) + 1) >= 64) ? 
__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) : (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) + 1)) << 2), red_buf01[(0)]);\n red_buf01[(0)] = (red_buf01[(0)] + t01[(0)]);\n red_buf01[(0)] = __hip_ds_bpermute(((__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & (~63)) << 2), red_buf01[(0)]);\n for (int i1_inner_outer1 = 0; i1_inner_outer1 < 4; ++i1_inner_outer1) {\n for (int i1_inner_inner_s1 = 0; i1_inner_inner_s1 < 4; ++i1_inner_inner_s1) {\n if ((((((int)threadIdx.x) * 16) + (i1_inner_outer1 * 4)) + i1_inner_inner_s1) < 1000) {\n T_softmax_norm[((((((int)threadIdx.x) * 16) + (i1_inner_outer1 * 4)) + i1_inner_inner_s1))] = (__hip_ds_bpermute(((((int)threadIdx.x) + (__mbcnt_hi(-1, __mbcnt_lo(-1, 0)) & (~63))) << 2), T_softmax_exp[(((i1_inner_outer1 * 4) + i1_inner_inner_s1))]) \/ red_buf01[(0)]);\n }\n }\n }\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_7_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[1];\n __shared__ float pad_temp_shared[2048];\n __shared__ float placeholder_shared[2048];\n compute[(0)] = 0.000000e+00f;\n pad_temp_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 3))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) 
  #pragma unroll
  for (int j = 0; j < 8; ++j) {
    placeholder_shared[dst_base + j] = placeholder1[wgt_base + j];
  }
  __syncthreads();
  // First 128-element reduction chunk: each thread accumulates one output
  // element, reading the input tile at stride 16 and its channel's weights
  // contiguously.
  #pragma unroll
  for (int k = 0; k < 128; ++k) {
    compute[(0)] = __ocml_fma_f32(pad_temp_shared[((ty * 4) + tx) + (k * 16)], placeholder_shared[(tz * 128) + k], compute[(0)]);
  }
  __syncthreads();
  // Stage the second input tile (8192 floats further along) and the next 128
  // weights per output channel.
  #pragma unroll
  for (int j = 0; j < 4; ++j) {
    pad_temp_shared[dst_base + j] = placeholder[src_lo + 8192 + j];
    pad_temp_shared[dst_base + 4 + j] = placeholder[src_hi + 8192 + j];
  }
  #pragma unroll
  for (int j = 0; j < 8; ++j) {
    placeholder_shared[dst_base + j] = placeholder1[wgt_base + 128 + j];
  }
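  // Second reduction chunk: same accumulation pattern over the tile staged
  // just above.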
  __syncthreads();
  #pragma unroll
  for (int k = 0; k < 128; ++k) {
    compute[(0)] = __ocml_fma_f32(pad_temp_shared[((ty * 4) + tx) + (k * 16)], placeholder_shared[(tz * 128) + k], compute[(0)]);
  }
  __syncthreads();
  // Stage the third input tile (+16384) and the next 128 weights (+256).
  #pragma unroll
  for (int j = 0; j < 4; ++j) {
    pad_temp_shared[dst_base + j] = placeholder[src_lo + 16384 + j];
    pad_temp_shared[dst_base + 4 + j] = placeholder[src_hi + 16384 + j];
  }
  #pragma unroll
  for (int j = 0; j < 8; ++j) {
    placeholder_shared[dst_base + j] = placeholder1[wgt_base + 256 + j];
  }
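  // Third reduction chunk: identical accumulation pattern.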
  __syncthreads();
  #pragma unroll
  for (int k = 0; k < 50; ++k) {
    compute[(0)] = __ocml_fma_f32(pad_temp_shared[((ty * 4) + tx) + (k * 16)], placeholder_shared[(tz * 128) + k], compute[(0)]);
  }
  // The remaining multiply-accumulate steps of this chunk continue unrolled:
  compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))], placeholder_shared[(((((int)threadIdx.z) * 
128) + 50))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))], placeholder_shared[(((((int)threadIdx.z) * 128) + 51))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 128) + 52))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))], placeholder_shared[(((((int)threadIdx.z) * 128) + 53))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))], placeholder_shared[(((((int)threadIdx.z) * 128) + 54))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))], placeholder_shared[(((((int)threadIdx.z) * 128) + 55))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 128) + 56))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))], placeholder_shared[(((((int)threadIdx.z) * 128) + 57))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))], placeholder_shared[(((((int)threadIdx.z) * 128) + 58))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))], placeholder_shared[(((((int)threadIdx.z) * 128) + 59))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 128) + 60))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))], placeholder_shared[(((((int)threadIdx.z) * 128) + 61))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))], placeholder_shared[(((((int)threadIdx.z) * 128) + 62))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))], placeholder_shared[(((((int)threadIdx.z) * 128) + 63))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))], placeholder_shared[(((((int)threadIdx.z) * 128) + 64))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))], placeholder_shared[(((((int)threadIdx.z) * 128) + 65))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))], placeholder_shared[(((((int)threadIdx.z) * 128) + 66))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))], placeholder_shared[(((((int)threadIdx.z) * 128) + 67))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))], placeholder_shared[(((((int)threadIdx.z) * 128) + 68))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))], placeholder_shared[(((((int)threadIdx.z) * 128) + 69))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) 
+ 1120))], placeholder_shared[(((((int)threadIdx.z) * 128) + 70))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))], placeholder_shared[(((((int)threadIdx.z) * 128) + 71))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))], placeholder_shared[(((((int)threadIdx.z) * 128) + 72))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))], placeholder_shared[(((((int)threadIdx.z) * 128) + 73))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))], placeholder_shared[(((((int)threadIdx.z) * 128) + 74))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))], placeholder_shared[(((((int)threadIdx.z) * 128) + 75))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))], placeholder_shared[(((((int)threadIdx.z) * 128) + 76))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))], placeholder_shared[(((((int)threadIdx.z) * 128) + 77))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))], placeholder_shared[(((((int)threadIdx.z) * 128) + 78))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))], placeholder_shared[(((((int)threadIdx.z) * 128) + 79))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))], placeholder_shared[(((((int)threadIdx.z) * 128) + 80))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))], placeholder_shared[(((((int)threadIdx.z) * 128) + 81))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))], placeholder_shared[(((((int)threadIdx.z) * 128) + 82))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1328))], placeholder_shared[(((((int)threadIdx.z) * 128) + 83))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))], placeholder_shared[(((((int)threadIdx.z) * 128) + 84))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))], placeholder_shared[(((((int)threadIdx.z) * 128) + 85))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))], placeholder_shared[(((((int)threadIdx.z) * 128) + 86))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))], placeholder_shared[(((((int)threadIdx.z) * 128) + 87))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))], placeholder_shared[(((((int)threadIdx.z) * 128) + 88))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))], placeholder_shared[(((((int)threadIdx.z) * 128) + 89))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))], placeholder_shared[(((((int)threadIdx.z) * 128) + 90))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))], placeholder_shared[(((((int)threadIdx.z) * 128) + 91))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))], placeholder_shared[(((((int)threadIdx.z) * 128) + 92))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))], placeholder_shared[(((((int)threadIdx.z) * 128) + 93))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))], placeholder_shared[(((((int)threadIdx.z) * 128) + 94))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))], placeholder_shared[(((((int)threadIdx.z) * 128) + 95))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))], placeholder_shared[(((((int)threadIdx.z) * 128) + 96))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))], placeholder_shared[(((((int)threadIdx.z) * 128) + 97))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))], placeholder_shared[(((((int)threadIdx.z) * 128) + 98))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))], placeholder_shared[(((((int)threadIdx.z) * 128) + 99))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))], placeholder_shared[(((((int)threadIdx.z) * 128) + 100))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))], placeholder_shared[(((((int)threadIdx.z) * 128) + 101))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))], placeholder_shared[(((((int)threadIdx.z) * 128) + 102))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))], placeholder_shared[(((((int)threadIdx.z) * 128) + 103))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))], placeholder_shared[(((((int)threadIdx.z) * 128) + 104))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))], placeholder_shared[(((((int)threadIdx.z) * 128) + 105))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))], placeholder_shared[(((((int)threadIdx.z) * 128) + 106))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))], placeholder_shared[(((((int)threadIdx.z) * 128) + 107))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))], placeholder_shared[(((((int)threadIdx.z) * 128) + 108))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))], 
placeholder_shared[(((((int)threadIdx.z) * 128) + 109))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))], placeholder_shared[(((((int)threadIdx.z) * 128) + 110))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))], placeholder_shared[(((((int)threadIdx.z) * 128) + 111))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))], placeholder_shared[(((((int)threadIdx.z) * 128) + 112))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))], placeholder_shared[(((((int)threadIdx.z) * 128) + 113))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))], placeholder_shared[(((((int)threadIdx.z) * 128) + 114))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))], placeholder_shared[(((((int)threadIdx.z) * 128) + 115))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))], placeholder_shared[(((((int)threadIdx.z) * 128) + 116))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))], placeholder_shared[(((((int)threadIdx.z) * 128) + 117))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))], placeholder_shared[(((((int)threadIdx.z) * 128) + 118))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))], placeholder_shared[(((((int)threadIdx.z) * 128) + 119))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))], placeholder_shared[(((((int)threadIdx.z) * 128) + 120))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))], placeholder_shared[(((((int)threadIdx.z) * 128) + 121))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))], placeholder_shared[(((((int)threadIdx.z) * 128) + 122))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))], placeholder_shared[(((((int)threadIdx.z) * 128) + 123))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))], placeholder_shared[(((((int)threadIdx.z) * 128) + 124))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))], placeholder_shared[(((((int)threadIdx.z) * 128) + 125))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))], placeholder_shared[(((((int)threadIdx.z) * 128) + 126))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))], placeholder_shared[(((((int)threadIdx.z) * 128) + 127))], compute[(0)]);\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + 
((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 24576))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 24577))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 24578))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 24579))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 24576))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 24577))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 24578))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 7))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 24579))];\n placeholder_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 384))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 385))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 386))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) 
+ 387))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 388))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 389))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 390))];\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 7))] = placeholder1[((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 512)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 391))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)))], placeholder_shared[((((int)threadIdx.z) * 128))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 16))], placeholder_shared[(((((int)threadIdx.z) * 128) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 32))], placeholder_shared[(((((int)threadIdx.z) * 128) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 48))], placeholder_shared[(((((int)threadIdx.z) * 128) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 128) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 80))], placeholder_shared[(((((int)threadIdx.z) * 128) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 96))], placeholder_shared[(((((int)threadIdx.z) * 128) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 112))], placeholder_shared[(((((int)threadIdx.z) * 128) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 128) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 144))], placeholder_shared[(((((int)threadIdx.z) * 128) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 160))], placeholder_shared[(((((int)threadIdx.z) * 128) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 176))], placeholder_shared[(((((int)threadIdx.z) * 128) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 128) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 208))], placeholder_shared[(((((int)threadIdx.z) * 128) + 13))], 
compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 224))], placeholder_shared[(((((int)threadIdx.z) * 128) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 240))], placeholder_shared[(((((int)threadIdx.z) * 128) + 15))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 128) + 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 272))], placeholder_shared[(((((int)threadIdx.z) * 128) + 17))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 288))], placeholder_shared[(((((int)threadIdx.z) * 128) + 18))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 304))], placeholder_shared[(((((int)threadIdx.z) * 128) + 19))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 128) + 20))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 336))], placeholder_shared[(((((int)threadIdx.z) * 128) + 21))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 352))], placeholder_shared[(((((int)threadIdx.z) * 128) + 22))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 368))], placeholder_shared[(((((int)threadIdx.z) * 128) + 23))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 128) + 24))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 400))], placeholder_shared[(((((int)threadIdx.z) * 128) + 25))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 416))], placeholder_shared[(((((int)threadIdx.z) * 128) + 26))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 432))], placeholder_shared[(((((int)threadIdx.z) * 128) + 27))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 128) + 28))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 464))], placeholder_shared[(((((int)threadIdx.z) * 128) + 29))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 480))], placeholder_shared[(((((int)threadIdx.z) * 128) + 30))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 496))], placeholder_shared[(((((int)threadIdx.z) * 128) + 31))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 128) + 32))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 528))], 
placeholder_shared[(((((int)threadIdx.z) * 128) + 33))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 544))], placeholder_shared[(((((int)threadIdx.z) * 128) + 34))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 560))], placeholder_shared[(((((int)threadIdx.z) * 128) + 35))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 128) + 36))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 592))], placeholder_shared[(((((int)threadIdx.z) * 128) + 37))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 608))], placeholder_shared[(((((int)threadIdx.z) * 128) + 38))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 624))], placeholder_shared[(((((int)threadIdx.z) * 128) + 39))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 128) + 40))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 656))], placeholder_shared[(((((int)threadIdx.z) * 128) + 41))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 672))], placeholder_shared[(((((int)threadIdx.z) * 128) + 42))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 688))], placeholder_shared[(((((int)threadIdx.z) * 128) + 43))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 128) + 44))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 720))], placeholder_shared[(((((int)threadIdx.z) * 128) + 45))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 736))], placeholder_shared[(((((int)threadIdx.z) * 128) + 46))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 752))], placeholder_shared[(((((int)threadIdx.z) * 128) + 47))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 128) + 48))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 784))], placeholder_shared[(((((int)threadIdx.z) * 128) + 49))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))], placeholder_shared[(((((int)threadIdx.z) * 128) + 50))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))], placeholder_shared[(((((int)threadIdx.z) * 128) + 51))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 128) + 52))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))], placeholder_shared[(((((int)threadIdx.z) * 128) + 53))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))], placeholder_shared[(((((int)threadIdx.z) * 128) + 54))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))], placeholder_shared[(((((int)threadIdx.z) * 128) + 55))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 128) + 56))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))], placeholder_shared[(((((int)threadIdx.z) * 128) + 57))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))], placeholder_shared[(((((int)threadIdx.z) * 128) + 58))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))], placeholder_shared[(((((int)threadIdx.z) * 128) + 59))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 128) + 60))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))], placeholder_shared[(((((int)threadIdx.z) * 128) + 61))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))], placeholder_shared[(((((int)threadIdx.z) * 128) + 62))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))], placeholder_shared[(((((int)threadIdx.z) * 128) + 63))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))], placeholder_shared[(((((int)threadIdx.z) * 128) + 64))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))], placeholder_shared[(((((int)threadIdx.z) * 128) + 65))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))], placeholder_shared[(((((int)threadIdx.z) * 128) + 66))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))], placeholder_shared[(((((int)threadIdx.z) * 128) + 67))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))], placeholder_shared[(((((int)threadIdx.z) * 128) + 68))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))], placeholder_shared[(((((int)threadIdx.z) * 128) + 69))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1120))], placeholder_shared[(((((int)threadIdx.z) * 128) + 70))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))], placeholder_shared[(((((int)threadIdx.z) * 128) + 71))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))], 
placeholder_shared[(((((int)threadIdx.z) * 128) + 72))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))], placeholder_shared[(((((int)threadIdx.z) * 128) + 73))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))], placeholder_shared[(((((int)threadIdx.z) * 128) + 74))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))], placeholder_shared[(((((int)threadIdx.z) * 128) + 75))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))], placeholder_shared[(((((int)threadIdx.z) * 128) + 76))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))], placeholder_shared[(((((int)threadIdx.z) * 128) + 77))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))], placeholder_shared[(((((int)threadIdx.z) * 128) + 78))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))], placeholder_shared[(((((int)threadIdx.z) * 128) + 79))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))], placeholder_shared[(((((int)threadIdx.z) * 128) + 80))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))], placeholder_shared[(((((int)threadIdx.z) * 128) + 81))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))], placeholder_shared[(((((int)threadIdx.z) * 128) + 82))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1328))], placeholder_shared[(((((int)threadIdx.z) * 128) + 83))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))], placeholder_shared[(((((int)threadIdx.z) * 128) + 84))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))], placeholder_shared[(((((int)threadIdx.z) * 128) + 85))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))], placeholder_shared[(((((int)threadIdx.z) * 128) + 86))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))], placeholder_shared[(((((int)threadIdx.z) * 128) + 87))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))], placeholder_shared[(((((int)threadIdx.z) * 128) + 88))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))], placeholder_shared[(((((int)threadIdx.z) * 128) + 89))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))], placeholder_shared[(((((int)threadIdx.z) * 128) + 90))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))], placeholder_shared[(((((int)threadIdx.z) * 128) + 91))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))], placeholder_shared[(((((int)threadIdx.z) * 128) + 92))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))], placeholder_shared[(((((int)threadIdx.z) * 128) + 93))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))], placeholder_shared[(((((int)threadIdx.z) * 128) + 94))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))], placeholder_shared[(((((int)threadIdx.z) * 128) + 95))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))], placeholder_shared[(((((int)threadIdx.z) * 128) + 96))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))], placeholder_shared[(((((int)threadIdx.z) * 128) + 97))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))], placeholder_shared[(((((int)threadIdx.z) * 128) + 98))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))], placeholder_shared[(((((int)threadIdx.z) * 128) + 99))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))], placeholder_shared[(((((int)threadIdx.z) * 128) + 100))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))], placeholder_shared[(((((int)threadIdx.z) * 128) + 101))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))], placeholder_shared[(((((int)threadIdx.z) * 128) + 102))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))], placeholder_shared[(((((int)threadIdx.z) * 128) + 103))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))], placeholder_shared[(((((int)threadIdx.z) * 128) + 104))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))], placeholder_shared[(((((int)threadIdx.z) * 128) + 105))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))], placeholder_shared[(((((int)threadIdx.z) * 128) + 106))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))], placeholder_shared[(((((int)threadIdx.z) * 128) + 107))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))], placeholder_shared[(((((int)threadIdx.z) * 128) + 108))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))], placeholder_shared[(((((int)threadIdx.z) * 128) + 109))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))], placeholder_shared[(((((int)threadIdx.z) * 128) + 110))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))], 
placeholder_shared[(((((int)threadIdx.z) * 128) + 111))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))], placeholder_shared[(((((int)threadIdx.z) * 128) + 112))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))], placeholder_shared[(((((int)threadIdx.z) * 128) + 113))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))], placeholder_shared[(((((int)threadIdx.z) * 128) + 114))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))], placeholder_shared[(((((int)threadIdx.z) * 128) + 115))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))], placeholder_shared[(((((int)threadIdx.z) * 128) + 116))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))], placeholder_shared[(((((int)threadIdx.z) * 128) + 117))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))], placeholder_shared[(((((int)threadIdx.z) * 128) + 118))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))], placeholder_shared[(((((int)threadIdx.z) * 128) + 119))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))], placeholder_shared[(((((int)threadIdx.z) * 128) + 120))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))], placeholder_shared[(((((int)threadIdx.z) * 128) + 121))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))], placeholder_shared[(((((int)threadIdx.z) * 128) + 122))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))], placeholder_shared[(((((int)threadIdx.z) * 128) + 123))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))], placeholder_shared[(((((int)threadIdx.z) * 128) + 124))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))], placeholder_shared[(((((int)threadIdx.z) * 128) + 125))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))], placeholder_shared[(((((int)threadIdx.z) * 128) + 126))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))], placeholder_shared[(((((int)threadIdx.z) * 128) + 127))], compute[(0)]);\n T_relu[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 16) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_3_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_add, float* __restrict__ placeholder2){\n float compute[16];\n __shared__ float pad_temp_shared[1024];\n 
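// Shared-memory staging for the reduction loop below: pad_temp_shared caches an input tile from placeholder, and placeholder_shared caches the matching filter block from placeholder1 on each rc_outer iteration.\n 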
__shared__ float placeholder_shared[1024];\n compute[(0)] = 0.000000e+00f;\n compute[(4)] = 0.000000e+00f;\n compute[(8)] = 0.000000e+00f;\n compute[(12)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(5)] = 0.000000e+00f;\n compute[(9)] = 0.000000e+00f;\n compute[(13)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(6)] = 0.000000e+00f;\n compute[(10)] = 0.000000e+00f;\n compute[(14)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n compute[(7)] = 0.000000e+00f;\n compute[(11)] = 0.000000e+00f;\n compute[(15)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder[((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(12)]);\n compute[(1)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(9)]);\n compute[(13)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(6)]);\n compute[(10)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(3)]);\n 
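\/\/ NOTE: this fully unrolled block follows one pattern throughout; a rolled-up reading (illustrative only; base, rc, oc, dx are editorial shorthand, not variables in the generated code):\n \/\/   const int base = ((int)threadIdx.y) * 16 + ((int)threadIdx.x) * 4;\n \/\/   for (int rc = 0; rc < 16; ++rc)       \/\/ reduction axis within the 16-wide slice (+64 input stride per step)\n \/\/     for (int oc = 0; oc < 4; ++oc)      \/\/ output-channel group (+256 weight stride)\n \/\/       for (int dx = 0; dx < 4; ++dx)    \/\/ output column (+1 input stride)\n \/\/         compute[dx + 4 * oc] = __ocml_fma_f32(pad_temp_shared[base + rc * 64 + dx], placeholder_shared[((int)threadIdx.z) * 16 + oc * 256 + rc], compute[dx + 4 * oc]);\n \/\/ Each thread therefore accumulates a 4 x 4 micro-tile; the epilogue below adds placeholder2 elementwise into T_add.\n 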
compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], 
compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
518))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) 
+ 263))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 
16) + 8))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 
643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) 
* 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 
16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(6)]);\n compute[(10)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(15)]);\n }\n T_add[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = (compute[(0)] + placeholder2[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))] = (compute[(4)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))] = (compute[(8)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))] = (compute[(12)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = (compute[(1)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))] = (compute[(5)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))] = (compute[(9)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + 
(((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))] = (compute[(13)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = (compute[(2)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))] = (compute[(6)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))] = (compute[(10)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))] = (compute[(14)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = (compute[(3)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))] = (compute[(7)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))] = (compute[(11)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))]);\n T_add[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))] = (compute[(15)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))]);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_5_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[4];\n 
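\/\/ NOTE: fused conv2d + bias-add + ReLU variant. compute[0..3] appear to hold a 2 (x-columns) x 2 (output-channel groups) micro-tile per thread. The thread_idx parameter is unused in this body; the builtin threadIdx is read directly, with task_idx standing in for blockIdx.\n 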
__shared__ float pad_temp_shared[784];\n __shared__ float placeholder_shared[512];\n compute[(0)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + (((((int)threadIdx.y) * 13) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + ((((int)threadIdx.y) * 13) % 7)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 1))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 1) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 1) % 7)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 2))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 2) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 2) % 7)))];\n if (((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 3) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 781) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 46) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 3))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + ((((((((int)threadIdx.y) * 13) + 3) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 3) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 4) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 780) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 45) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 4))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + ((((((((int)threadIdx.y) * 13) + 4) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 4) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) \/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 5) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 779) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 44) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 5))] = placeholder[((((((rc_outer * 1024) + ((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) \/ 7) * 64)) + (((int)threadIdx.z) * 64)) + ((((((((int)threadIdx.y) * 13) + 5) \/ 7) + ((int)threadIdx.x)) % 7) * 8)) + (((((int)threadIdx.y) * 13) + 5) % 7)))];\n }\n }\n }\n }\n if (((((((((int)threadIdx.y) * 13) + 6) \/ 7) + ((int)threadIdx.x)) 
\/ 7) + ((int)threadIdx.z)) < 16) {\n if ((((((int)threadIdx.z) * 7) + (((((int)threadIdx.y) * 13) + 6) \/ 7)) + ((int)threadIdx.x)) < 112) {\n if ((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) < 778) {\n if (((((int)threadIdx.y) * 13) + (((int)threadIdx.x) * 7)) < 43) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[(((((((int)threadIdx.z) * 49) + (((int)threadIdx.y) * 13)) + (((int)threadIdx.x) * 7)) + 6))] = placeholder[((((((rc_outer * 1024) + (((int)threadIdx.z) * 64)) + ((((((int)threadIdx.y) * 13) + 6) \/ 7) * 8)) + (((int)threadIdx.x) * 8)) + (((((int)threadIdx.y) * 13) + 6) % 7)))];\n }\n }\n }\n }\n }\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) >> 4) * 512)) + (rc_outer * 16)) + (((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 1024)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) >> 4) * 512)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) & 15)))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 49))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 49))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 51))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 51))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], 
compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 98))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 98))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 100))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 100))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 147))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 147))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 149))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 149))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 196))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 196))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 198))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 198))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 245))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 245))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 247))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 247))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 294))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 294))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 296))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n 
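\/\/ NOTE: rolled-up reading of this unrolled block (illustrative only; base = ((int)threadIdx.y) * 14 + ((int)threadIdx.x) * 4 is editorial shorthand):\n \/\/   for (int rc = 0; rc < 16; ++rc)      \/\/ +49 per step: one 7x7 slice of pad_temp_shared\n \/\/     for (int oc = 0; oc < 2; ++oc)     \/\/ +256 weight stride per output-channel group\n \/\/       for (int dx = 0; dx < 2; ++dx)   \/\/ input advances by 2*dx for a 1-column output step, i.e. stride-2 in x\n \/\/         compute[dx + 2 * oc] = __ocml_fma_f32(pad_temp_shared[base + rc * 49 + 2 * dx], placeholder_shared[((int)threadIdx.z) * 16 + oc * 256 + rc], compute[dx + 2 * oc]);\n 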
compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 296))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 343))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 343))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 345))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 345))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 392))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 392))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 394))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 394))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 441))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 441))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 443))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 443))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 490))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 490))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 492))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 492))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 539))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 539))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(2)]);\n compute[(1)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 541))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 541))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 588))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 588))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 590))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 590))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 637))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 637))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 639))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 639))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 686))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 686))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 688))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 688))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 735))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 735))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 737))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 4)) + 737))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(3)]);\n }\n T_relu[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + 
((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 256))] = max((compute[(2)] + placeholder2[((((((int)task_idx.z) * 32) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = max((compute[(1)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 257))] = max((compute[(3)] + placeholder2[((((((int)task_idx.z) * 32) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n}\n\n\/\/ Convolution kernel body without bias or activation. Judging by the index\n\/\/ arithmetic this is a 1x1 convolution: each rc_outer step stages 1024 input\n\/\/ values and 1024 filter weights in shared memory, and every thread then\n\/\/ accumulates a 4-pixel x 4-output-channel-block register tile over 16 input\n\/\/ channels. Note that thread_idx is unused; the body reads the threadIdx\n\/\/ builtin directly, as the other kernel bodies in this file do.\n__device__ void fused_nn_conv2d_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ compute){\n float compute_local[16];\n __shared__ float pad_temp_shared[1024];\n __shared__ float placeholder_shared[1024];\n compute_local[(0)] = 0.000000e+00f;\n compute_local[(4)] = 0.000000e+00f;\n compute_local[(8)] = 0.000000e+00f;\n compute_local[(12)] = 0.000000e+00f;\n compute_local[(1)] = 0.000000e+00f;\n compute_local[(5)] = 0.000000e+00f;\n compute_local[(9)] = 0.000000e+00f;\n compute_local[(13)] = 0.000000e+00f;\n compute_local[(2)] = 0.000000e+00f;\n compute_local[(6)] = 0.000000e+00f;\n compute_local[(10)] = 0.000000e+00f;\n compute_local[(14)] = 0.000000e+00f;\n compute_local[(3)] = 0.000000e+00f;\n compute_local[(7)] = 0.000000e+00f;\n compute_local[(11)] = 0.000000e+00f;\n compute_local[(15)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder[((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) 
* 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[((((int)threadIdx.z) * 16))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute_local[(15)]);\n compute_local[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute_local[(4)]);\n 
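\/\/ NOTE: this 16-FMA group repeats for input channels c = 0..15: channel c\n \/\/ reads the input tile at offset c * 64 (+0..3 for the four output pixels) and\n \/\/ the filter tile at threadIdx.z * 16 + c (+0, +256, +512 or +768 for the four\n \/\/ output-channel blocks).\n 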
compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) 
+ 771))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute_local[(2)]);\n compute_local[(6)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], 
compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute_local[(15)]);\n compute_local[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], 
compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute_local[(9)]);\n compute_local[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute_local[(15)]);\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(0)]);\n compute_local[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(4)]);\n compute_local[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute_local[(8)]);\n compute_local[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute_local[(12)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(1)]);\n compute_local[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(5)]);\n compute_local[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute_local[(9)]);\n compute_local[(13)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute_local[(13)]);\n compute_local[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(2)]);\n compute_local[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(6)]);\n compute_local[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute_local[(10)]);\n compute_local[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute_local[(14)]);\n compute_local[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute_local[(3)]);\n compute_local[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute_local[(7)]);\n compute_local[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute_local[(11)]);\n compute_local[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute_local[(15)]);\n }\n compute[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))] = compute_local[(4)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))] = compute_local[(8)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))] = compute_local[(12)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))] = compute_local[(5)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))] = compute_local[(9)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))] = compute_local[(13)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 
256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))] = compute_local[(6)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))] = compute_local[(10)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))] = compute_local[(14)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))] = compute_local[(7)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))] = compute_local[(11)];\n compute[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))] = compute_local[(15)];\n}\n\n\/*\n * Fused conv2d + add + ReLU, layer 12 (generated code; the annotations here and\n * below are added for readability and inferred from the indexing, not part of\n * the original schedule): each block stages a zero-padded 37x13 input tile\n * (padding 3, valid range 3..66 of a 64x64 channel) in pad_temp_shared, plus\n * two 7x7 filter slices per threadIdx.z in placeholder_shared; each thread\n * then accumulates eight outputs in compute[] -- four stride-2 x-positions\n * times two output channels -- over the rc_outer input-channel loop.\n *\/\n__device__ void fused_nn_conv2d_add_nn_relu_12_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[8];\n __shared__ float pad_temp_shared[481];\n __shared__ float placeholder_shared[1568];\n compute[(0)] = 0.000000e+00f;\n compute[(4)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(5)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(6)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n compute[(7)] = 0.000000e+00f;\n \/\/ One iteration per input channel: sync, re-stage both shared buffers, then\n \/\/ run the fully unrolled 7x7 multiply-accumulate sequence below.\n for (int rc_outer = 0; rc_outer < 3; ++rc_outer) {\n __syncthreads();\n if (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) < 481) {\n pad_temp_shared[(((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)))] = (((((3 <= ((((int)task_idx.y) * 32) + (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) \/ 13))) && (((((int)task_idx.y) * 32) + (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) \/ 13)) < 67)) && (3 <= ((((int)task_idx.x) * 8) + (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) % 13)))) && (((((int)task_idx.x) * 8) + (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) % 13)) < 67)) ? placeholder[(((((((rc_outer * 4096) + (((int)task_idx.y) * 2048)) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) \/ 13) * 64)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) % 13)) - 195))] : 0.000000e+00f);\n }\n if (((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) < 480) {\n if (((int)threadIdx.y) < 15) {\n pad_temp_shared[((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1))] = (((((3 <= ((((int)task_idx.y) * 32) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) \/ 13))) && (((((int)task_idx.y) * 32) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) \/ 13)) < 67)) && (3 <= ((((int)task_idx.x) * 8) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) % 13)))) && (((((int)task_idx.x) * 8) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) % 13)) < 67)) ? 
placeholder[(((((((rc_outer * 4096) + (((int)task_idx.y) * 2048)) + (((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) \/ 13) * 64)) + (((int)task_idx.x) * 8)) + ((((((int)threadIdx.z) * 31) + (((int)threadIdx.y) * 2)) + 1) % 13)) - 195))] : 0.000000e+00f);\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1568) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[(((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)))] = placeholder1[((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1567) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 1))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1566) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 2))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1565) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 3))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 3))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1564) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 4))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 4))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1563) {\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 5))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 5))];\n }\n }\n }\n }\n if (((((int)threadIdx.z) * 2) + (((int)threadIdx.y) \/ 7)) < 32) {\n if (((((int)threadIdx.z) * 14) + ((int)threadIdx.y)) < 224) {\n if (((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) < 1562) 
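\/* annotation: seventh of seven staggered stores; together they let each (z, y) thread stage one 7-wide filter row of input channel rc_outer into placeholder_shared *\/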
{\n if (((int)threadIdx.y) < 14) {\n placeholder_shared[((((((int)threadIdx.z) * 98) + (((int)threadIdx.y) * 7)) + 6))] = placeholder1[(((((((((int)task_idx.z) * 4704) + (((int)threadIdx.z) * 294)) + ((((int)threadIdx.y) \/ 7) * 147)) + (rc_outer * 49)) + ((((int)threadIdx.y) % 7) * 7)) + 6))];\n }\n }\n }\n }\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.y) * 26))], placeholder_shared[((((int)threadIdx.z) * 98))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[((((int)threadIdx.z) * 98))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 2))], placeholder_shared[((((int)threadIdx.z) * 98))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[((((int)threadIdx.z) * 98))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.y) * 26))], placeholder_shared[(((((int)threadIdx.z) * 98) + 49))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[(((((int)threadIdx.z) * 98) + 49))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 2))], placeholder_shared[(((((int)threadIdx.z) * 98) + 49))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 49))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 1))], placeholder_shared[(((((int)threadIdx.z) * 98) + 1))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 1))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 3))], placeholder_shared[(((((int)threadIdx.z) * 98) + 1))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 1))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 1))], placeholder_shared[(((((int)threadIdx.z) * 98) + 50))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 50))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 3))], placeholder_shared[(((((int)threadIdx.z) * 98) + 50))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 50))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 2))], placeholder_shared[(((((int)threadIdx.z) * 98) + 2))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 2))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[(((((int)threadIdx.z) * 98) + 2))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 2))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 2))], placeholder_shared[(((((int)threadIdx.z) * 98) + 51))], compute[(2)]);\n compute[(6)] = 
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 51))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[(((((int)threadIdx.z) * 98) + 51))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 51))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 3))], placeholder_shared[(((((int)threadIdx.z) * 98) + 3))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 3))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 3))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 9))], placeholder_shared[(((((int)threadIdx.z) * 98) + 3))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 3))], placeholder_shared[(((((int)threadIdx.z) * 98) + 52))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 52))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 52))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 9))], placeholder_shared[(((((int)threadIdx.z) * 98) + 52))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[(((((int)threadIdx.z) * 98) + 4))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 4))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 4))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 10))], placeholder_shared[(((((int)threadIdx.z) * 98) + 4))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 4))], placeholder_shared[(((((int)threadIdx.z) * 98) + 53))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 53))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 53))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 10))], placeholder_shared[(((((int)threadIdx.z) * 98) + 53))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 5))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 9))], placeholder_shared[(((((int)threadIdx.z) * 98) + 5))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 5))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 11))], placeholder_shared[(((((int)threadIdx.z) * 98) + 5))], compute[(5)]);\n compute[(2)] 
= __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 5))], placeholder_shared[(((((int)threadIdx.z) * 98) + 54))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 9))], placeholder_shared[(((((int)threadIdx.z) * 98) + 54))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 7))], placeholder_shared[(((((int)threadIdx.z) * 98) + 54))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 11))], placeholder_shared[(((((int)threadIdx.z) * 98) + 54))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 6))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 10))], placeholder_shared[(((((int)threadIdx.z) * 98) + 6))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 6))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 12))], placeholder_shared[(((((int)threadIdx.z) * 98) + 6))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 6))], placeholder_shared[(((((int)threadIdx.z) * 98) + 55))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 10))], placeholder_shared[(((((int)threadIdx.z) * 98) + 55))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 8))], placeholder_shared[(((((int)threadIdx.z) * 98) + 55))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 12))], placeholder_shared[(((((int)threadIdx.z) * 98) + 55))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 13))], placeholder_shared[(((((int)threadIdx.z) * 98) + 7))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 7))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 15))], placeholder_shared[(((((int)threadIdx.z) * 98) + 7))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 7))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 13))], placeholder_shared[(((((int)threadIdx.z) * 98) + 56))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 56))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 15))], placeholder_shared[(((((int)threadIdx.z) * 98) + 56))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 56))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 14))], placeholder_shared[(((((int)threadIdx.z) * 98) + 8))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 8))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 16))], placeholder_shared[(((((int)threadIdx.z) * 98) + 8))], 
compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 8))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 14))], placeholder_shared[(((((int)threadIdx.z) * 98) + 57))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 57))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 16))], placeholder_shared[(((((int)threadIdx.z) * 98) + 57))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 57))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 15))], placeholder_shared[(((((int)threadIdx.z) * 98) + 9))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 9))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 9))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], placeholder_shared[(((((int)threadIdx.z) * 98) + 9))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 15))], placeholder_shared[(((((int)threadIdx.z) * 98) + 58))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 58))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 58))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], placeholder_shared[(((((int)threadIdx.z) * 98) + 58))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 16))], placeholder_shared[(((((int)threadIdx.z) * 98) + 10))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 10))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 10))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 22))], placeholder_shared[(((((int)threadIdx.z) * 98) + 10))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 16))], placeholder_shared[(((((int)threadIdx.z) * 98) + 59))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 59))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 59))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 22))], placeholder_shared[(((((int)threadIdx.z) * 98) + 59))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 11))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], 
placeholder_shared[(((((int)threadIdx.z) * 98) + 11))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 11))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 23))], placeholder_shared[(((((int)threadIdx.z) * 98) + 11))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 17))], placeholder_shared[(((((int)threadIdx.z) * 98) + 60))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], placeholder_shared[(((((int)threadIdx.z) * 98) + 60))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 60))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 23))], placeholder_shared[(((((int)threadIdx.z) * 98) + 60))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 12))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 22))], placeholder_shared[(((((int)threadIdx.z) * 98) + 12))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 12))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 24))], placeholder_shared[(((((int)threadIdx.z) * 98) + 12))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 18))], placeholder_shared[(((((int)threadIdx.z) * 98) + 61))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 22))], placeholder_shared[(((((int)threadIdx.z) * 98) + 61))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 20))], placeholder_shared[(((((int)threadIdx.z) * 98) + 61))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 24))], placeholder_shared[(((((int)threadIdx.z) * 98) + 61))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 13))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 23))], placeholder_shared[(((((int)threadIdx.z) * 98) + 13))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], placeholder_shared[(((((int)threadIdx.z) * 98) + 13))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 25))], placeholder_shared[(((((int)threadIdx.z) * 98) + 13))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 19))], placeholder_shared[(((((int)threadIdx.z) * 98) + 62))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 23))], placeholder_shared[(((((int)threadIdx.z) * 98) + 62))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 21))], placeholder_shared[(((((int)threadIdx.z) * 98) + 62))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 25))], placeholder_shared[(((((int)threadIdx.z) * 98) + 62))], compute[(7)]);\n compute[(0)] = 
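\/* annotation: start of filter row ky=2 -- weight offsets +14..+20 (and +63..+69 for the second output channel) against padded-input row base +26 *\/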
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 26))], placeholder_shared[(((((int)threadIdx.z) * 98) + 14))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 14))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 28))], placeholder_shared[(((((int)threadIdx.z) * 98) + 14))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 14))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 26))], placeholder_shared[(((((int)threadIdx.z) * 98) + 63))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 63))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 28))], placeholder_shared[(((((int)threadIdx.z) * 98) + 63))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 63))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 27))], placeholder_shared[(((((int)threadIdx.z) * 98) + 15))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 15))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 29))], placeholder_shared[(((((int)threadIdx.z) * 98) + 15))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 15))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 27))], placeholder_shared[(((((int)threadIdx.z) * 98) + 64))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 64))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 29))], placeholder_shared[(((((int)threadIdx.z) * 98) + 64))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 64))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 28))], placeholder_shared[(((((int)threadIdx.z) * 98) + 16))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 16))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 16))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 16))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 28))], placeholder_shared[(((((int)threadIdx.z) * 98) + 65))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 65))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 
65))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 65))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 29))], placeholder_shared[(((((int)threadIdx.z) * 98) + 17))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 17))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 17))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 35))], placeholder_shared[(((((int)threadIdx.z) * 98) + 17))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 29))], placeholder_shared[(((((int)threadIdx.z) * 98) + 66))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 66))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 66))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 35))], placeholder_shared[(((((int)threadIdx.z) * 98) + 66))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 18))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 18))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 18))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 36))], placeholder_shared[(((((int)threadIdx.z) * 98) + 18))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 30))], placeholder_shared[(((((int)threadIdx.z) * 98) + 67))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 67))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 67))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 36))], placeholder_shared[(((((int)threadIdx.z) * 98) + 67))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 19))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 35))], placeholder_shared[(((((int)threadIdx.z) * 98) + 19))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 19))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 37))], placeholder_shared[(((((int)threadIdx.z) * 98) + 19))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 31))], placeholder_shared[(((((int)threadIdx.z) * 98) + 68))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 35))], 
placeholder_shared[(((((int)threadIdx.z) * 98) + 68))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 33))], placeholder_shared[(((((int)threadIdx.z) * 98) + 68))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 37))], placeholder_shared[(((((int)threadIdx.z) * 98) + 68))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 20))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 36))], placeholder_shared[(((((int)threadIdx.z) * 98) + 20))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 20))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 38))], placeholder_shared[(((((int)threadIdx.z) * 98) + 20))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 32))], placeholder_shared[(((((int)threadIdx.z) * 98) + 69))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 36))], placeholder_shared[(((((int)threadIdx.z) * 98) + 69))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 34))], placeholder_shared[(((((int)threadIdx.z) * 98) + 69))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 38))], placeholder_shared[(((((int)threadIdx.z) * 98) + 69))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 39))], placeholder_shared[(((((int)threadIdx.z) * 98) + 21))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 21))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 41))], placeholder_shared[(((((int)threadIdx.z) * 98) + 21))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 21))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 39))], placeholder_shared[(((((int)threadIdx.z) * 98) + 70))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 70))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 41))], placeholder_shared[(((((int)threadIdx.z) * 98) + 70))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 70))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 40))], placeholder_shared[(((((int)threadIdx.z) * 98) + 22))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 22))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 42))], placeholder_shared[(((((int)threadIdx.z) * 98) + 22))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 22))], compute[(5)]);\n compute[(2)] = 
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 40))], placeholder_shared[(((((int)threadIdx.z) * 98) + 71))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 71))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 42))], placeholder_shared[(((((int)threadIdx.z) * 98) + 71))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 71))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 41))], placeholder_shared[(((((int)threadIdx.z) * 98) + 23))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 23))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 23))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 23))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 41))], placeholder_shared[(((((int)threadIdx.z) * 98) + 72))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 72))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 72))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 72))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 42))], placeholder_shared[(((((int)threadIdx.z) * 98) + 24))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 24))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 24))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 48))], placeholder_shared[(((((int)threadIdx.z) * 98) + 24))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 42))], placeholder_shared[(((((int)threadIdx.z) * 98) + 73))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 73))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 73))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 48))], placeholder_shared[(((((int)threadIdx.z) * 98) + 73))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 25))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 25))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 
25))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 49))], placeholder_shared[(((((int)threadIdx.z) * 98) + 25))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 43))], placeholder_shared[(((((int)threadIdx.z) * 98) + 74))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 74))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 74))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 49))], placeholder_shared[(((((int)threadIdx.z) * 98) + 74))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 26))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 48))], placeholder_shared[(((((int)threadIdx.z) * 98) + 26))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 26))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 50))], placeholder_shared[(((((int)threadIdx.z) * 98) + 26))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 44))], placeholder_shared[(((((int)threadIdx.z) * 98) + 75))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 48))], placeholder_shared[(((((int)threadIdx.z) * 98) + 75))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 46))], placeholder_shared[(((((int)threadIdx.z) * 98) + 75))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 50))], placeholder_shared[(((((int)threadIdx.z) * 98) + 75))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 27))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 49))], placeholder_shared[(((((int)threadIdx.z) * 98) + 27))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 27))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 51))], placeholder_shared[(((((int)threadIdx.z) * 98) + 27))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 45))], placeholder_shared[(((((int)threadIdx.z) * 98) + 76))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 49))], placeholder_shared[(((((int)threadIdx.z) * 98) + 76))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 47))], placeholder_shared[(((((int)threadIdx.z) * 98) + 76))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 51))], placeholder_shared[(((((int)threadIdx.z) * 98) + 76))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 52))], placeholder_shared[(((((int)threadIdx.z) * 98) + 28))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], 
placeholder_shared[(((((int)threadIdx.z) * 98) + 28))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 54))], placeholder_shared[(((((int)threadIdx.z) * 98) + 28))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 28))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 52))], placeholder_shared[(((((int)threadIdx.z) * 98) + 77))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], placeholder_shared[(((((int)threadIdx.z) * 98) + 77))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 54))], placeholder_shared[(((((int)threadIdx.z) * 98) + 77))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 77))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 53))], placeholder_shared[(((((int)threadIdx.z) * 98) + 29))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 29))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 55))], placeholder_shared[(((((int)threadIdx.z) * 98) + 29))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 29))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 53))], placeholder_shared[(((((int)threadIdx.z) * 98) + 78))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 78))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 55))], placeholder_shared[(((((int)threadIdx.z) * 98) + 78))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 78))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 54))], placeholder_shared[(((((int)threadIdx.z) * 98) + 30))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 30))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], placeholder_shared[(((((int)threadIdx.z) * 98) + 30))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 30))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 54))], placeholder_shared[(((((int)threadIdx.z) * 98) + 79))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 79))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], placeholder_shared[(((((int)threadIdx.z) * 98) + 79))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 79))], compute[(7)]);\n compute[(0)] = 
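\/* annotation: midway through filter row ky=4 -- weight offsets +28..+34 against padded-input row base +52 *\/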
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 55))], placeholder_shared[(((((int)threadIdx.z) * 98) + 31))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 31))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 31))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 61))], placeholder_shared[(((((int)threadIdx.z) * 98) + 31))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 55))], placeholder_shared[(((((int)threadIdx.z) * 98) + 80))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 80))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 80))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 61))], placeholder_shared[(((((int)threadIdx.z) * 98) + 80))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], placeholder_shared[(((((int)threadIdx.z) * 98) + 32))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 32))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 32))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 62))], placeholder_shared[(((((int)threadIdx.z) * 98) + 32))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 56))], placeholder_shared[(((((int)threadIdx.z) * 98) + 81))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 81))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 81))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 62))], placeholder_shared[(((((int)threadIdx.z) * 98) + 81))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 33))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 61))], placeholder_shared[(((((int)threadIdx.z) * 98) + 33))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 33))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 63))], placeholder_shared[(((((int)threadIdx.z) * 98) + 33))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 57))], placeholder_shared[(((((int)threadIdx.z) * 98) + 82))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 61))], placeholder_shared[(((((int)threadIdx.z) * 98) + 82))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 59))], placeholder_shared[(((((int)threadIdx.z) * 98) + 
82))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 63))], placeholder_shared[(((((int)threadIdx.z) * 98) + 82))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 34))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 62))], placeholder_shared[(((((int)threadIdx.z) * 98) + 34))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 34))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 64))], placeholder_shared[(((((int)threadIdx.z) * 98) + 34))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 58))], placeholder_shared[(((((int)threadIdx.z) * 98) + 83))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 62))], placeholder_shared[(((((int)threadIdx.z) * 98) + 83))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 60))], placeholder_shared[(((((int)threadIdx.z) * 98) + 83))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 64))], placeholder_shared[(((((int)threadIdx.z) * 98) + 83))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 65))], placeholder_shared[(((((int)threadIdx.z) * 98) + 35))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 35))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 67))], placeholder_shared[(((((int)threadIdx.z) * 98) + 35))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 35))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 65))], placeholder_shared[(((((int)threadIdx.z) * 98) + 84))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 84))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 67))], placeholder_shared[(((((int)threadIdx.z) * 98) + 84))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 84))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 66))], placeholder_shared[(((((int)threadIdx.z) * 98) + 36))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], placeholder_shared[(((((int)threadIdx.z) * 98) + 36))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 68))], placeholder_shared[(((((int)threadIdx.z) * 98) + 36))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 36))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 66))], placeholder_shared[(((((int)threadIdx.z) * 98) + 85))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], 
placeholder_shared[(((((int)threadIdx.z) * 98) + 85))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 68))], placeholder_shared[(((((int)threadIdx.z) * 98) + 85))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 85))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 67))], placeholder_shared[(((((int)threadIdx.z) * 98) + 37))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 37))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 37))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 37))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 67))], placeholder_shared[(((((int)threadIdx.z) * 98) + 86))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 86))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 86))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 86))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 68))], placeholder_shared[(((((int)threadIdx.z) * 98) + 38))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 38))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], placeholder_shared[(((((int)threadIdx.z) * 98) + 38))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 74))], placeholder_shared[(((((int)threadIdx.z) * 98) + 38))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 68))], placeholder_shared[(((((int)threadIdx.z) * 98) + 87))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 87))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], placeholder_shared[(((((int)threadIdx.z) * 98) + 87))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 74))], placeholder_shared[(((((int)threadIdx.z) * 98) + 87))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 39))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 39))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 39))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 75))], placeholder_shared[(((((int)threadIdx.z) * 98) + 39))], compute[(5)]);\n compute[(2)] = 
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 69))], placeholder_shared[(((((int)threadIdx.z) * 98) + 88))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 88))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 88))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 75))], placeholder_shared[(((((int)threadIdx.z) * 98) + 88))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], placeholder_shared[(((((int)threadIdx.z) * 98) + 40))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 74))], placeholder_shared[(((((int)threadIdx.z) * 98) + 40))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 40))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 76))], placeholder_shared[(((((int)threadIdx.z) * 98) + 40))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 70))], placeholder_shared[(((((int)threadIdx.z) * 98) + 89))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 74))], placeholder_shared[(((((int)threadIdx.z) * 98) + 89))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 72))], placeholder_shared[(((((int)threadIdx.z) * 98) + 89))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 76))], placeholder_shared[(((((int)threadIdx.z) * 98) + 89))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 41))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 75))], placeholder_shared[(((((int)threadIdx.z) * 98) + 41))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 41))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 77))], placeholder_shared[(((((int)threadIdx.z) * 98) + 41))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 71))], placeholder_shared[(((((int)threadIdx.z) * 98) + 90))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 75))], placeholder_shared[(((((int)threadIdx.z) * 98) + 90))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 73))], placeholder_shared[(((((int)threadIdx.z) * 98) + 90))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 77))], placeholder_shared[(((((int)threadIdx.z) * 98) + 90))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 78))], placeholder_shared[(((((int)threadIdx.z) * 98) + 42))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 42))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 80))], placeholder_shared[(((((int)threadIdx.z) * 98) + 
42))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 42))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 78))], placeholder_shared[(((((int)threadIdx.z) * 98) + 91))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 91))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 80))], placeholder_shared[(((((int)threadIdx.z) * 98) + 91))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 91))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 79))], placeholder_shared[(((((int)threadIdx.z) * 98) + 43))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 43))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 81))], placeholder_shared[(((((int)threadIdx.z) * 98) + 43))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], placeholder_shared[(((((int)threadIdx.z) * 98) + 43))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 79))], placeholder_shared[(((((int)threadIdx.z) * 98) + 92))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 92))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 81))], placeholder_shared[(((((int)threadIdx.z) * 98) + 92))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], placeholder_shared[(((((int)threadIdx.z) * 98) + 92))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 80))], placeholder_shared[(((((int)threadIdx.z) * 98) + 44))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 44))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 44))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 44))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 80))], placeholder_shared[(((((int)threadIdx.z) * 98) + 93))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 93))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 93))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 93))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 81))], placeholder_shared[(((((int)threadIdx.z) * 98) + 45))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], 
placeholder_shared[(((((int)threadIdx.z) * 98) + 45))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 45))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 87))], placeholder_shared[(((((int)threadIdx.z) * 98) + 45))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 81))], placeholder_shared[(((((int)threadIdx.z) * 98) + 94))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], placeholder_shared[(((((int)threadIdx.z) * 98) + 94))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 94))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 87))], placeholder_shared[(((((int)threadIdx.z) * 98) + 94))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 46))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 46))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 46))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 88))], placeholder_shared[(((((int)threadIdx.z) * 98) + 46))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 82))], placeholder_shared[(((((int)threadIdx.z) * 98) + 95))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 95))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 95))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 88))], placeholder_shared[(((((int)threadIdx.z) * 98) + 95))], compute[(7)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 47))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 87))], placeholder_shared[(((((int)threadIdx.z) * 98) + 47))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], placeholder_shared[(((((int)threadIdx.z) * 98) + 47))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 89))], placeholder_shared[(((((int)threadIdx.z) * 98) + 47))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 83))], placeholder_shared[(((((int)threadIdx.z) * 98) + 96))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 87))], placeholder_shared[(((((int)threadIdx.z) * 98) + 96))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 85))], placeholder_shared[(((((int)threadIdx.z) * 98) + 96))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 89))], placeholder_shared[(((((int)threadIdx.z) * 98) + 96))], compute[(7)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 48))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 88))], placeholder_shared[(((((int)threadIdx.z) * 98) + 48))], compute[(4)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 48))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 90))], placeholder_shared[(((((int)threadIdx.z) * 98) + 48))], compute[(5)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 84))], placeholder_shared[(((((int)threadIdx.z) * 98) + 97))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 88))], placeholder_shared[(((((int)threadIdx.z) * 98) + 97))], compute[(6)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 86))], placeholder_shared[(((((int)threadIdx.z) * 98) + 97))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 26) + 90))], placeholder_shared[(((((int)threadIdx.z) * 98) + 97))], compute[(7)]);\n }\n T_relu[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 2))] = max((compute[(4)] + placeholder2[(((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 1))] = max((compute[(1)] + placeholder2[(((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 3))] = max((compute[(5)] + placeholder2[(((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 1024))] = max((compute[(2)] + placeholder2[((((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)) + 1))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 1026))] = max((compute[(6)] + placeholder2[((((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)) + 1))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 1025))] = max((compute[(3)] + placeholder2[((((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)) + 1))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((int)task_idx.y) * 512)) + (((int)threadIdx.y) * 32)) + (((int)task_idx.x) * 4)) + 1027))] = max((compute[(7)] + placeholder2[((((((int)task_idx.z) * 32) + (((int)threadIdx.z) * 2)) + 1))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_add_nn_relu_2_kernel0_device(dim3 
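/* Fused 1x1 convolution + elementwise residual add (placeholder2) + per-channel bias (placeholder3) + ReLU, judging from the indexing in the body below: the accumulation walks input channels only, with no spatial filter taps. The task_idx parameter presumably stands in for blockIdx so this device-side variant can be invoked from a dispatcher; thread coordinates still come from the builtin threadIdx, leaving thread_idx unused here. */ 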
task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2, float* __restrict__ placeholder3){\n float compute[16];\n __shared__ float pad_temp_shared[1024];\n __shared__ float placeholder_shared[1024];\n compute[(0)] = 0.000000e+00f;\n compute[(4)] = 0.000000e+00f;\n compute[(8)] = 0.000000e+00f;\n compute[(12)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(5)] = 0.000000e+00f;\n compute[(9)] = 0.000000e+00f;\n compute[(13)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(6)] = 0.000000e+00f;\n compute[(10)] = 0.000000e+00f;\n compute[(14)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n compute[(7)] = 0.000000e+00f;\n compute[(11)] = 0.000000e+00f;\n compute[(15)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder[((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
257))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], 
compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
771))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) 
+ 516))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 
16) + 262))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 
513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) 
+ 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(15)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(8)]);\n 
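/* Unrolled inner product (annotation inferred from the indexing): each rc_outer iteration stages a 16-channel x 64-element input tile in pad_temp_shared and a 64(out) x 16(in) weight tile in placeholder_shared. The pad_temp index threadIdx.y*16 + threadIdx.x*4 + {0..3} + ic*64 walks four adjacent outputs for input channel ic, while the weight index threadIdx.z*16 + ic + {0, 256, 512, 768} selects four output-channel blocks, filling all 16 compute[] accumulators. The epilogue after the rc_outer loop adds the residual (placeholder2) and the per-channel bias (placeholder3) before clamping at zero. */\n 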
compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], 
compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
15))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(15)]);\n }\n T_relu[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = max(((compute[(0)] + placeholder2[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))]) + placeholder3[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))] = max(((compute[(4)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))] = max(((compute[(8)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 32))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))] = max(((compute[(12)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 48))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = max(((compute[(1)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))]) + placeholder3[(((((int)task_idx.z) * 64) + 
((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))] = max(((compute[(5)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))] = max(((compute[(9)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 32))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))] = max(((compute[(13)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 48))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = max(((compute[(2)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))]) + placeholder3[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))] = max(((compute[(6)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))] = max(((compute[(10)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 32))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))] = max(((compute[(14)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 48))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = max(((compute[(3)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + 
(((int)threadIdx.x) * 4)) + 3))]) + placeholder3[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))] = max(((compute[(7)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))] = max(((compute[(11)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 32))]), 0.000000e+00f);\n T_relu[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))] = max(((compute[(15)] + placeholder2[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))]) + placeholder3[((((((int)task_idx.z) * 64) + ((int)threadIdx.z)) + 48))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_10_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[16];\n __shared__ float pad_temp_shared[1024];\n __shared__ float placeholder_shared[1024];\n compute[(0)] = 0.000000e+00f;\n compute[(4)] = 0.000000e+00f;\n compute[(8)] = 0.000000e+00f;\n compute[(12)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(5)] = 0.000000e+00f;\n compute[(9)] = 0.000000e+00f;\n compute[(13)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(6)] = 0.000000e+00f;\n compute[(10)] = 0.000000e+00f;\n compute[(14)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n compute[(7)] = 0.000000e+00f;\n compute[(11)] = 0.000000e+00f;\n compute[(15)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder[((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 64) + 
(((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)threadIdx.z) * 1024) + (((int)threadIdx.y) * 256)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[((((((((int)threadIdx.z) * 1024) + (((int)threadIdx.y) * 256)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[((((((((int)threadIdx.z) * 1024) + (((int)threadIdx.y) * 256)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[((((((((int)threadIdx.z) * 1024) + (((int)threadIdx.y) * 256)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(7)]);\n compute[(11)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(4)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(12)]);\n 
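\/\/ Per-pixel channel reduction of the fused conv2d (a descriptive annotation; the surrounding generated code is unchanged): each group of sixteen __ocml_fma_f32 calls in this unrolled loop body appears to consume one input channel of the current rc_outer tile - the pad_temp_shared offset steps by 64 (one 64-element spatial tile per channel) while the corresponding placeholder_shared weight index steps by 1 - and the weight banks at +0, +256, +512 and +768 accumulate the four output-channel groups held in compute[0..3], compute[4..7], compute[8..11] and compute[12..15].\n 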
compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(9)]);\n 
compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], 
compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
6))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 
16) + 775))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) 
* 16) + 521))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 
706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) 
* 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) 
* 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(15)]);\n }\n T_relu[(((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = max((compute[(0)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))] = max((compute[(4)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))] = max((compute[(8)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))] = max((compute[(12)] + 
placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = max((compute[(1)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))] = max((compute[(5)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))] = max((compute[(9)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))] = max((compute[(13)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = max((compute[(2)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))] = max((compute[(6)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))] = max((compute[(10)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))] = max((compute[(14)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = max((compute[(3)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))] = max((compute[(7)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))] = max((compute[(11)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))] = max((compute[(15)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_4_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[4];\n __shared__ float pad_temp_shared[256];\n __shared__ float placeholder_shared[512];\n compute[(0)] = 0.000000e+00f;\n compute[(2)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n compute[(3)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 64; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))] = placeholder[(((((rc_outer * 256) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = 
placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))];\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) >> 4) * 1024)) + (rc_outer * 16)) + (((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) >> 4) * 1024)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 1) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) >> 4) * 1024)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 2) & 15)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[((((((((int)task_idx.z) * 32768) + (((int)threadIdx.z) * 2048)) + (((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) >> 4) * 1024)) + (rc_outer * 16)) + ((((((int)threadIdx.y) * 8) + (((int)threadIdx.x) * 4)) + 3) & 15)))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 17))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 17))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 33))], placeholder_shared[(((((int)threadIdx.z) * 
16) + 2))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 33))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 49))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 49))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 80))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 80))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 81))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 81))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 96))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 96))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 97))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 97))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 112))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 112))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(2)]);\n compute[(1)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 113))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 113))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 144))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 144))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 145))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 145))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 160))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 160))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 161))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 161))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 176))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 176))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 177))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 177))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(2)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 208))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 208))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 209))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 209))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 224))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 224))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 225))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 225))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(3)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 240))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 240))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(2)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 241))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + 241))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(3)]);\n }\n T_relu[(((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 256))] = max((compute[(2)] + placeholder2[((((((int)task_idx.z) * 32) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + (((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = max((compute[(1)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 512) + (((int)threadIdx.z) * 16)) + 
(((int)threadIdx.y) * 4)) + (((int)threadIdx.x) * 2)) + 257))] = max((compute[(3)] + placeholder2[((((((int)task_idx.z) * 32) + ((int)threadIdx.z)) + 16))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_6_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[1];\n __shared__ float pad_temp_shared[192];\n __shared__ float placeholder_shared[2304];\n compute[(0)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {\n __syncthreads();\n if (((((((int)threadIdx.y) * 3) + ((int)threadIdx.x)) \/ 6) + ((int)threadIdx.z)) < 32) {\n if ((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) < 192) {\n if (((((int)threadIdx.y) * 3) + ((int)threadIdx.x)) < 6) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)))] = (((((1 <= ((((int)task_idx.y) * 2) + (((int)threadIdx.z) & 3))) && (((((int)task_idx.y) * 2) + (((int)threadIdx.z) & 3)) < 9)) && (1 <= (((((int)task_idx.x) * 4) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)))) && ((((((int)task_idx.x) * 4) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) < 9)) ? placeholder[(((((((((rc_outer * 512) + ((((int)threadIdx.z) >> 2) * 64)) + (((int)task_idx.y) * 16)) + ((((int)threadIdx.z) & 3) * 8)) + (((int)task_idx.x) * 4)) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) - 9))] : 0.000000e+00f);\n }\n }\n }\n }\n for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 9; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {\n placeholder_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + (((int)threadIdx.x) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = placeholder1[(((((((((int)task_idx.z) * 36864) + (((int)threadIdx.z) * 1152)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + (((int)threadIdx.x) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))];\n }\n __syncthreads();\n for (int rc_inner = 0; rc_inner < 8; ++rc_inner) {\n for (int ry_inner = 0; ry_inner < 3; ++ry_inner) {\n for (int rx_inner = 0; rx_inner < 3; ++rx_inner) {\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((rc_inner * 24) + (((int)threadIdx.y) * 6)) + (ry_inner * 6)) + ((int)threadIdx.x)) + rx_inner))], placeholder_shared[(((((((int)threadIdx.z) * 72) + (rc_inner * 9)) + (ry_inner * 3)) + rx_inner))], compute[(0)]);\n }\n }\n }\n }\n T_relu[(((((((((int)task_idx.z) * 2048) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 16)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_8_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[1];\n __shared__ float pad_temp_shared[6272];\n __shared__ float placeholder_shared[2048];\n compute[(0)] = 0.000000e+00f;\n for (int i = 0; i < 23; i++)\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + i))] = placeholder[((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + i) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 
25) + i) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + i) % 7)))];\n  for (int i = 0; i < 8; i++)\n    placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))];\n  __syncthreads();\n  for (int i = 0; i < 128; i++)\n    compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 2)) + 49 * i))], placeholder_shared[(((((int)threadIdx.z) * 128) + i))], compute[(0)]);\n  __syncthreads();\n  \/\/ second half: input channels 128-255 (+32768 into placeholder, +128 into placeholder1)\n  for (int i = 0; i < 23; i++)\n    pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + i))] = placeholder[(((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + i) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + i) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + i) % 7)) + 32768))];\n  for (int i = 0; i < 8; i++)\n    placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 128 + i))];\n  __syncthreads();\n  for (int i = 0; i < 128; i++)\n    compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 2)) + 49 * i))], placeholder_shared[(((((int)threadIdx.z) * 128) + i))], compute[(0)]);\n  \/\/ add bias (placeholder2) and apply ReLU\n  T_relu[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 16) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_add_nn_relu_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2, float* __restrict__ placeholder3){\n  if (threadIdx.x + threadIdx.y * 4 + threadIdx.z * 1 * 4 >= 4 * 1 * 64) return;\n  if (task_idx.x + task_idx.y * 1 + task_idx.z * 4 * 1 >= 1 * 4 * 16) return;\n  float compute[1];\n  __shared__ float pad_temp_shared[64];\n  __shared__ float placeholder_shared[1024];\n  compute[(0)] = 0.000000e+00f;\n  if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n    if (((int)threadIdx.x) < 1) {\n      pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)))];\n    }\n  }\n  placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)))];\n  placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 1))];\n  placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 2))];\n  placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 
4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 256))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 16))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 17))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 18))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 19))];\n __syncthreads();\n compute[(0)] 
= __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 512))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 32))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 33))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 34))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 35))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 768))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 48))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 49))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 50))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 51))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1024))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 64))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 65))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 66))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 67))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1280))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 80))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 81))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 82))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 83))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1536))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 96))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 97))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 98))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 99))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1792))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 112))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 113))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 114))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 115))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2048))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 128))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 129))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 130))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 131))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2304))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 144))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 145))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 146))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 147))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2560))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 160))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 161))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 162))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 163))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2816))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 176))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 177))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 178))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 179))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3072))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 192))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 193))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 194))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 195))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3328))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 208))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 209))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 210))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 211))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3584))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 224))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 225))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 226))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 227))];\n __syncthreads();\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3840))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 240))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 241))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 242))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 243))];\n __syncthreads();\n compute[(0)] = 
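\/* Final unrolled step: these sixteen FMAs complete the accumulation; the epilogue adds the elementwise term from placeholder2 and the per-channel bias from placeholder3, then clamps with max(..., 0.0f) for the fused ReLU. *\/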
__ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n T_relu[(((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 16)) + (((int)task_idx.y) * 4)) + ((int)threadIdx.x)))] = max(((compute[(0)] + placeholder2[(((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 16)) + (((int)task_idx.y) * 4)) + ((int)threadIdx.x)))]) + placeholder3[(((((int)task_idx.z) * 64) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[2];\n __shared__ float pad_temp_shared[128];\n __shared__ float placeholder_shared[2304];\n compute[(0)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 64; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[(((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)))] = ((((1 <= (((int)threadIdx.z) & 3)) && ((((int)threadIdx.z) & 3) < 3)) && (1 <= ((int)threadIdx.y))) ? 
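\/* Interior point of the padded input tile: read from global memory; halo positions take the 0.0f branch of the conditional (implicit zero padding). *\/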
placeholder[((((((rc_outer * 32) + ((((int)threadIdx.z) >> 2) * 4)) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.z) & 3) * 2)) - 3))] : 0.000000e+00f);\n pad_temp_shared[((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + 1))] = ((((1 <= (((int)threadIdx.z) & 3)) && ((((int)threadIdx.z) & 3) < 3)) && (((int)threadIdx.y) < 1)) ? placeholder[((((((rc_outer * 32) + ((((int)threadIdx.z) >> 2) * 4)) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.z) & 3) * 2)) - 2))] : 0.000000e+00f);\n placeholder_shared[(((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)))] = placeholder1[(((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 1))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 1))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 2))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 2))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 3))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 4))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 4))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 5))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 5))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 6))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 6))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 7))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 7))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 8))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 8))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 9))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 9))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 10))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 10))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 11))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 11))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 12))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 12))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 13))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 
72)) + (((int)threadIdx.y) * 36)) + 13))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 14))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 14))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 15))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 15))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 16))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 16))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 17))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 17))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 18))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 18))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 19))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 19))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 20))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 20))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 21))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 21))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 22))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 22))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 23))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 23))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 24))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 24))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 25))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 25))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 26))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 26))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 27))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 27))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 28))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 28))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 29))] = placeholder1[((((((((int)task_idx.z) * 147456) + 
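\/* Cooperative, fully unrolled staging of this rc_outer slice of the 3x3 filters: each (threadIdx.z, threadIdx.y) pair copies 36 consecutive taps into placeholder_shared (72 taps per threadIdx.z). *\/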
(((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 29))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 30))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 30))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 31))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 31))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 32))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 32))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 33))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 33))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 34))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 34))];\n placeholder_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + 35))] = placeholder1[((((((((int)task_idx.z) * 147456) + (((int)threadIdx.z) * 4608)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + 35))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.y) * 4))], placeholder_shared[((((int)threadIdx.z) * 72))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 1))], placeholder_shared[((((int)threadIdx.z) * 72))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 1))], placeholder_shared[(((((int)threadIdx.z) * 72) + 1))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 2))], placeholder_shared[(((((int)threadIdx.z) * 72) + 1))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 2))], placeholder_shared[(((((int)threadIdx.z) * 72) + 2))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 3))], placeholder_shared[(((((int)threadIdx.z) * 72) + 2))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 4))], placeholder_shared[(((((int)threadIdx.z) * 72) + 3))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 5))], placeholder_shared[(((((int)threadIdx.z) * 72) + 3))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 5))], placeholder_shared[(((((int)threadIdx.z) * 72) + 4))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 6))], placeholder_shared[(((((int)threadIdx.z) * 72) + 4))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 6))], placeholder_shared[(((((int)threadIdx.z) * 72) + 5))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 7))], placeholder_shared[(((((int)threadIdx.z) * 72) + 5))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 8))], placeholder_shared[(((((int)threadIdx.z) * 72) + 6))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 9))], placeholder_shared[(((((int)threadIdx.z) * 72) + 
6))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 9))], placeholder_shared[(((((int)threadIdx.z) * 72) + 7))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 10))], placeholder_shared[(((((int)threadIdx.z) * 72) + 7))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 10))], placeholder_shared[(((((int)threadIdx.z) * 72) + 8))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 11))], placeholder_shared[(((((int)threadIdx.z) * 72) + 8))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 16))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 17))], placeholder_shared[(((((int)threadIdx.z) * 72) + 9))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 17))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 18))], placeholder_shared[(((((int)threadIdx.z) * 72) + 10))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 18))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 19))], placeholder_shared[(((((int)threadIdx.z) * 72) + 11))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 20))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 21))], placeholder_shared[(((((int)threadIdx.z) * 72) + 12))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 21))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 22))], placeholder_shared[(((((int)threadIdx.z) * 72) + 13))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 22))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 23))], placeholder_shared[(((((int)threadIdx.z) * 72) + 14))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 24))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 25))], placeholder_shared[(((((int)threadIdx.z) * 72) + 15))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 25))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 26))], placeholder_shared[(((((int)threadIdx.z) * 72) + 16))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 26))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 27))], placeholder_shared[(((((int)threadIdx.z) * 72) + 17))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 32))], placeholder_shared[(((((int)threadIdx.z) * 72) 
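\/* 3x3 convolution inner product: each thread accumulates two horizontally adjacent outputs (compute[0] and compute[1]), so interior taps are reused between the two sliding windows; the staged input row stride is 16. *\/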
+ 18))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 33))], placeholder_shared[(((((int)threadIdx.z) * 72) + 18))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 33))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 34))], placeholder_shared[(((((int)threadIdx.z) * 72) + 19))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 34))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 35))], placeholder_shared[(((((int)threadIdx.z) * 72) + 20))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 36))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 37))], placeholder_shared[(((((int)threadIdx.z) * 72) + 21))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 37))], placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 38))], placeholder_shared[(((((int)threadIdx.z) * 72) + 22))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 38))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 39))], placeholder_shared[(((((int)threadIdx.z) * 72) + 23))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 40))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 41))], placeholder_shared[(((((int)threadIdx.z) * 72) + 24))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 41))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 42))], placeholder_shared[(((((int)threadIdx.z) * 72) + 25))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 42))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 43))], placeholder_shared[(((((int)threadIdx.z) * 72) + 26))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 48))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 49))], placeholder_shared[(((((int)threadIdx.z) * 72) + 27))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 49))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 50))], placeholder_shared[(((((int)threadIdx.z) * 72) + 28))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 50))], placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 51))], 
placeholder_shared[(((((int)threadIdx.z) * 72) + 29))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 52))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 53))], placeholder_shared[(((((int)threadIdx.z) * 72) + 30))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 53))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 54))], placeholder_shared[(((((int)threadIdx.z) * 72) + 31))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 54))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 55))], placeholder_shared[(((((int)threadIdx.z) * 72) + 32))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 56))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 57))], placeholder_shared[(((((int)threadIdx.z) * 72) + 33))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 57))], placeholder_shared[(((((int)threadIdx.z) * 72) + 34))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 58))], placeholder_shared[(((((int)threadIdx.z) * 72) + 34))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 58))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 59))], placeholder_shared[(((((int)threadIdx.z) * 72) + 35))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 64))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 65))], placeholder_shared[(((((int)threadIdx.z) * 72) + 36))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 65))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 66))], placeholder_shared[(((((int)threadIdx.z) * 72) + 37))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 66))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 67))], placeholder_shared[(((((int)threadIdx.z) * 72) + 38))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 68))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 69))], placeholder_shared[(((((int)threadIdx.z) * 72) + 39))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 69))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 70))], placeholder_shared[(((((int)threadIdx.z) * 72) + 40))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 
4) + 70))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 71))], placeholder_shared[(((((int)threadIdx.z) * 72) + 41))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 72))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 73))], placeholder_shared[(((((int)threadIdx.z) * 72) + 42))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 73))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 74))], placeholder_shared[(((((int)threadIdx.z) * 72) + 43))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 74))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 75))], placeholder_shared[(((((int)threadIdx.z) * 72) + 44))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 80))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 81))], placeholder_shared[(((((int)threadIdx.z) * 72) + 45))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 81))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 82))], placeholder_shared[(((((int)threadIdx.z) * 72) + 46))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 82))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 83))], placeholder_shared[(((((int)threadIdx.z) * 72) + 47))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 84))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 85))], placeholder_shared[(((((int)threadIdx.z) * 72) + 48))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 85))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 86))], placeholder_shared[(((((int)threadIdx.z) * 72) + 49))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 86))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 87))], placeholder_shared[(((((int)threadIdx.z) * 72) + 50))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 88))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 89))], placeholder_shared[(((((int)threadIdx.z) * 72) + 51))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 89))], placeholder_shared[(((((int)threadIdx.z) * 72) + 52))], compute[(0)]);\n compute[(1)] = 
__ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 90))], placeholder_shared[(((((int)threadIdx.z) * 72) + 52))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 90))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 91))], placeholder_shared[(((((int)threadIdx.z) * 72) + 53))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 96))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 97))], placeholder_shared[(((((int)threadIdx.z) * 72) + 54))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 97))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 98))], placeholder_shared[(((((int)threadIdx.z) * 72) + 55))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 98))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 99))], placeholder_shared[(((((int)threadIdx.z) * 72) + 56))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 100))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 101))], placeholder_shared[(((((int)threadIdx.z) * 72) + 57))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 101))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 102))], placeholder_shared[(((((int)threadIdx.z) * 72) + 58))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 102))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 103))], placeholder_shared[(((((int)threadIdx.z) * 72) + 59))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 104))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 105))], placeholder_shared[(((((int)threadIdx.z) * 72) + 60))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 105))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 106))], placeholder_shared[(((((int)threadIdx.z) * 72) + 61))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 106))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 107))], placeholder_shared[(((((int)threadIdx.z) * 72) + 62))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 112))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 113))], placeholder_shared[(((((int)threadIdx.z) * 72) + 63))], 
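\/* Last taps of the 3x3 window for this rc_outer slice; once the loop finishes, both accumulators receive the per-channel bias from placeholder2 and a ReLU clamp before being stored to adjacent T_relu positions. *\/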
compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 113))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 114))], placeholder_shared[(((((int)threadIdx.z) * 72) + 64))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 114))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 115))], placeholder_shared[(((((int)threadIdx.z) * 72) + 65))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 116))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 117))], placeholder_shared[(((((int)threadIdx.z) * 72) + 66))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 117))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 118))], placeholder_shared[(((((int)threadIdx.z) * 72) + 67))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 118))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 119))], placeholder_shared[(((((int)threadIdx.z) * 72) + 68))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 120))], placeholder_shared[(((((int)threadIdx.z) * 72) + 69))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 121))], placeholder_shared[(((((int)threadIdx.z) * 72) + 69))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 121))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 122))], placeholder_shared[(((((int)threadIdx.z) * 72) + 70))], compute[(1)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 122))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + 123))], placeholder_shared[(((((int)threadIdx.z) * 72) + 71))], compute[(1)]);\n }\n T_relu[((((((int)task_idx.z) * 128) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[(((((((int)task_idx.z) * 128) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + 1))] = max((compute[(1)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_add_nn_relu_2_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ T_relu, float* __restrict__ placeholder, float* __restrict__ placeholder1){\n T_relu[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = max((placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] + placeholder1[(((((int)task_idx.x) * 4) + (((int)threadIdx.x) >> 6)))]), 0.000000e+00f);\n}\n\n__device__ void fused_add_nn_relu_1_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ T_relu, float* __restrict__ placeholder, float* __restrict__ placeholder1){\n T_relu[(((((int)task_idx.x) * 256) + 
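\/* The fused_add_nn_relu_*_kernel0_device variants are elementwise bias-add plus ReLU over 256-element blocks; they differ only in how the bias index is derived from the flat offset (threadIdx.x >> 6, threadIdx.x >> 4, or task_idx.x alone). *\/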
((int)threadIdx.x)))] = max((placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] + placeholder1[(((((int)task_idx.x) * 16) + (((int)threadIdx.x) >> 4)))]), 0.000000e+00f);\n}\n\n__device__ void fused_add_nn_relu_3_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ T_relu, float* __restrict__ placeholder, float* __restrict__ placeholder1){\n T_relu[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = max((placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] + placeholder1[(((int)task_idx.x))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_2_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[2];\n __shared__ float pad_temp_shared[144];\n __shared__ float placeholder_shared[256];\n compute[(0)] = 0.000000e+00f;\n compute[(1)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 64; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + (((((int)threadIdx.x) * 5) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 1))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 1) \/ 3) * 4)) + (((((int)threadIdx.x) * 5) + 1) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 2))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 2) \/ 3) * 4)) + (((((int)threadIdx.x) * 5) + 2) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 3))] = placeholder[(((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + (((((int)threadIdx.x) * 5) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) % 3)) + 4))];\n if ((((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 5) + 4) \/ 9)) + ((int)threadIdx.y)) < 16) {\n if ((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + (((((int)threadIdx.x) * 5) + 4) \/ 3)) < 48) {\n if ((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) < 140) {\n if (((((int)threadIdx.y) * 9) + (((int)threadIdx.x) * 5)) < 14) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 4))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 4) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) + 1)))];\n }\n }\n }\n }\n }\n placeholder_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)))] = placeholder1[((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 2048)) + (((int)threadIdx.y) * 1024)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder1[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 2048)) + (((int)threadIdx.y) * 1024)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + 
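\/* Appears to be a pointwise (1x1) convolution: each rc_outer step stages a 3x3 spatial window for 16 input channels (9 elements per channel in pad_temp_shared) plus 8 consecutive weights per thread, and every thread accumulates two output channels whose weights sit 128 apart in placeholder_shared. *\/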
(((int)threadIdx.x) * 8)) + 2))] = placeholder1[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 2048)) + (((int)threadIdx.y) * 1024)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + 2))];\n \/\/ remaining elements of this thread's 8-wide weight slice (elements + 0..2 are staged above)\n for (int k = 3; k < 8; ++k)\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + k))] = placeholder1[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 2048)) + (((int)threadIdx.y) * 1024)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + k))];\n __syncthreads();\n \/\/ reduce over the 16 input channels staged for this rc_outer tile; each thread accumulates two output channels (threadIdx.z and threadIdx.z + 8)\n for (int rc_inner = 0; rc_inner < 16; ++rc_inner) {\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 2)) + (rc_inner * 9))], placeholder_shared[(((((int)threadIdx.z) * 16) + rc_inner))], compute[(0)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 2)) + (rc_inner * 9))], placeholder_shared[((((((int)threadIdx.z) * 16) + rc_inner) + 128))], compute[(1)]);\n }\n }\n T_relu[(((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 16) + ((int)threadIdx.z)))]), 0.000000e+00f);\n T_relu[((((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) + 32))] = max((compute[(1)] + placeholder2[((((((int)task_idx.z) * 16) + ((int)threadIdx.z)) + 8))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_9_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[1];\n __shared__ float pad_temp_shared[192];\n __shared__ float placeholder_shared[2304];\n compute[(0)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {\n __syncthreads();\n if (((((((int)threadIdx.y) * 3) + ((int)threadIdx.x)) \/ 6) + ((int)threadIdx.z)) < 32) {\n if ((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) < 192) {\n if (((((int)threadIdx.y) * 3) + ((int)threadIdx.x)) < 6) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)))] = (((((1 <= ((((int)task_idx.y) * 2) + (((int)threadIdx.z) & 3))) && (((((int)task_idx.y) * 2) + (((int)threadIdx.z) & 3)) < 17)) && (1 <= (((((int)task_idx.x) * 4) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)))) && ((((((int)task_idx.x) * 4) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) < 17)) ? 
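\/* padding predicate: interior pixels are read from the input; the one-element halo is zero-filled *\/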
placeholder[(((((((((rc_outer * 2048) + ((((int)threadIdx.z) >> 2) * 256)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.z) & 3) * 16)) + (((int)task_idx.x) * 4)) + (((int)threadIdx.y) * 3)) + ((int)threadIdx.x)) - 17))] : 0.000000e+00f);\n }\n }\n }\n }\n for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 9; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {\n placeholder_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 36)) + (((int)threadIdx.x) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = placeholder1[(((((((((int)task_idx.z) * 18432) + (((int)threadIdx.z) * 576)) + (rc_outer * 72)) + (((int)threadIdx.y) * 36)) + (((int)threadIdx.x) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))];\n }\n __syncthreads();\n for (int rc_inner = 0; rc_inner < 8; ++rc_inner) {\n for (int ry_inner = 0; ry_inner < 3; ++ry_inner) {\n for (int rx_inner = 0; rx_inner < 3; ++rx_inner) {\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((rc_inner * 24) + (((int)threadIdx.y) * 6)) + (ry_inner * 6)) + ((int)threadIdx.x)) + rx_inner))], placeholder_shared[(((((((int)threadIdx.z) * 72) + (rc_inner * 9)) + (ry_inner * 3)) + rx_inner))], compute[(0)]);\n }\n }\n }\n }\n T_relu[(((((((((int)task_idx.z) * 8192) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 16)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = max((compute[(0)] + placeholder2[(((((int)task_idx.z) * 32) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_add_add_nn_relu_1_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2, float* __restrict__ placeholder3){\n float compute[1];\n __shared__ float pad_temp_shared[2048];\n __shared__ float placeholder_shared[2048];\n compute[(0)] = 0.000000e+00f;\n pad_temp_shared[((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 1))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 2))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 3))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((int)threadIdx.x) >> 1) * 64)) + (((int)task_idx.y) * 32)) + ((((int)threadIdx.x) & 1) * 16)) + (((int)task_idx.x) * 4)) + 3))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 4))] = placeholder[(((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 
2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 5))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 6))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 7))] = placeholder[((((((((((int)threadIdx.z) * 512) + (((int)threadIdx.y) * 128)) + ((((((int)threadIdx.x) * 2) + 1) >> 2) * 64)) + (((int)task_idx.y) * 32)) + ((((((int)threadIdx.x) * 2) + 1) & 3) * 8)) + (((int)task_idx.x) * 4)) + 3))];\n \/\/ each thread stages its 8-wide slice of the 1x1 weight block\n for (int k = 0; k < 8; ++k)\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + k))] = placeholder1[((((((((int)task_idx.z) * 2048) + (((int)threadIdx.z) * 128)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + k))];\n __syncthreads();\n \/\/ 1x1 convolution: accumulate all 128 staged input channels into this thread's output pixel\n for (int rc_inner = 0; rc_inner < 128; ++rc_inner)\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + (rc_inner * 16))], placeholder_shared[(((((int)threadIdx.z) * 128) + rc_inner))], compute[(0)]);\n T_relu[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = max(((compute[(0)] + placeholder2[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))]) + placeholder3[(((((int)task_idx.z) * 16) + ((int)threadIdx.z)))]), 0.000000e+00f);\n}\n\n__device__ void fused_nn_conv2d_3_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ compute){\n float compute_local[2];\n __shared__ float pad_temp_shared[144];\n __shared__ float placeholder_shared[256];\n compute_local[(0)] = 0.000000e+00f;\n compute_local[(1)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 64; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + (((((int)threadIdx.x) * 5) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 1))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 1) \/ 3) * 4)) + (((((int)threadIdx.x) * 5) + 1) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 2))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 2) \/ 3) * 4)) + (((((int)threadIdx.x) * 5) + 2) % 3)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 3))] = placeholder[(((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + (((((int)threadIdx.x) * 5) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) % 3)) + 4))];\n if ((((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 5) + 4) \/ 9)) + ((int)threadIdx.y)) < 16) {\n if ((((((int)threadIdx.z) * 6) + (((int)threadIdx.y) * 3)) + (((((int)threadIdx.x) * 5) + 4) \/ 3)) < 48) {\n if ((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) < 140) {\n if (((((int)threadIdx.y) * 9) + (((int)threadIdx.x) * 5)) < 14) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[(((((((int)threadIdx.z) * 18) + (((int)threadIdx.y) * 9)) + (((int)threadIdx.x) * 5)) + 4))] = placeholder[((((((rc_outer * 256) + (((int)threadIdx.z) * 32)) + (((int)threadIdx.y) * 16)) + ((((((int)threadIdx.x) * 5) + 4) \/ 3) * 4)) + ((((int)threadIdx.x) * 5) + 1)))];\n }\n }\n }\n }\n }\n \/\/ each thread stages its 8-wide slice of the weight tile for this rc_outer block\n for (int k = 0; k < 8; ++k)\n placeholder_shared[(((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 8)) + k))] = placeholder1[(((((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 2048)) + (((int)threadIdx.y) * 1024)) + (rc_outer * 16)) + (((int)threadIdx.x) * 8)) + k))];\n __syncthreads();\n \/\/ reduce over the 16 staged input channels; two output channels per thread\n for (int rc_inner = 0; rc_inner < 16; ++rc_inner) {\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 2)) + (rc_inner * 9))], placeholder_shared[(((((int)threadIdx.z) * 16) + rc_inner))], compute_local[(0)]);\n compute_local[(1)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 2)) + (rc_inner * 9))], placeholder_shared[((((((int)threadIdx.z) * 16) + rc_inner) + 128))], compute_local[(1)]);\n }\n }\n compute[(((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)))] = compute_local[(0)];\n compute[((((((((int)task_idx.z) * 64) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) + 32))] = compute_local[(1)];\n}\n\n__device__ void fused_nn_global_avg_pool2d_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ tensor){\n float tensor1[1];\n tensor1[(0)] = 0.000000e+00f;\n \/\/ sum the 2x2 spatial window, then scale by 1\/4 to get the mean\n for (int rv0 = 0; rv0 < 2; ++rv0) {\n for (int rv1 = 0; rv1 < 2; ++rv1) {\n if (((int)threadIdx.y) < 1) {\n tensor1[(0)] = (tensor1[(0)] + placeholder[((((((((int)threadIdx.y) * 8192) + (((int)task_idx.x) * 32)) + (((int)threadIdx.x) * 4)) + (rv0 * 2)) + rv1))]);\n }\n }\n }\n if (((int)threadIdx.y) < 1) {\n tensor[((((((int)threadIdx.y) * 2048) + (((int)task_idx.x) * 8)) + ((int)threadIdx.x)))] = (tensor1[(0)] * 2.500000e-01f);\n }\n}\n\n__device__ void fused_nn_conv2d_1_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ compute){\n float compute_local[1];\n __shared__ float pad_temp_shared[6272];\n __shared__ float placeholder_shared[2048];\n compute_local[(0)] = 0.000000e+00f;\n for (int i = 0; i < 23; i++)\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + 
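\/* cooperative staging: each thread copies a 25-element strip of the 128x49 input tile *\/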
(((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + i))] = placeholder[((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + i) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + i) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + i) % 7)))];\n if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 25) + 23) \/ 49)) < 128) {\n if ((((((int)threadIdx.z) * 56) + (((int)threadIdx.y) * 14)) + (((((int)threadIdx.x) * 25) + 23) \/ 7)) < 896) {\n if ((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) < 6249) {\n if (((((int)threadIdx.y) * 98) + (((int)threadIdx.x) * 25)) < 369) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + 23))] = placeholder[((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + 23) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + 23) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + 2) % 7)))];\n }\n }\n }\n }\n }\n if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 25) + 24) \/ 49)) < 128) {\n if ((((((int)threadIdx.z) * 56) + (((int)threadIdx.y) * 14)) + (((((int)threadIdx.x) * 25) + 24) \/ 7)) < 896) {\n if ((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) < 6248) {\n if (((((int)threadIdx.y) * 98) + (((int)threadIdx.x) * 25)) < 368) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + 24))] = placeholder[((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + 24) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + 24) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + 3) % 7)))];\n }\n }\n }\n }\n }\n for (int i = 0; i < 8; i++)\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))];\n __syncthreads();\n for (int i = 0; i < 128; i++)\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 2)) + 49 * i))], placeholder_shared[(((((int)threadIdx.z) * 128) + i))], compute_local[(0)]);\n __syncthreads();\n for (int i = 0; i < 23; i++)\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + i))] = placeholder[(((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + i) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + i) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + i) % 7)) + 32768))];\n if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 25) + 23) \/ 49)) < 128) {\n if ((((((int)threadIdx.z) * 56) + (((int)threadIdx.y) * 14)) + (((((int)threadIdx.x) * 25) + 23) \/ 7)) < 896) {\n if ((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) < 6249) {\n if (((((int)threadIdx.y) * 98) + (((int)threadIdx.x) * 25)) < 369) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + 
(((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + 23))] = placeholder[(((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + 23) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + 23) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + 2) % 7)) + 32768))];\n }\n }\n }\n }\n }\n if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 25) + 24) \/ 49)) < 128) {\n if ((((((int)threadIdx.z) * 56) + (((int)threadIdx.y) * 14)) + (((((int)threadIdx.x) * 25) + 24) \/ 7)) < 896) {\n if ((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) < 6248) {\n if (((((int)threadIdx.y) * 98) + (((int)threadIdx.x) * 25)) < 368) {\n if (((int)threadIdx.x) < 3) {\n pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 98)) + (((int)threadIdx.x) * 25)) + 24))] = placeholder[(((((((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 512)) + ((((((int)threadIdx.x) * 25) + 24) \/ 49) * 256)) + (((int)task_idx.y) * 128)) + (((((((int)threadIdx.x) * 25) + 24) % 49) \/ 7) * 16)) + (((int)task_idx.x) * 8)) + (((((int)threadIdx.x) * 25) + 3) % 7)) + 32768))];\n }\n }\n }\n }\n }\n \/\/ second half of the weight slice (input channels 128..255)\n for (int i = 0; i < 8; i++)\n placeholder_shared[(((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + i))] = placeholder1[((((((((int)task_idx.z) * 4096) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.y) * 32)) + (((int)threadIdx.x) * 8)) + 128 + i))];\n __syncthreads();\n for (int i = 0; i < 128; i++)\n compute_local[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 14) + (((int)threadIdx.x) * 2)) + 49 * i))], placeholder_shared[(((((int)threadIdx.z) * 128) + i))], compute_local[(0)]);\n compute[(((((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 64)) + (((int)task_idx.y) * 32)) + (((int)threadIdx.y) * 8)) + (((int)task_idx.x) * 4)) + ((int)threadIdx.x)))] = compute_local[(0)];\n}\n\n__device__ void fused_nn_batch_flatten_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ tensor, float* __restrict__ placeholder){\n \/\/ flatten at this layout is a straight element-for-element copy\n tensor[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))];\n}\n\n__device__ void fused_nn_conv2d_add_nn_relu_11_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_relu, float* __restrict__ placeholder2){\n float compute[16];\n __shared__ float pad_temp_shared[1024];\n __shared__ float placeholder_shared[1024];\n for (int i = 0; i < 16; i++)\n compute[(i)] = 0.000000e+00f;\n for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {\n __syncthreads();\n pad_temp_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder[((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + 
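\/* each thread stages four consecutive floats of the input tile per rc_outer step *\/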
(((int)threadIdx.x) * 4)) + 1))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n pad_temp_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder[(((((((rc_outer * 4096) + (((int)threadIdx.z) * 256)) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n placeholder_shared[((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)threadIdx.z) * 256) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[((((((((int)threadIdx.z) * 256) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[((((((((int)threadIdx.z) * 256) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[((((((((int)threadIdx.z) * 256) + (((int)threadIdx.y) * 64)) + (rc_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[(((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 1))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(6)]);\n compute[(10)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 2))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 256))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 512))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 3))], placeholder_shared[(((((int)threadIdx.z) * 16) + 768))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 64))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 65))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 66))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(3)]);\n compute[(7)] = 
__ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 257))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 513))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 67))], placeholder_shared[(((((int)threadIdx.z) * 16) + 769))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 128))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 129))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 130))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 258))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 514))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 131))], placeholder_shared[(((((int)threadIdx.z) * 16) + 770))], compute[(15)]);\n 
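\/\/ Descriptive note (editorial, inferred from the visible offsets): input offsets +192..+195 are combined with filter taps +3, +259, +515 and +771, continuing the unrolled accumulation into compute[0..15], a 4 (spatial position) by 4 (output-channel block) register tile.\n  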
compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 192))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 193))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 194))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 259))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 515))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 195))], placeholder_shared[(((((int)threadIdx.z) * 16) + 771))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(8)]);\n 
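\/\/ Descriptive note (editorial): still input offset +256 (tap +4 in each channel block); the fourth channel block finishes below before the unroll advances to offsets +257..+259.\n  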
compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 256))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 257))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 258))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 260))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 516))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 259))], placeholder_shared[(((((int)threadIdx.z) * 16) + 772))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 320))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], 
compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 321))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 322))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 261))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 517))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 323))], placeholder_shared[(((((int)threadIdx.z) * 16) + 773))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 384))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 385))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 
6))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 386))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 262))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 518))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 387))], placeholder_shared[(((((int)threadIdx.z) * 16) + 774))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 448))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 449))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 450))], placeholder_shared[(((((int)threadIdx.z) * 
16) + 775))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 263))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 519))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 451))], placeholder_shared[(((((int)threadIdx.z) * 16) + 775))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 512))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 513))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 520))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 514))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 264))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) 
* 16) + 520))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 515))], placeholder_shared[(((((int)threadIdx.z) * 16) + 776))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 576))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 577))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 578))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 265))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 521))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 579))], placeholder_shared[(((((int)threadIdx.z) * 16) + 777))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 640))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 641))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 642))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 266))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 522))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 643))], placeholder_shared[(((((int)threadIdx.z) * 16) + 778))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 704))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 
705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 705))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 706))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 267))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 523))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 707))], placeholder_shared[(((((int)threadIdx.z) * 16) + 779))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 768))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 
4)) + 769))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 770))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 268))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 524))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 771))], placeholder_shared[(((((int)threadIdx.z) * 16) + 780))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 832))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 833))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + 
(((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 834))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 269))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 525))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 835))], placeholder_shared[(((((int)threadIdx.z) * 16) + 781))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 896))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 897))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 898))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) 
* 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 270))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 526))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 899))], placeholder_shared[(((((int)threadIdx.z) * 16) + 782))], compute[(15)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n compute[(4)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(4)]);\n compute[(8)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(8)]);\n compute[(12)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 960))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(12)]);\n compute[(1)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(1)]);\n compute[(5)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(5)]);\n compute[(9)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(9)]);\n compute[(13)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 961))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(13)]);\n compute[(2)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(2)]);\n compute[(6)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(6)]);\n compute[(10)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(10)]);\n compute[(14)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 962))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(14)]);\n compute[(3)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(3)]);\n compute[(7)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 271))], compute[(7)]);\n compute[(11)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 527))], compute[(11)]);\n compute[(15)] = __ocml_fma_f32(pad_temp_shared[((((((int)threadIdx.y) * 16) + (((int)threadIdx.x) * 4)) + 963))], placeholder_shared[(((((int)threadIdx.z) * 16) + 783))], compute[(15)]);\n }\n T_relu[(((((((int)threadIdx.z) * 256) + 
(((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)))] = max((compute[(0)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4096))] = max((compute[(4)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8192))] = max((compute[(8)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12288))] = max((compute[(12)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = max((compute[(1)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4097))] = max((compute[(5)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8193))] = max((compute[(9)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12289))] = max((compute[(13)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = max((compute[(2)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4098))] = max((compute[(6)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8194))] = max((compute[(10)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12290))] = max((compute[(14)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = max((compute[(3)] + placeholder2[(((int)threadIdx.z))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 4099))] = max((compute[(7)] + placeholder2[((((int)threadIdx.z) + 16))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 8195))] = max((compute[(11)] + placeholder2[((((int)threadIdx.z) + 32))]), 0.000000e+00f);\n T_relu[((((((((int)threadIdx.z) * 256) + (((int)task_idx.y) * 64)) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 4)) + 12291))] = max((compute[(15)] + placeholder2[((((int)threadIdx.z) + 48))]), 0.000000e+00f);\n}\n\n__device__ void fused_add_14_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ T_add, float* __restrict__ placeholder, 
float* __restrict__ placeholder1){\n T_add[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] = (placeholder[(((((int)task_idx.x) * 256) + ((int)threadIdx.x)))] + placeholder1[((((((int)task_idx.x) * 4) + (((int)threadIdx.x) >> 6)) >> 6))]);\n}\n\n__device__ void fused_nn_conv2d_add_1_kernel0_device(dim3 task_idx, dim3 thread_idx, float* __restrict__ placeholder, float* __restrict__ placeholder1, float* __restrict__ T_add, float* __restrict__ placeholder2){\n if (threadIdx.x + threadIdx.y * 4 + threadIdx.z * 1 * 4 >= 4 * 1 * 64) return;\n if (task_idx.x + task_idx.y * 1 + task_idx.z * 4 * 1 >= 1 * 4 * 16) return;\n float compute[1];\n __shared__ float pad_temp_shared[64];\n __shared__ float placeholder_shared[1024];\n compute[(0)] = 0.000000e+00f;\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 1))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 2))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 3))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) 
+ 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 256))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 16))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 17))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 18))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 19))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], 
placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 512))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 32))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 33))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 34))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 35))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) 
+ 11))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 768))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 48))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 49))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 50))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 51))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
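\n    \/\/ __ocml_fma_f32(a, b, c) computes a * b + c (ROCm OCML fused multiply-add);\n    \/\/ these unrolled statements each accumulate one reduction step into compute[0].\n    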
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1024))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 64))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 65))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 66))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 67))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1280))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 80))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 81))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 82))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 83))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1536))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 96))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 97))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 98))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 99))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 1792))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 112))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 113))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 114))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 115))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2048))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 128))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 129))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 130))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 131))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2304))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 144))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 145))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 146))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 147))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2560))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 160))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 161))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 162))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 163))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 2816))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 176))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 177))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 178))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 179))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3072))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 192))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 193))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 194))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 195))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3328))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 208))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 209))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 210))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 211))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3584))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 224))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 225))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 226))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 227))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n __syncthreads();\n if ((((int)threadIdx.x) + ((int)threadIdx.z)) < 64) {\n if (((int)threadIdx.x) < 1) {\n pad_temp_shared[((((int)threadIdx.x) + ((int)threadIdx.z)))] = placeholder[(((((((((int)threadIdx.x) + ((int)threadIdx.z)) >> 2) * 16) + (((int)task_idx.y) * 4)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) & 3)) + 3840))];\n }\n }\n placeholder_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 240))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 1))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 241))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 2))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 242))];\n placeholder_shared[((((((int)threadIdx.z) * 16) + (((int)threadIdx.x) * 4)) + 3))] = placeholder1[(((((((int)task_idx.z) * 16384) + (((int)threadIdx.z) * 256)) + (((int)threadIdx.x) * 4)) + 243))];\n __syncthreads();\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[(((int)threadIdx.x))], placeholder_shared[((((int)threadIdx.z) * 16))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 4))], placeholder_shared[(((((int)threadIdx.z) * 16) + 1))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 8))], placeholder_shared[(((((int)threadIdx.z) * 16) + 2))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 12))], placeholder_shared[(((((int)threadIdx.z) * 16) + 3))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 16))], placeholder_shared[(((((int)threadIdx.z) * 16) + 4))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 20))], placeholder_shared[(((((int)threadIdx.z) * 16) + 5))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 24))], placeholder_shared[(((((int)threadIdx.z) * 16) + 6))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 28))], placeholder_shared[(((((int)threadIdx.z) * 16) + 7))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 32))], placeholder_shared[(((((int)threadIdx.z) * 16) + 8))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 36))], placeholder_shared[(((((int)threadIdx.z) * 16) + 9))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 40))], placeholder_shared[(((((int)threadIdx.z) * 16) + 10))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 44))], placeholder_shared[(((((int)threadIdx.z) * 16) + 11))], compute[(0)]);\n compute[(0)] = 
__ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 48))], placeholder_shared[(((((int)threadIdx.z) * 16) + 12))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 52))], placeholder_shared[(((((int)threadIdx.z) * 16) + 13))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 56))], placeholder_shared[(((((int)threadIdx.z) * 16) + 14))], compute[(0)]);\n compute[(0)] = __ocml_fma_f32(pad_temp_shared[((((int)threadIdx.x) + 60))], placeholder_shared[(((((int)threadIdx.z) * 16) + 15))], compute[(0)]);\n T_add[(((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 16)) + (((int)task_idx.y) * 4)) + ((int)threadIdx.x)))] = (compute[(0)] + placeholder2[(((((((int)task_idx.z) * 1024) + (((int)threadIdx.z) * 16)) + (((int)task_idx.y) * 4)) + ((int)threadIdx.x)))]);\n}\n\n\n\nextern \"C\" __global__ void fused_nn_dense_add_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1000,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_dense_add_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_2_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,2,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_2_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_2_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_2_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = 
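\n      \/\/ global task id for this block: the locally claimed slot plus this launch's task_offset\n      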
temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_multiply_add_nn_relu_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_multiply_add_nn_relu_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3], (float* __restrict__)args[4], (float* __restrict__)args[5]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_max_pool2d_add_nn_relu_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(64,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_max_pool2d_add_nn_relu_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_3_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,8);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_3_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_1_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_1_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void 
fused_add_nn_relu_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(32,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_add_nn_relu_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_softmax_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_softmax_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_7_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,2,8);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_7_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_3_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,4);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_3_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_5_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,8);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n 
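\/\/ Persistent-block scheduling: each resident block loops, claims the next task\n    \/\/ id with an atomicAdd on task_slot, converts it back to the 3-D coordinates\n    \/\/ a regular launch over task_dim would have provided (get_3d_idx), and runs\n    \/\/ the device-side kernel body; the volatile preempted flag lets the host\n    \/\/ revoke the blocks between tasks.\n    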
fused_nn_conv2d_add_nn_relu_5_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,4);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_12_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(8,2,2);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_12_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_add_nn_relu_2_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,4);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_add_nn_relu_2_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3], (float* __restrict__)args[4]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_10_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_10_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_4_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,8);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n 
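\/\/ idx[0] lives in shared memory so the single atomicAdd issued by thread 0\n  \/\/ publishes the claimed task id to every thread in the block.\n  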
__shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_4_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_6_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,4,4);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_6_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_8_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,2,8);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_8_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_add_nn_relu_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,16);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_add_nn_relu_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3], (float* __restrict__)args[4]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,16);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_kernel0_device(task_idx, thread_idx, 
(float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_add_nn_relu_2_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(128,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_add_nn_relu_2_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_add_nn_relu_1_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(64,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_add_nn_relu_1_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_add_nn_relu_3_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(256,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_add_nn_relu_3_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_2_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_2_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_9_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(4,8,2);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n 
idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_9_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_add_nn_relu_1_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,2,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_add_nn_relu_1_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3], (float* __restrict__)args[4]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_3_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,1,128);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_3_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_global_avg_pool2d_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(256,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_global_avg_pool2d_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_1_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(2,2,32);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_1_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_batch_flatten_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(8,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, 
threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_batch_flatten_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_nn_relu_11_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_nn_relu_11_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void fused_add_14_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(48,1,1);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_add_14_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2]);\n }\n}\n\nextern \"C\" __global__ void fused_nn_conv2d_add_1_kernel0(volatile int* preempted, int task_num, int task_offset, int* task_slot, float** args) {\n dim3 task_dim = dim3(1,4,16);\n dim3 thread_idx = dim3(threadIdx.x, threadIdx.y, threadIdx.z);\n __shared__ int idx[1];\n while (true) {\n if (*preempted) return;\n if (threadIdx.x + threadIdx.y + threadIdx.z == 0) {\n \/\/ first thread of the block\n int temp = atomicAdd(task_slot, 1);\n idx[0] = temp + task_offset;\n }\n __syncthreads();\n if (idx[0] >= task_num) return;\n dim3 task_idx = get_3d_idx(idx[0], task_dim);\n fused_nn_conv2d_add_1_kernel0_device(task_idx, thread_idx, (float* __restrict__)args[0], (float* __restrict__)args[1], (float* __restrict__)args[2], (float* __restrict__)args[3]);\n }\n}\n\nextern \"C\" __global__ void preemption_proxy(volatile int* stop, volatile int* host, volatile int* device) {\n while(!(*stop)) {\n *device = *host;\n long long start_clock = clock64();\n while (clock64() < (start_clock+10000)) {}\n }\n}\n","avg_line_length":147.2782121006,"max_line_length":790,"alphanum_fraction":0.5989706722} {"size":2228,"ext":"cu","lang":"Cuda","max_stars_count":36275.0,"content":"#include <vector>\n\n#include \"caffe\/layers\/conv_layer.hpp\"\n\nnamespace caffe {\n\ntemplate <typename Dtype>\nvoid ConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,\n const vector<Blob<Dtype>*>& top) {\n const Dtype* weight = this->blobs_[0]->gpu_data();\n for (int i = 0; i < bottom.size(); ++i) {\n const Dtype* bottom_data = bottom[i]->gpu_data();\n Dtype* top_data = top[i]->mutable_gpu_data();\n for (int n = 0; n < this->num_; 
++n) {\n this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight,\n top_data + n * this->top_dim_);\n if (this->bias_term_) {\n const Dtype* bias = this->blobs_[1]->gpu_data();\n this->forward_gpu_bias(top_data + n * this->top_dim_, bias);\n }\n }\n }\n}\n\ntemplate <typename Dtype>\nvoid ConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,\n const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {\n const Dtype* weight = this->blobs_[0]->gpu_data();\n Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();\n for (int i = 0; i < top.size(); ++i) {\n const Dtype* top_diff = top[i]->gpu_diff();\n \/\/ Bias gradient, if necessary.\n if (this->bias_term_ && this->param_propagate_down_[1]) {\n Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();\n for (int n = 0; n < this->num_; ++n) {\n this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);\n }\n }\n if (this->param_propagate_down_[0] || propagate_down[i]) {\n const Dtype* bottom_data = bottom[i]->gpu_data();\n Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();\n for (int n = 0; n < this->num_; ++n) {\n \/\/ gradient w.r.t. weight. Note that we will accumulate diffs.\n if (this->param_propagate_down_[0]) {\n this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,\n top_diff + n * this->top_dim_, weight_diff);\n }\n \/\/ gradient w.r.t. bottom data, if necessary.\n if (propagate_down[i]) {\n this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,\n bottom_diff + n * this->bottom_dim_);\n }\n }\n }\n }\n}\n\nINSTANTIATE_LAYER_GPU_FUNCS(ConvolutionLayer);\n\n} \/\/ namespace caffe\n","avg_line_length":36.5245901639,"max_line_length":79,"alphanum_fraction":0.618940754} {"size":20200,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/**\n* Tests of feature Spatial 2D messaging\n*\n* Tests cover:\n* > mandatory messaging, send\/receive\n*\/\n#include \"flamegpu\/flame_api.h\"\n#include \"flamegpu\/runtime\/flamegpu_api.h\"\n\n#include \"gtest\/gtest.h\"\n\n\nnamespace test_message_spatial2d {\n\nFLAMEGPU_AGENT_FUNCTION(out_mandatory2D, MsgNone, MsgSpatial2D) {\n FLAMEGPU->message_out.setVariable(\"id\", FLAMEGPU->getVariable(\"id\"));\n FLAMEGPU->message_out.setLocation(\n FLAMEGPU->getVariable(\"x\"),\n FLAMEGPU->getVariable(\"y\"));\n return ALIVE;\n}\nFLAMEGPU_AGENT_FUNCTION(out_optional2D, MsgNone, MsgSpatial2D) {\n if (FLAMEGPU->getVariable(\"do_output\")) {\n FLAMEGPU->message_out.setVariable(\"id\", FLAMEGPU->getVariable(\"id\"));\n FLAMEGPU->message_out.setLocation(\n FLAMEGPU->getVariable(\"x\"),\n FLAMEGPU->getVariable(\"y\"));\n }\n return ALIVE;\n}\nFLAMEGPU_AGENT_FUNCTION(out_optional2DNone, MsgNone, MsgSpatial2D) {\n return ALIVE;\n}\nFLAMEGPU_AGENT_FUNCTION(in2D, MsgSpatial2D, MsgNone) {\n const float x1 = FLAMEGPU->getVariable(\"x\");\n const float y1 = FLAMEGPU->getVariable(\"y\");\n unsigned int count = 0;\n unsigned int badCount = 0;\n unsigned int myBin[2] = {\n static_cast(x1),\n static_cast(y1)\n };\n \/\/ Count how many messages we received (including our own)\n \/\/ This is all those which fall within the 3x3 Moore neighbourhood\n \/\/ Not our search radius\n for (const auto &message : FLAMEGPU->message_in(x1, y1)) {\n unsigned int msgBin[2] = {\n static_cast(message.getVariable(\"x\")),\n static_cast(message.getVariable(\"y\"))\n };\n bool isBad = false;\n for (unsigned int i = 0; i < 2; ++i) { \/\/ Iterate axis\n int binDiff = myBin[i] - msgBin[i];\n if (binDiff > 1 || binDiff < -1) {\n isBad = true;\n }\n }\n count++;\n badCount = isBad ? 
badCount + 1 : badCount;\n }\n FLAMEGPU->setVariable(\"count\", count);\n FLAMEGPU->setVariable(\"badCount\", badCount);\n return ALIVE;\n}\nTEST(Spatial2DMsgTest, Mandatory) {\n std::unordered_map bin_counts;\n \/\/ Construct model\n ModelDescription model(\"Spatial2DMsgTestModel\");\n { \/\/ Location message\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(0, 0);\n message.setMax(11, 11);\n message.setRadius(1);\n \/\/ 11x11 bins, total 121\n message.newVariable(\"id\"); \/\/ unused by current test\n }\n { \/\/ Circle agent\n AgentDescription &agent = model.newAgent(\"agent\");\n agent.newVariable(\"id\");\n agent.newVariable(\"x\");\n agent.newVariable(\"y\");\n agent.newVariable(\"myBin\"); \/\/ This will be presumed bin index of the agent, might not use this\n agent.newVariable(\"count\"); \/\/ Store the distance moved here, for validation\n agent.newVariable(\"badCount\"); \/\/ Store how many messages are out of range\n agent.newFunction(\"out\", out_mandatory2D).setMessageOutput(\"location\");\n agent.newFunction(\"in\", in2D).setMessageInput(\"location\");\n }\n { \/\/ Layer #1\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(out_mandatory2D);\n }\n { \/\/ Layer #2\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(in2D);\n }\n CUDASimulation cuda_model(model);\n\n const int AGENT_COUNT = 2049;\n AgentVector population(model.Agent(\"agent\"), AGENT_COUNT);\n \/\/ Initialise agents (TODO)\n {\n \/\/ Currently population has not been init, so generate an agent population on the fly\n std::default_random_engine rng;\n std::uniform_real_distribution dist(0.0f, 11.0f);\n for (unsigned int i = 0; i < AGENT_COUNT; i++) {\n AgentVector::Agent instance = population[i];\n instance.setVariable(\"id\", i);\n float pos[3] = { dist(rng), dist(rng), dist(rng) };\n instance.setVariable(\"x\", pos[0]);\n instance.setVariable(\"y\", pos[1]);\n \/\/ Solve the bin index\n const unsigned int bin_pos[2] = {\n (unsigned int)(pos[0] \/ 1),\n (unsigned int)(pos[1] \/ 1)\n };\n const unsigned int bin_index =\n bin_pos[1] * 11 +\n bin_pos[0];\n instance.setVariable(\"myBin\", bin_index);\n \/\/ Create it if it doesn't already exist\n if (bin_counts.find(bin_index) == bin_counts.end()) {\n bin_counts.emplace(bin_index, 0);\n }\n bin_counts[bin_index] += 1;\n }\n cuda_model.setPopulationData(population);\n }\n\n \/\/ Generate results expectation\n std::unordered_map bin_results;\n \/\/ Iterate host bin\n for (unsigned int x1 = 0; x1 < 11; x1++) {\n for (unsigned int y1 = 0; y1 < 11; y1++) {\n \/\/ Solve the bin index\n const unsigned int bin_pos1[3] = {\n x1,\n y1\n };\n const unsigned int bin_index1 =\n bin_pos1[1] * 11 +\n bin_pos1[0];\n \/\/ Count our neighbours\n unsigned int count_sum = 0;\n for (int x2 = -1; x2 <= 1; x2++) {\n int bin_pos2[2] = {\n static_cast(bin_pos1[0]) + x2,\n 0\n };\n for (int y2 = -1; y2 <= 1; y2++) {\n bin_pos2[1] = static_cast(bin_pos1[1]) + y2;\n \/\/ Ensure bin is in bounds\n if (\n bin_pos2[0] >= 0 &&\n bin_pos2[1] >= 0 &&\n bin_pos2[0] < 11 &&\n bin_pos2[1] < 11\n ) {\n const unsigned int bin_index2 =\n bin_pos2[1] * 11 +\n bin_pos2[0];\n count_sum += bin_counts[bin_index2];\n }\n }\n }\n bin_results.emplace(bin_index1, count_sum);\n }\n }\n\n \/\/ Execute a single step of the model\n cuda_model.step();\n\n \/\/ Recover the results and check they match what was expected\n\n cuda_model.getPopulationData(population);\n \/\/ Validate each agent has same result\n unsigned int badCountWrong = 0;\n for 
(AgentVector::Agent ai : population) {\n unsigned int myBin = ai.getVariable(\"myBin\");\n unsigned int myResult = ai.getVariable(\"count\");\n EXPECT_EQ(myResult, bin_results.at(myBin));\n if (ai.getVariable(\"badCount\"))\n badCountWrong++;\n }\n EXPECT_EQ(badCountWrong, 0u);\n}\n\nTEST(Spatial2DMsgTest, Optional) {\n \/**\n * This test is same as Mandatory, however extra flag has been added to block certain agents from outputting messages\n * Look for NEW!\n *\/\n std::unordered_map bin_counts;\n std::unordered_map bin_counts_optional;\n \/\/ Construct model\n ModelDescription model(\"Spatial2DMsgTestModel\");\n { \/\/ Location message\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(0, 0);\n message.setMax(11, 11);\n message.setRadius(1);\n \/\/ 11x11 bins, total 121\n message.newVariable(\"id\"); \/\/ unused by current test\n }\n { \/\/ Circle agent\n AgentDescription &agent = model.newAgent(\"agent\");\n agent.newVariable(\"id\");\n agent.newVariable(\"x\");\n agent.newVariable(\"y\");\n agent.newVariable(\"do_output\"); \/\/ NEW!\n agent.newVariable(\"myBin\"); \/\/ This will be presumed bin index of the agent, might not use this\n agent.newVariable(\"count\"); \/\/ Store the distance moved here, for validation\n agent.newVariable(\"badCount\"); \/\/ Store how many messages are out of range\n auto &af = agent.newFunction(\"out\", out_optional2D); \/\/ NEW!\n af.setMessageOutput(\"location\");\n af.setMessageOutputOptional(true); \/\/ NEW!\n agent.newFunction(\"in\", in2D).setMessageInput(\"location\");\n }\n { \/\/ Layer #1\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(out_optional2D); \/\/ NEW!\n }\n { \/\/ Layer #2\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(in2D);\n }\n CUDASimulation cuda_model(model);\n\n const int AGENT_COUNT = 2049;\n AgentVector population(model.Agent(\"agent\"), AGENT_COUNT);\n \/\/ Initialise agents (TODO)\n {\n \/\/ Currently population has not been init, so generate an agent population on the fly\n std::default_random_engine rng;\n std::uniform_real_distribution dist(0.0f, 11.0f);\n std::uniform_real_distribution dist5(0.0f, 5.0f);\n for (unsigned int i = 0; i < AGENT_COUNT; i++) {\n AgentVector::Agent instance = population[i];\n instance.setVariable(\"id\", i);\n float pos[3] = { dist(rng), dist(rng), dist(rng) };\n int do_output = dist5(rng) < 4 ? 
1 : 0; \/\/ 80% chance of output \/\/ NEW!\n instance.setVariable(\"x\", pos[0]);\n instance.setVariable(\"y\", pos[1]);\n instance.setVariable(\"do_output\", do_output); \/\/ NEW!\n \/\/ Solve the bin index\n const unsigned int bin_pos[2] = {\n (unsigned int)(pos[0] \/ 1),\n (unsigned int)(pos[1] \/ 1)\n };\n const unsigned int bin_index =\n bin_pos[1] * 11 +\n bin_pos[0];\n instance.setVariable(\"myBin\", bin_index);\n \/\/ Create it if it doesn't already exist\n bin_counts[bin_index] += 1;\n if (do_output) { \/\/ NEW!\n bin_counts_optional[bin_index] += 1; \/\/ NEW!\n }\n }\n cuda_model.setPopulationData(population);\n }\n\n \/\/ Generate results expectation\n std::unordered_map bin_results;\n std::unordered_map bin_results_optional;\n \/\/ Iterate host bin\n for (unsigned int x1 = 0; x1 < 11; x1++) {\n for (unsigned int y1 = 0; y1 < 11; y1++) {\n \/\/ Solve the bin index\n const unsigned int bin_pos1[3] = {\n x1,\n y1\n };\n const unsigned int bin_index1 =\n bin_pos1[1] * 11 +\n bin_pos1[0];\n \/\/ Count our neighbours\n unsigned int count_sum = 0;\n unsigned int count_sum_optional = 0; \/\/ NEW!\n for (int x2 = -1; x2 <= 1; x2++) {\n int bin_pos2[2] = {\n static_cast(bin_pos1[0]) + x2,\n 0\n };\n for (int y2 = -1; y2 <= 1; y2++) {\n bin_pos2[1] = static_cast(bin_pos1[1]) + y2;\n \/\/ Ensure bin is in bounds\n if (\n bin_pos2[0] >= 0 &&\n bin_pos2[1] >= 0 &&\n bin_pos2[0] < 11 &&\n bin_pos2[1] < 11\n ) {\n const unsigned int bin_index2 =\n bin_pos2[1] * 11 +\n bin_pos2[0];\n count_sum += bin_counts[bin_index2];\n count_sum_optional += bin_counts_optional[bin_index2]; \/\/ NEW!\n }\n }\n }\n bin_results.emplace(bin_index1, count_sum);\n bin_results_optional.emplace(bin_index1, count_sum_optional); \/\/ NEW!\n }\n }\n\n \/\/ Execute a single step of the model\n cuda_model.step();\n\n \/\/ Recover the results and check they match what was expected\n\n cuda_model.getPopulationData(population);\n \/\/ Validate each agent has same result\n unsigned int badCountWrong = 0;\n for (AgentVector::Agent ai : population) {\n unsigned int myBin = ai.getVariable(\"myBin\");\n unsigned int myResult = ai.getVariable(\"count\");\n if (ai.getVariable(\"badCount\"))\n badCountWrong++;\n EXPECT_EQ(myResult, bin_results_optional.at(myBin)); \/\/ NEW!\n }\n EXPECT_EQ(badCountWrong, 0u);\n}\nTEST(Spatial2DMsgTest, OptionalNone) {\n \/**\n * This test is same as Mandatory, however extra flag has been added to block certain agents from outputting messages\n * Look for NEW!\n *\/\n std::unordered_map bin_counts;\n std::unordered_map bin_counts_optional;\n \/\/ Construct model\n ModelDescription model(\"Spatial2DMsgTestModel\");\n { \/\/ Location message\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(0, 0);\n message.setMax(11, 11);\n message.setRadius(1);\n \/\/ 11x11 bins, total 121\n message.newVariable(\"id\"); \/\/ unused by current test\n }\n { \/\/ Circle agent\n AgentDescription &agent = model.newAgent(\"agent\");\n agent.newVariable(\"id\");\n agent.newVariable(\"x\");\n agent.newVariable(\"y\");\n agent.newVariable(\"do_output\"); \/\/ NEW!\n agent.newVariable(\"myBin\"); \/\/ This will be presumed bin index of the agent, might not use this\n agent.newVariable(\"count\"); \/\/ Store the distance moved here, for validation\n agent.newVariable(\"badCount\"); \/\/ Store how many messages are out of range\n auto &af = agent.newFunction(\"out\", out_optional2DNone); \/\/ NEW!\n af.setMessageOutput(\"location\");\n af.setMessageOutputOptional(true); \/\/ NEW!\n 
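\/\/ [editor's note] With optional output, agents that return without writing to message_out contribute nothing to the spatial data structure; out_optional2DNone never writes, so every reader below should observe an empty message list (count == 0).\n 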
agent.newFunction(\"in\", in2D).setMessageInput(\"location\");\n }\n { \/\/ Layer #1\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(out_optional2DNone); \/\/ NEW!\n }\n { \/\/ Layer #2\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(in2D);\n }\n CUDASimulation cuda_model(model);\n\n const int AGENT_COUNT = 2049;\n AgentVector population(model.Agent(\"agent\"), AGENT_COUNT);\n \/\/ Initialise agents (TODO)\n {\n \/\/ Currently population has not been init, so generate an agent population on the fly\n std::default_random_engine rng;\n std::uniform_real_distribution dist(0.0f, 11.0f);\n std::uniform_real_distribution dist5(0.0f, 5.0f);\n for (unsigned int i = 0; i < AGENT_COUNT; i++) {\n AgentVector::Agent instance = population[i];\n instance.setVariable(\"id\", i);\n float pos[3] = { dist(rng), dist(rng), dist(rng) };\n int do_output = dist5(rng) < 4 ? 1 : 0; \/\/ 80% chance of output \/\/ NEW!\n instance.setVariable(\"x\", pos[0]);\n instance.setVariable(\"y\", pos[1]);\n instance.setVariable(\"do_output\", do_output); \/\/ NEW!\n \/\/ Solve the bin index\n const unsigned int bin_pos[2] = {\n (unsigned int)(pos[0] \/ 1),\n (unsigned int)(pos[1] \/ 1)\n };\n const unsigned int bin_index =\n bin_pos[1] * 11 +\n bin_pos[0];\n instance.setVariable(\"myBin\", bin_index);\n \/\/ Create it if it doesn't already exist\n bin_counts[bin_index] += 1;\n if (do_output) { \/\/ NEW!\n bin_counts_optional[bin_index] += 1; \/\/ NEW!\n }\n }\n cuda_model.setPopulationData(population);\n }\n\n \/\/ Execute a single step of the model\n cuda_model.step();\n\n \/\/ Recover the results and check they match what was expected\n\n cuda_model.getPopulationData(population);\n \/\/ Validate each agent has same result\n unsigned int badCountWrong = 0;\n for (AgentVector::Agent ai : population) {\n unsigned int myResult = ai.getVariable(\"count\");\n if (ai.getVariable(\"badCount\"))\n badCountWrong++;\n EXPECT_EQ(myResult, 0u); \/\/ NEW!\n }\n EXPECT_EQ(badCountWrong, 0u);\n}\n\nTEST(Spatial2DMsgTest, BadRadius) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n EXPECT_THROW(message.setRadius(0), InvalidArgument);\n EXPECT_THROW(message.setRadius(-10), InvalidArgument);\n}\nTEST(Spatial2DMsgTest, BadMin) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMax(5, 5);\n EXPECT_THROW(message.setMin(5, 0), InvalidArgument);\n EXPECT_THROW(message.setMin(0, 5), InvalidArgument);\n EXPECT_THROW(message.setMin(6, 0), InvalidArgument);\n EXPECT_THROW(message.setMin(0, 6), InvalidArgument);\n}\nTEST(Spatial2DMsgTest, BadMax) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(5, 5);\n EXPECT_THROW(message.setMax(5, 0), InvalidArgument);\n EXPECT_THROW(message.setMax(0, 5), InvalidArgument);\n EXPECT_THROW(message.setMax(4, 0), InvalidArgument);\n EXPECT_THROW(message.setMax(0, 4), InvalidArgument);\n}\nTEST(Spatial2DMsgTest, UnsetMax) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(5, 5);\n EXPECT_THROW(CUDASimulation m(model), InvalidMessage);\n}\nTEST(Spatial2DMsgTest, UnsetMin) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(5, 5);\n 
EXPECT_THROW(CUDASimulation m(model), InvalidMessage);\n}\nTEST(Spatial2DMsgTest, reserved_name) {\n ModelDescription model(\"Spatial2DMsgTestModel\");\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n EXPECT_THROW(message.newVariable(\"_\"), ReservedName);\n}\n\nFLAMEGPU_AGENT_FUNCTION(count2D, MsgSpatial2D, MsgNone) {\n unsigned int count = 0;\n \/\/ Count how many messages we received (including our own)\n \/\/ This is all those which fall within the 3x3 Moore neighbourhood\n for (const auto &message : FLAMEGPU->message_in(0, 0)) {\n count++;\n }\n FLAMEGPU->setVariable(\"count\", count);\n return ALIVE;\n}\nTEST(Spatial2DMsgTest, ReadEmpty) {\n\/\/ What happens if we read a message list before it has been output?\n ModelDescription model(\"Model\");\n { \/\/ Location message\n MsgSpatial2D::Description &message = model.newMessage(\"location\");\n message.setMin(-3, -3);\n message.setMax(3, 3);\n message.setRadius(2);\n message.newVariable(\"id\"); \/\/ unused by current test\n }\n { \/\/ Circle agent\n AgentDescription &agent = model.newAgent(\"agent\");\n agent.newVariable(\"count\", 0); \/\/ Count the number of messages read\n agent.newFunction(\"in\", count2D).setMessageInput(\"location\");\n }\n { \/\/ Layer #1\n LayerDescription &layer = model.newLayer();\n layer.addAgentFunction(count2D);\n }\n \/\/ Create 1 agent\n AgentVector pop_in(model.Agent(\"agent\"), 1);\n CUDASimulation cuda_model(model);\n cuda_model.setPopulationData(pop_in);\n \/\/ Execute model\n EXPECT_NO_THROW(cuda_model.step());\n \/\/ Check result\n AgentVector pop_out(model.Agent(\"agent\"), 1);\n pop_out[0].setVariable(\"count\", 1);\n cuda_model.getPopulationData(pop_out);\n EXPECT_EQ(pop_out.size(), 1u);\n EXPECT_EQ(pop_out[0].getVariable(\"count\"), 0u);\n}\n} \/\/ namespace test_message_spatial2d\n","avg_line_length":40.4,"max_line_length":121,"alphanum_fraction":0.5954455446} {"size":9139,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \"..\/..\/data\/parameters.h\"\n#include \"..\/..\/data\/size.h\"\n#include \"init_params.h\"\n#include \"option.h\"\n#include \n#include \n#include \n\nFILE *wout = fopen(\"weight.dat\",\"w\");\n\nint LoadConnectivityFile(const char *file_name,unsigned int **host_rptr, unsigned int **d_rptr, unsigned int **d_cindices, CTYPE **d_val, CTYPE weight ,int PreSN_num,int PostSN_num){\n\tPreSN_num = (PreSN_num < 1)? 1: PreSN_num;\n\tPostSN_num = (PostSN_num < 1)? 
1000: PostSN_num;\n\n\n\tFILE *fp;\n\tif((fp = fopen( file_name ,\"r\")) == NULL ){\n\t\tfprintf(stderr, \"can't open file : %s\\n\",file_name);\n\t\texit(1);\n\t}\n\n\tweight = (weight < 0)?(-1)*weight: weight;\n\tint num_of_data = PostSN_num*10;\n\tunsigned int *rptr = NULL;\n\tunsigned int *cindices = NULL;\n\tCTYPE *val =NULL;\n\n\tint max_conv = 0;\n\n\n\trptr = (unsigned int *)malloc( (PostSN_num+1)*sizeof(unsigned int) );\n\tcindices = (unsigned int *)malloc( num_of_data*sizeof(unsigned int) );\n\tval = (CTYPE *)malloc( num_of_data*sizeof(CTYPE) );\n\n\tif(rptr == NULL || cindices == NULL || val == NULL){\n\t\tfprintf(stderr,\"malloc error\\n\");\n\t\texit(1);\n\t}\n\n\tchar str[256] = {'\\0'};\n\tint i = 0;\n\tint prev_post_id = 0;\n\tint post_id;\n\trptr[0] = 0;\n\twhile( fgets(str, 256, fp) != NULL ){\n\n\t\t\/\/sscanf(str, \"%d %d %f\", &cindices[i], &post_id, &val[i] );\n\t\tsscanf(str, \"%d %d\", &cindices[i], &post_id );\n\t\t\n\t\t\/\/ val[i] currently holds the parsed distance, so replace it with the weight.\n\t\tval[i] = weight;\n\n\t\t\/\/ normally post_id > prev_post_id (input is assumed to be sorted by post_id)\n\t\tif(post_id != prev_post_id) {\n\t\t\t\/\/ (reconstructed from context: fill row pointers for the post ids we skipped)\n\t\t\tfor(int j=prev_post_id+1;j<post_id+1;j++){\n\t\t\t\trptr[j] = i;\n\t\t\t}\n\t\t\tprev_post_id = post_id;\n\t\t}\n\t\ti++;\n\n\t\tif(i > num_of_data-1){\n\t\t\tfloat avg = (post_id != 0)?(float)i\/(float)(post_id):i;\n\t\t\tnum_of_data = (int)(avg*PostSN_num);\n\n\t\t\t\/\/fprintf(stderr, \"realloc phase %d to %d\\n\", i, num_of_data);\n\n\t\t\tunsigned int *i_tmp=NULL;\n\t\t\tCTYPE *c_tmp=NULL;\n\t\t\tif(( i_tmp = (unsigned int *)realloc(cindices, num_of_data*sizeof(unsigned int))) == NULL){\n\t\t\t\tfree(cindices);\n\t\t\t\texit(1);\n\t\t\t}else{\n\t\t\t\tif(cindices != i_tmp){\n\t\t\t\t\tcindices = i_tmp;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif(( c_tmp = (CTYPE *)realloc(val, num_of_data*sizeof(CTYPE) )) == NULL){\n\t\t\t\tfree(val);\n\t\t\t\texit(1);\n\t\t\t}else{\n\t\t\t\tif(val != c_tmp){\n\t\t\t\t\tval = c_tmp;\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\tif(num_of_data != i){\n\t\tnum_of_data = i;\n\t\tfor(int j = post_id+1; j < PostSN_num+1;j++){\n\t\t\trptr[j] = num_of_data;\n\t\t}\n\n\t\t\/\/ shrink the buffers down to the number of entries actually read\n\t\t\/\/fprintf(stderr, \"realloc phase :to %d\\n\", num_of_data);\n\t\tunsigned int *i_tmp = NULL;\n\t\tCTYPE *c_tmp = NULL;\n\t\tif(( i_tmp = (unsigned int *)realloc(cindices, num_of_data*sizeof(unsigned int))) == NULL){\n\t\t\tfprintf(stderr, \"can't realloc memory in loading phase: %s\\n\", file_name);\n\t\t\tfree(cindices);\n\t\t\texit(1);\n\t\t}else{\n\t\t\tif(cindices != i_tmp)cindices = i_tmp;\n\t\t}\n\n\t\tif(( c_tmp = (CTYPE *)realloc(val, num_of_data*sizeof(CTYPE))) == NULL){\n\t\t\tfprintf(stderr, \"can't realloc memory in loading phase: %s\\n\", file_name);\n\t\t\tfree(val);\n\t\t\texit(1);\n\t\t}else{\n\t\t\tif(val != c_tmp) val = c_tmp;\n\t\t}\n\t}\n\n\tfor(int i = 0; i < PostSN_num; i++) max_conv = (max_conv < rptr[i+1]-rptr[i])?rptr[i+1]-rptr[i]:max_conv;\n\n\tCUDA_SAFE_CALL( cudaMalloc( d_rptr, sizeof(unsigned int)*(PostSN_num+1)) );\n\tCUDA_SAFE_CALL( cudaMalloc( d_cindices, sizeof(unsigned int)*num_of_data) );\n\tCUDA_SAFE_CALL( cudaMalloc( d_val, sizeof(CTYPE)*num_of_data));\n\n\tCUDA_SAFE_CALL( cudaMemcpy( *d_rptr, rptr, sizeof(unsigned int)*(PostSN_num+1), cudaMemcpyHostToDevice));\n\tCUDA_SAFE_CALL( cudaMemcpy( *d_cindices, cindices, sizeof(unsigned int)*num_of_data, cudaMemcpyHostToDevice));\n\tCUDA_SAFE_CALL( cudaMemcpy( *d_val, val, sizeof(CTYPE)*num_of_data, cudaMemcpyHostToDevice));\n\n\t*host_rptr = 
rptr;\n\n\tfclose(fp);\n\t\/\/free(rptr);\n\tfree(cindices);\n\tfree(val);\n\n\treturn max_conv;\n}\nvoid set_neuron_params(Neuron *n,enum NeuronType type,const char* filename, int num, int base_id, CTYPE Cm, CTYPE tau_m, CTYPE El, CTYPE dt_ref, CTYPE Ie, CTYPE Vr, CTYPE Vth, CTYPE tau_exc, CTYPE tau_inh, CTYPE gL ){\n\tn[type].type = type;\n\tstrcpy(n[type].filename, filename);\n\tn[type].num = num;\n\tn[type].base_id = base_id;\n\tn[type].Cm = Cm;\n\tn[type].tau_m = tau_m;\n\tn[type].El = El;\n\tn[type].dt_ref = dt_ref;\n\tn[type].Ie = Ie;\n\tn[type].Vr = Vr;\n\tn[type].Vth = Vth;\n\tn[type].tau_exc = tau_exc;\n\tn[type].tau_inh = tau_inh;\n\tn[type].gL = gL;\n\treturn;\n}\nvoid set_connectivity_params(Connectivity *c, enum ConnectionType type,const char*filename, int preNum, int postNum, enum NeuronType preType, enum NeuronType postType, CTYPE initial_weight, CTYPE delay, int UseParallelReduction ){\n\tc[type].type = type;\n\tc[type].preNum = preNum;\n\tc[type].postNum = postNum;\n\tc[type].preType = preType;\n\tc[type].postType = postType;\n\tc[type].initial_weight = initial_weight;\n\tc[type].delay = delay;\n\tc[type].max_conv = LoadConnectivityFile(filename,&c[type].host_rptr, &c[type].rptr, &c[type].cindices, &c[type].val,initial_weight, preNum, postNum );\n\tc[type].pr = (UseParallelReduction);\n\n\n\treturn;\n}\n\nint set_base_id(Neuron *Neurons){\n\tint base = 0;\n\tfor(int i = 0;i < TotalNumOfCellTypes;i++){\n\t\tNeurons[i].base_id = base;\n\t\tbase += Neurons[i].num;\n\t}\n\treturn base;\n}\n\n__global__ void InitParams( CTYPE *u, CTYPE *g_exc, CTYPE *dg_exc, CTYPE *g_inh, CTYPE *dg_inh, int *refractory_time_left, char *spike , Neuron *Neurons ,char *type, const int total_nn){\n\tint i = threadIdx.x + blockIdx.x*blockDim.x;\n\tif( i < total_nn){\n\t\tu[i] = Neurons[type[i]].Vr + (Neurons[type[i]].Vth - Neurons[type[i]].Vr)*u[i];\n\t\tg_exc[i] = 0.f;\n\t\tdg_exc[i] = 0.f;\n\t\tg_inh[i] = 0.f;\n\t\tdg_inh[i] = 0.f;\n\t\trefractory_time_left[i] = 0;\n\t\tspike[i] = 0;\n\t}\n};\n\n\nvoid init_neurons_params( Neuron *Neurons){\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tGranuleCell,\n\t\t\"GranuleCell.dat\",\n\t\t206415,\n\t\t19578,\n\t\t7,\n\t\t24.15,\n\t\t-62,\n\t\t1.5,\n\t\t0,\n\t\t-70,\n\t\t-41,\n\t\t5.8,\n\t\t13.6,\n\t\t1.5\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tPurkinjeCell,\n\t\t\"PurkinjeCell.dat\",\n\t\t171,\n\t\t0,\n\t\t334,\n\t\t47,\n\t\t-59,\n\t\t0.5,\n\t\t800,\n\t\t-69,\n\t\t-43,\n\t\t1.1,\n\t\t2.8,\n\t\t7.0\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tGolgiCell,\n\t\t\"GolgiCell.dat\",\n\t\t486,\n\t\t187,\n\t\t145,\n\t\t44,\n\t\t-62,\n\t\t2,\n\t\t36.8,\n\t\t-75,\n\t\t-55,\n\t\t0.5,\n\t\t10,\n\t\t3.6\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tStellateCell,\n\t\t\"StellateCell.dat\",\n\t\t918,\n\t\t673,\n\t\t14.6,\n\t\t9.125,\n\t\t-68,\n\t\t1,\n\t\t24.05,\n\t\t-78,\n\t\t-53,\n\t\t0.64,\n\t\t2,\n\t\t1.0\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tBasketCell,\n\t\t\"BasketCell.dat\",\n\t\t1784,\n\t\t1591,\n\t\t14.6,\n\t\t9.125,\n\t\t-68,\n\t\t1,\n\t\t24.05,\n\t\t-78,\n\t\t-53,\n\t\t0.64,\n\t\t2,\n\t\t1.0\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tDCNCell,\n\t\t\"DCNCell.dat\",\n\t\t16,\n\t\t171,\n\t\t142,\n\t\t33,\n\t\t-45,\n\t\t1.5,\n\t\t180,\n\t\t-55,\n\t\t-36,\n\t\t1,\n\t\t0.7,\n\t\t1.56\n\t);\n\n\tset_neuron_params(\n\t\tNeurons,\n\t\tGlomerulus,\n\t\t\"Glomerulus.dat\",\n\t\t16203,\n\t\t3375,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0\n\t);\n\n}\n\nvoid init_connectivity_params(Connectivity 
*connectivities){\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGlomerulusGolgi,\n\t\t\"GlomerulusGolgi.dat\",\n\t\t16203,\n\t\t486,\n\t\tGlomerulus,\n\t\tGolgiCell,\n\t\t2.0,\n\t\t4.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGlomerulusGranule,\n\t\t\"GlomerulusGranule.dat\",\n\t\t16203,\n\t\t206415,\n\t\tGlomerulus,\n\t\tGranuleCell,\n\t\t9.0,\n\t\t4.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGranuleGolgi,\n\t\t\"GranuleGolgi.dat\",\n\t\t206415,\n\t\t486,\n\t\tGranuleCell,\n\t\tGolgiCell,\n\t\t0.4,\n\t\t5.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGolgiGranule,\n\t\t\"GolgiGranule.dat\",\n\t\t486,\n\t\t206415,\n\t\tGolgiCell,\n\t\tGranuleCell,\n\t\t-5.0,\n\t\t2.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tAscAxonPurkinje,\n\t\t\"AscAxonPurkinje.dat\",\n\t\t206415,\n\t\t171,\n\t\tGranuleCell,\n\t\tPurkinjeCell,\n\t\t75.0,\n\t\t2.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tPFPurkinje,\n\t\t\"PFPurkinje.dat\",\n\t\t206415,\n\t\t171,\n\t\tGranuleCell,\n\t\tPurkinjeCell,\n\t\t0.02,\n\t\t5.0,\n\t\t1\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tPFBasket,\n\t\t\"PFBasket.dat\",\n\t\t206415,\n\t\t1784,\n\t\tGranuleCell,\n\t\tBasketCell,\n\t\t0.2,\n\t\t5.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tPFStellate,\n\t\t\"PFStellate.dat\",\n\t\t206415,\n\t\t918,\n\t\tGranuleCell,\n\t\tStellateCell,\n\t\t0.2,\n\t\t5.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGapJunctionsStellate,\n\t\t\"GapJunctionsStellate.dat\",\n\t\t918,\n\t\t918,\n\t\tStellateCell,\n\t\tStellateCell,\n\t\t-2.0,\n\t\t1.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGapJunctionsBasket,\n\t\t\"GapJunctionsBasket.dat\",\n\t\t1784,\n\t\t1784,\n\t\tBasketCell,\n\t\tBasketCell,\n\t\t-2.5,\n\t\t1.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGapJunctionsGolgi,\n\t\t\"GapJunctionsGolgi.dat\",\n\t\t486,\n\t\t486,\n\t\tGolgiCell,\n\t\tGolgiCell,\n\t\t-8.0,\n\t\t1.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tPurkinjeDCN,\n\t\t\"PurkinjeDCN.dat\",\n\t\t171,\n\t\t16,\n\t\tPurkinjeCell,\n\t\tDCNCell,\n\t\t-0.0075,\n\t\t4.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tGlomerulusDCN,\n\t\t\"GlomerulusDCN.dat\",\n\t\t16203,\n\t\t16,\n\t\tGlomerulus,\n\t\tDCNCell,\n\t\t0.006,\n\t\t4.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tBasketPurkinje,\n\t\t\"BasketPurkinje.dat\",\n\t\t1784,\n\t\t171,\n\t\tBasketCell,\n\t\tPurkinjeCell,\n\t\t-9.0,\n\t\t4.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tStellatePurkinje,\n\t\t\"StellatePurkinje.dat\",\n\t\t918,\n\t\t171,\n\t\tStellateCell,\n\t\tPurkinjeCell,\n\t\t-8.5,\n\t\t5.0,\n\t\t0\n\t);\n\n\tset_connectivity_params(\n\t\tconnectivities,\n\t\tAscendingAxonGolgi,\n\t\t\"AscendingAxonGolgi.dat\",\n\t\t206415,\n\t\t486,\n\t\tGranuleCell,\n\t\tGolgiCell,\n\t\t20.0,\n\t\t2.0,\n\t\t0\n\t);\n\n}\n\n","avg_line_length":17.1463414634,"max_line_length":231,"alphanum_fraction":0.6410985885} {"size":15252,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/* \n * Copyright (c) 2018 NVIDIA CORPORATION. 
All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and\/or other materials provided with the distribution.\n * * Neither the name of NVIDIA CORPORATION nor the names of its\n * contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\/\n\n#include \"tutorial.h\"\n#include \n\nrtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); \nrtDeclareVariable(float3, shading_normal, attribute shading_normal, ); \n\nrtDeclareVariable(PerRayData_radiance, prd_radiance, rtPayload, );\nrtDeclareVariable(PerRayData_shadow, prd_shadow, rtPayload, );\n\nrtDeclareVariable(optix::Ray, ray, rtCurrentRay, );\nrtDeclareVariable(float, t_hit, rtIntersectionDistance, );\nrtDeclareVariable(uint2, launch_index, rtLaunchIndex, );\n\nrtDeclareVariable(unsigned int, radiance_ray_type, , );\nrtDeclareVariable(unsigned int, shadow_ray_type , , );\nrtDeclareVariable(float, scene_epsilon, , );\nrtDeclareVariable(rtObject, top_object, , );\n\n\n\/\/\n\/\/ Pinhole camera implementation\n\/\/\nrtDeclareVariable(float3, eye, , );\nrtDeclareVariable(float3, U, , );\nrtDeclareVariable(float3, V, , );\nrtDeclareVariable(float3, W, , );\nrtDeclareVariable(float3, bad_color, , );\nrtBuffer output_buffer;\n\nRT_PROGRAM void pinhole_camera()\n{\n size_t2 screen = output_buffer.size();\n\n float2 d = make_float2(launch_index) \/ make_float2(screen) * 2.f - 1.f;\n float3 ray_origin = eye;\n float3 ray_direction = normalize(d.x*U + d.y*V + W);\n\n optix::Ray ray(ray_origin, ray_direction, radiance_ray_type, scene_epsilon );\n\n PerRayData_radiance prd;\n prd.importance = 1.f;\n prd.depth = 0;\n\n rtTrace(top_object, ray, prd);\n\n output_buffer[launch_index] = make_color( prd.result );\n}\n\n\n\/\/\n\/\/ Environment map background\n\/\/\nrtTextureSampler envmap;\nRT_PROGRAM void envmap_miss()\n{\n float theta = atan2f( ray.direction.x, ray.direction.z );\n float phi = M_PIf * 0.5f - acosf( ray.direction.y );\n float u = (theta + M_PIf) * (0.5f * M_1_PIf);\n float v = 0.5f * ( 1.0f + sin(phi) );\n prd_radiance.result = make_float3( tex2D(envmap, u, v) );\n}\n \n\n\/\/\n\/\/ Terminates and fully attenuates ray after any hit\n\/\/\nRT_PROGRAM void any_hit_shadow()\n{\n \/\/ this material is opaque, so it fully attenuates all shadow rays\n 
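\/\/ [editor's note] This is the usual OptiX hard-shadow any-hit idiom: zero the shadow payload's attenuation and terminate traversal at the first occluder. A semi-transparent material would instead scale the attenuation and keep searching, e.g. (hedged sketch, not part of the original file): prd_shadow.attenuation *= make_float3(0.5f); rtIgnoreIntersection();\n 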
prd_shadow.attenuation = make_float3(0);\n\n rtTerminateRay();\n}\n \n\n\/\/\n\/\/ Procedural rusted metal surface shader\n\/\/\n\n\/*\n * Translated to CUDA C from Larry Gritz's LGRustyMetal.sl shader found at:\n * http:\/\/renderman.org\/RMR\/Shaders\/LGShaders\/LGRustyMetal.sl\n *\n * Used with permission from tal AT renderman DOT org.\n *\/\n\nrtDeclareVariable(float3, ambient_light_color, , );\nrtBuffer lights; \nrtDeclareVariable(rtObject, top_shadower, , );\nrtDeclareVariable(float, importance_cutoff, , ); \nrtDeclareVariable(int, max_depth, , );\nrtDeclareVariable(float3, reflectivity_n, , );\n\nrtDeclareVariable(float, metalKa, , ) = 1;\nrtDeclareVariable(float, metalKs, , ) = 1;\nrtDeclareVariable(float, metalroughness, , ) = .1;\nrtDeclareVariable(float, rustKa, , ) = 1;\nrtDeclareVariable(float, rustKd, , ) = 1;\nrtDeclareVariable(float3, rustcolor, , ) = {.437, .084, 0};\nrtDeclareVariable(float3, metalcolor, , ) = {.7, .7, .7};\nrtDeclareVariable(float, txtscale, , ) = .02;\nrtDeclareVariable(float, rusty, , ) = 0.2;\nrtDeclareVariable(float, rustbump, , ) = 0.85;\n#define MAXOCTAVES 6\n\nrtTextureSampler noise_texture;\nstatic __device__ __inline__ float snoise(float3 p)\n{\n return tex3D(noise_texture, p.x, p.y, p.z) * 2 -1;\n}\n\nRT_PROGRAM void box_closest_hit_radiance()\n{\n float3 world_geo_normal = normalize( rtTransformNormal( RT_OBJECT_TO_WORLD, geometric_normal ) );\n float3 world_shade_normal = normalize( rtTransformNormal( RT_OBJECT_TO_WORLD, shading_normal ) );\n float3 ffnormal = faceforward( world_shade_normal, -ray.direction, world_geo_normal );\n float3 hit_point = ray.origin + t_hit * ray.direction;\n\n \/* Sum several octaves of abs(snoise), i.e. turbulence. Limit the\n * number of octaves by the estimated change in PP between adjacent\n * shading samples.\n *\/\n float3 PP = txtscale * hit_point;\n float a = 1;\n float sum = 0;\n for(int i = 0; i < MAXOCTAVES; i++ ){\n sum += a * fabs(snoise(PP));\n PP *= 2.0f;\n a *= 0.5f;\n }\n\n \/* Scale the rust appropriately, modulate it by another noise \n * computation, then sharpen it by squaring its value.\n *\/\n float rustiness = step (1-rusty, clamp (sum,0.0f,1.0f));\n rustiness *= clamp (abs(snoise(PP)), 0.0f, .08f) \/ 0.08f;\n rustiness *= rustiness;\n\n \/* If we have any rust, calculate the color of the rust, taking into\n * account the perturbed normal and shading like matte.\n *\/\n float3 Nrust = ffnormal;\n if (rustiness > 0) {\n \/* If it's rusty, also add a high frequency bumpiness to the normal *\/\n Nrust = normalize(ffnormal + rustbump * snoise(PP));\n Nrust = faceforward (Nrust, -ray.direction, world_geo_normal);\n }\n\n float3 color = mix(metalcolor * metalKa, rustcolor * rustKa, rustiness) * ambient_light_color;\n for(int i = 0; i < lights.size(); ++i) {\n BasicLight light = lights[i];\n float3 L = normalize(light.pos - hit_point);\n float nmDl = dot( ffnormal, L);\n float nrDl = dot( Nrust, L);\n\n if( nmDl > 0.0f || nrDl > 0.0f ){\n \/\/ cast shadow ray\n PerRayData_shadow shadow_prd;\n shadow_prd.attenuation = make_float3(1.0f);\n float Ldist = length(light.pos - hit_point);\n optix::Ray shadow_ray( hit_point, L, shadow_ray_type, scene_epsilon, Ldist );\n rtTrace(top_shadower, shadow_ray, shadow_prd);\n float3 light_attenuation = shadow_prd.attenuation;\n\n if( fmaxf(light_attenuation) > 0.0f ){\n float3 Lc = light.color * light_attenuation;\n nrDl = max(nrDl * rustiness, 0.0f);\n color += rustKd * rustcolor * nrDl * Lc;\n\n float r = nmDl * (1.0f-rustiness);\n if(nmDl > 0.0f){\n float3 H = 
normalize(L - ray.direction);\n float nmDh = dot( ffnormal, H );\n if(nmDh > 0)\n color += r * metalKs * Lc * pow(nmDh, 1.f\/metalroughness);\n }\n }\n\n }\n }\n\n float3 r = schlick(-dot(ffnormal, ray.direction), reflectivity_n * (1-rustiness));\n float importance = prd_radiance.importance * optix::luminance( r );\n\n \/\/ reflection ray\n if( importance > importance_cutoff && prd_radiance.depth < max_depth) {\n PerRayData_radiance refl_prd;\n refl_prd.importance = importance;\n refl_prd.depth = prd_radiance.depth+1;\n float3 R = reflect( ray.direction, ffnormal );\n optix::Ray refl_ray( hit_point, R, radiance_ray_type, scene_epsilon );\n rtTrace(top_object, refl_ray, refl_prd);\n color += r * refl_prd.result;\n }\n\n prd_radiance.result = color;\n}\n \n\n\/\/\n\/\/ Phong surface shading with shadows and schlick-approximated fresnel reflections.\n\/\/ Uses procedural texture to determine diffuse response.\n\/\/\nrtDeclareVariable(float, phong_exp, , );\nrtDeclareVariable(float3, tile_v0, , );\nrtDeclareVariable(float3, tile_v1, , ); \nrtDeclareVariable(float3, crack_color, , );\nrtDeclareVariable(float, crack_width, , );\nrtDeclareVariable(float3, Ka, , );\nrtDeclareVariable(float3, Ks, , );\nrtDeclareVariable(float3, Kd, , );\n\nRT_PROGRAM void floor_closest_hit_radiance()\n{\n float3 world_geo_normal = normalize( rtTransformNormal( RT_OBJECT_TO_WORLD, geometric_normal ) );\n float3 world_shade_normal = normalize( rtTransformNormal( RT_OBJECT_TO_WORLD, shading_normal ) );\n float3 ffnormal = faceforward( world_shade_normal, -ray.direction, world_geo_normal );\n float3 color = Ka * ambient_light_color;\n\n float3 hit_point = ray.origin + t_hit * ray.direction;\n\n float v0 = dot(tile_v0, hit_point);\n float v1 = dot(tile_v1, hit_point);\n v0 = v0 - floor(v0);\n v1 = v1 - floor(v1);\n\n float3 local_Kd;\n if( v0 > crack_width && v1 > crack_width ){\n local_Kd = Kd;\n } else {\n local_Kd = crack_color;\n }\n\n for(int i = 0; i < lights.size(); ++i) {\n BasicLight light = lights[i];\n float3 L = normalize(light.pos - hit_point);\n float nDl = dot( ffnormal, L);\n\n if( nDl > 0.0f ){\n \/\/ cast shadow ray\n PerRayData_shadow shadow_prd;\n shadow_prd.attenuation = make_float3(1.0f);\n float Ldist = length(light.pos - hit_point);\n optix::Ray shadow_ray( hit_point, L, shadow_ray_type, scene_epsilon, Ldist );\n rtTrace(top_shadower, shadow_ray, shadow_prd);\n float3 light_attenuation = shadow_prd.attenuation;\n\n if( fmaxf(light_attenuation) > 0.0f ){\n float3 Lc = light.color * light_attenuation;\n color += local_Kd * nDl * Lc;\n\n float3 H = normalize(L - ray.direction);\n float nDh = dot( ffnormal, H );\n if(nDh > 0)\n color += Ks * Lc * pow(nDh, phong_exp);\n }\n\n }\n }\n\n float3 r = schlick(-dot(ffnormal, ray.direction), reflectivity_n);\n float importance = prd_radiance.importance * optix::luminance( r );\n\n \/\/ reflection ray\n if( importance > importance_cutoff && prd_radiance.depth < max_depth) {\n PerRayData_radiance refl_prd;\n refl_prd.importance = importance;\n refl_prd.depth = prd_radiance.depth+1;\n float3 R = reflect( ray.direction, ffnormal );\n optix::Ray refl_ray( hit_point, R, radiance_ray_type, scene_epsilon );\n rtTrace(top_object, refl_ray, refl_prd);\n color += r * refl_prd.result;\n }\n\n prd_radiance.result = color;\n}\n \n\n\/\/\n\/\/ (NEW)\n\/\/ Bounding box program for programmable convex hull primitive\n\/\/\nrtDeclareVariable(float3, chull_bbmin, , );\nrtDeclareVariable(float3, chull_bbmax, , );\nRT_PROGRAM void chull_bounds (int primIdx, float result[6])\n{\n 
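\/\/ [editor's note] An OptiX bounds program reports an axis-aligned bounding box by writing six floats (min.xyz then max.xyz) into result; casting to optix::Aabb is the conventional way to fill that layout. Here the hull's box was precomputed on the host as chull_bbmin\/chull_bbmax.\n 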
optix::Aabb* aabb = (optix::Aabb*)result;\n aabb->m_min = chull_bbmin;\n aabb->m_max = chull_bbmax;\n}\n\n\n\/\/\n\/\/ (NEW)\n\/\/ Intersection program for programmable convex hull primitive\n\/\/\nrtBuffer planes;\nRT_PROGRAM void chull_intersect(int primIdx)\n{\n int n = planes.size();\n float t0 = -FLT_MAX;\n float t1 = FLT_MAX;\n float3 t0_normal = make_float3(0);\n float3 t1_normal = make_float3(0);\n for(int i = 0; i < n && t0 < t1; ++i ) {\n float4 plane = planes[i];\n float3 n = make_float3(plane);\n float d = plane.w;\n\n float denom = dot(n, ray.direction);\n float t = -(d + dot(n, ray.origin))\/denom;\n if( denom < 0){\n \/\/ enter\n if(t > t0){\n t0 = t;\n t0_normal = n;\n }\n } else {\n \/\/exit\n if(t < t1){\n t1 = t;\n t1_normal = n;\n }\n }\n }\n\n if(t0 > t1)\n return;\n\n if(rtPotentialIntersection( t0 )){\n shading_normal = geometric_normal = t0_normal;\n rtReportIntersection(0);\n } else if(rtPotentialIntersection( t1 )){\n shading_normal = geometric_normal = t1_normal;\n rtReportIntersection(0);\n }\n}\n\n\n\/\/\n\/\/ (NEW)\n\/\/ Dielectric surface shader\n\/\/\nrtDeclareVariable(float3, cutoff_color, , );\nrtDeclareVariable(float, fresnel_exponent, , );\nrtDeclareVariable(float, fresnel_minimum, , );\nrtDeclareVariable(float, fresnel_maximum, , );\nrtDeclareVariable(float, refraction_index, , );\nrtDeclareVariable(int, refraction_maxdepth, , );\nrtDeclareVariable(int, reflection_maxdepth, , );\nrtDeclareVariable(float3, refraction_color, , );\nrtDeclareVariable(float3, reflection_color, , );\nrtDeclareVariable(float3, extinction_constant, , );\nRT_PROGRAM void glass_closest_hit_radiance()\n{\n \/\/ intersection vectors\n const float3 h = ray.origin + t_hit * ray.direction; \/\/ hitpoint\n const float3 n = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); \/\/ normal\n const float3 i = ray.direction; \/\/ incident direction\n\n float reflection = 1.0f;\n float3 result = make_float3(0.0f);\n\n float3 beer_attenuation;\n if(dot(n, ray.direction) > 0){\n \/\/ Beer's law attenuation\n beer_attenuation = exp(extinction_constant * t_hit);\n } else {\n beer_attenuation = make_float3(1);\n }\n\n \/\/ refraction\n if (prd_radiance.depth < min(refraction_maxdepth, max_depth))\n {\n float3 t; \/\/ transmission direction\n if ( refract(t, i, n, refraction_index) )\n {\n\n \/\/ check for external or internal reflection\n float cos_theta = dot(i, n);\n if (cos_theta < 0.0f)\n cos_theta = -cos_theta;\n else\n cos_theta = dot(t, n);\n\n reflection = fresnel_schlick(cos_theta, fresnel_exponent, fresnel_minimum, fresnel_maximum);\n\n float importance = prd_radiance.importance * (1.0f-reflection) * optix::luminance( refraction_color * beer_attenuation );\n if ( importance > importance_cutoff ) {\n optix::Ray ray( h, t, radiance_ray_type, scene_epsilon );\n PerRayData_radiance refr_prd;\n refr_prd.depth = prd_radiance.depth+1;\n refr_prd.importance = importance;\n\n rtTrace( top_object, ray, refr_prd );\n result += (1.0f - reflection) * refraction_color * refr_prd.result;\n } else {\n result += (1.0f - reflection) * refraction_color * cutoff_color;\n }\n }\n \/\/ else TIR\n }\n\n \/\/ reflection\n if (prd_radiance.depth < min(reflection_maxdepth, max_depth))\n {\n float3 r = reflect(i, n);\n\n float importance = prd_radiance.importance * reflection * optix::luminance( reflection_color * beer_attenuation );\n if ( importance > importance_cutoff ) {\n optix::Ray ray( h, r, radiance_ray_type, scene_epsilon );\n PerRayData_radiance refl_prd;\n refl_prd.depth = 
prd_radiance.depth+1;\n refl_prd.importance = importance;\n\n rtTrace( top_object, ray, refl_prd );\n result += reflection * reflection_color * refl_prd.result;\n } else {\n result += reflection * reflection_color * cutoff_color;\n }\n }\n\n result = result * beer_attenuation;\n\n prd_radiance.result = result;\n}\n\n\n\/\/\n\/\/ Set pixel to solid color upon failure\n\/\/\nRT_PROGRAM void exception()\n{\n output_buffer[launch_index] = make_color( bad_color );\n}\n","avg_line_length":32.8706896552,"max_line_length":127,"alphanum_fraction":0.6774193548} {"size":8895,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/*\n * Copyright (c) 2019, NVIDIA CORPORATION.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nstruct TrajectorySubsetTest : public GdfTest \n{\n};\n\ntemplate \nusing wrapper = cudf::test::column_wrapper;\n\nconstexpr gdf_size_type column_size{1000};\n\nvoid test_subset(std::vector ids_to_keep)\n{\n std::vector sequence(column_size);\n std::iota(sequence.begin(), sequence.end(), 0);\n\n \/\/three sorted trajectories: one with 2\/3 of the points, two with 1\/6\n std::vector id_vector(column_size);\n std::transform(sequence.cbegin(), sequence.cend(), id_vector.begin(),\n [](int32_t i) { return (i < 2 * i \/ 3) ? 0 : \n (i < 5 * i \/ 6) ? 
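// note: the guards above are never true (integer division gives
// 2 * i / 3 <= i and 5 * i / 6 <= i for i >= 0), so every id collapses
// to 2; the thresholds presumably mean 2 * column_size / 3 and
// 5 * column_size / 6, which would yield the 2/3 + 1/6 + 1/6 split
// described in the comment above.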
1 : 2; });\n \n \/\/ timestamp milliseconds\n std::vector ms_vector(sequence.begin(), sequence.end()); \n \n \/\/randomize sequence\n std::seed_seq seed{0};\n std::mt19937 g(seed);\n \n std::shuffle(sequence.begin(), sequence.end(), g);\n\n wrapper in_x(column_size,\n [&](gdf_index_type i) { return static_cast(sequence[i]); });\n wrapper in_y(column_size,\n [&](gdf_index_type i) { return static_cast(sequence[i]); });\n wrapper in_id(column_size,\n [&](gdf_index_type i) { return id_vector[sequence[i]]; });\n wrapper in_ts(column_size,\n [&](gdf_index_type i) { \n return static_cast(ms_vector[sequence[i]]); \n });\n wrapper ids{ids_to_keep};\n\n gdf_column out_x{}, out_y{}, out_id{}, out_ts{};\n\n \/\/ sort the ids to keep now that we've copied them unsorted to input column\n std::sort(ids_to_keep.begin(), ids_to_keep.end());\n\n std::vector expected_sequence(sequence.size());\n auto expected_size =\n std::copy_if(sequence.begin(), sequence.end(), expected_sequence.begin(),\n [&](int32_t i) {\n return std::binary_search(ids_to_keep.begin(), ids_to_keep.end(),\n id_vector[i]);\n }\n ) - expected_sequence.begin();\n\n wrapper expected_x(expected_size,\n [&](gdf_index_type i) { return static_cast(expected_sequence[i]); });\n wrapper expected_y(expected_size,\n [&](gdf_index_type i) { return static_cast(expected_sequence[i]); });\n wrapper expected_id(expected_size,\n [&](gdf_index_type i) { return id_vector[expected_sequence[i]]; });\n wrapper expected_ts(expected_size,\n [&](gdf_index_type i) {\n return static_cast(ms_vector[expected_sequence[i]]);\n });\n\n gdf_size_type num_hit{0};\n \n EXPECT_NO_THROW(\n num_hit = cuspatial::subset_trajectory_id(ids, in_x, in_y, in_id, in_ts,\n out_x, out_y, out_id, out_ts));\n\n EXPECT_EQ(num_hit, expected_size);\n EXPECT_TRUE(expected_x == out_x);\n EXPECT_TRUE(expected_y == out_y);\n EXPECT_TRUE(expected_id == out_id);\n EXPECT_TRUE(expected_ts == out_ts);\n}\n\nTEST_F(TrajectorySubsetTest, SelectSome)\n{\n std::vector keep_all{0, 1, 2};\n test_subset(keep_all);\n std::vector keep_all_unsorted{2, 0, 1};\n test_subset(keep_all_unsorted);\n std::vector keep_two{1, 2};\n test_subset(keep_two);\n std::vector keep_one{1};\n test_subset(keep_one);\n std::vector keep_none{};\n test_subset(keep_none);\n}\n\nTEST_F(TrajectorySubsetTest, BadData)\n{\n \/\/constexpr gdf_size_type column_size{1000};\n\n gdf_column out_x, out_y, out_id, out_timestamp;\n\n gdf_column bad_x, bad_y, bad_in_id, bad_timestamp, bad_id;\n gdf_column_view(&bad_id, 0, 0, 0, GDF_INT32);\n gdf_column_view(&bad_x, 0, 0, 0, GDF_FLOAT64);\n gdf_column_view(&bad_y, 0, 0, 0, GDF_FLOAT64);\n gdf_column_view(&bad_in_id, 0, 0, 0, GDF_INT32);\n gdf_column_view(&bad_timestamp, 0, 0, 0, GDF_TIMESTAMP);\n\n \/\/ null pointers\n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n out_x, out_y, \n out_id,\n out_timestamp),\n \"Null input data\");\n \n \/\/ size mismatch\n bad_x.data = bad_y.data = bad_in_id.data = bad_timestamp.data = \n reinterpret_cast(0x0badf00d);\n bad_x.size = 10;\n bad_y.size = 12; \/\/ mismatch\n bad_in_id.size = 10;\n bad_timestamp.size = 10;\n \n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n out_x, out_y, \n out_id,\n out_timestamp),\n \"Data size mismatch\");\n\n \/\/ Invalid ID datatype\n bad_y.size = 10;\n bad_in_id.dtype = GDF_FLOAT32;\n\n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n 
out_x, out_y, \n out_id,\n out_timestamp),\n \"Invalid trajectory ID datatype\");\n\n bad_in_id.dtype = GDF_INT32;\n bad_id.dtype = GDF_INT8;\n\n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n out_x, out_y, \n out_id,\n out_timestamp),\n \"Trajectory ID datatype mismatch\");\n\n bad_id.dtype = GDF_INT32;\n bad_timestamp.dtype = GDF_DATE32;\n\n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n out_x, out_y, \n out_id,\n out_timestamp),\n \"Invalid timestamp datatype\");\n\n bad_timestamp.dtype = GDF_TIMESTAMP;\n bad_x.null_count = 5;\n CUDF_EXPECT_THROW_MESSAGE(cuspatial::subset_trajectory_id(bad_id,\n bad_x, bad_y,\n bad_in_id,\n bad_timestamp,\n out_x, out_y, \n out_id,\n out_timestamp),\n \"NULL support unimplemented\");\n}\n","avg_line_length":41.1805555556,"max_line_length":85,"alphanum_fraction":0.5088251827} {"size":1367,"ext":"cu","lang":"Cuda","max_stars_count":115.0,"content":"\/\/ RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device \\\n\/\/ RUN: -emit-llvm %s -o - | FileCheck -check-prefix=AMDGCN %s\n\/\/ RUN: %clang_cc1 -triple nvptx64-nvidia-cuda- -fcuda-is-device \\\n\/\/ RUN: -emit-llvm %s -o - | FileCheck -check-prefix=NVPTX %s\n#include \"Inputs\/cuda.h\"\n\nstruct A {\n int a[32];\n};\n\n\/\/ AMDGCN: define amdgpu_kernel void @_Z6kernel1A(%struct.A %x.coerce)\n\/\/ NVPTX: define void @_Z6kernel1A(%struct.A* byval align 4 %x)\n__global__ void kernel(A x) {\n}\n\nclass Kernel {\npublic:\n \/\/ AMDGCN: define amdgpu_kernel void @_ZN6Kernel12memberKernelE1A(%struct.A %x.coerce)\n \/\/ NVPTX: define void @_ZN6Kernel12memberKernelE1A(%struct.A* byval align 4 %x)\n static __global__ void memberKernel(A x){}\n template static __global__ void templateMemberKernel(T x) {}\n};\n\n\ntemplate \n__global__ void templateKernel(T x) {}\n\nvoid launch(void*);\n\nvoid test() {\n Kernel K;\n \/\/ AMDGCN: define amdgpu_kernel void @_Z14templateKernelI1AEvT_(%struct.A %x.coerce)\n \/\/ NVPTX: define void @_Z14templateKernelI1AEvT_(%struct.A* byval align 4 %x)\n launch((void*)templateKernel);\n\n \/\/ AMDGCN: define amdgpu_kernel void @_ZN6Kernel20templateMemberKernelI1AEEvT_(%struct.A %x.coerce)\n \/\/ NVPTX: define void @_ZN6Kernel20templateMemberKernelI1AEEvT_(%struct.A* byval align 4 %x)\n launch((void*)Kernel::templateMemberKernel);\n}\n","avg_line_length":34.175,"max_line_length":101,"alphanum_fraction":0.7227505486} {"size":5678,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/* Copyright 2020 Stanford, Facebook\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n#include \"dlrm.h\"\n#include \"cuda_helper.h\"\n\nvoid DataLoader::load_sparse_input(const Task *task,\n const std::vector ®ions,\n Context ctx,\n Runtime* runtime)\n{\n assert(regions.size() == 2);\n assert(task->regions.size() == 2);\n int hash = *((int*) task->args);\n int num_sparse_inputs = hash \/ 1000;\n int my_input_idx = hash % 1000;\n SampleIdxs* 
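// The loaders in this file gather the sampled rows into pinned (page-locked)
// host memory and ship them with a single cudaMemcpy. A minimal sketch of
// that pattern (buffer names and byte count are placeholders):
#if 0
float* staging = nullptr;
checkCUDA(cudaHostAlloc(&staging, num_bytes,
                        cudaHostAllocPortable | cudaHostAllocMapped));
// ... gather the selected samples into `staging` on the host ...
checkCUDA(cudaMemcpy(device_dst, staging, num_bytes, cudaMemcpyHostToDevice));
checkCUDA(cudaFreeHost(staging));
#endif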
meta = (SampleIdxs*) task->local_args;\n TensorAccessorR acc_full_input(\n regions[0], task->regions[0], FID_DATA, ctx, runtime);\n TensorAccessorW acc_batch_input(\n regions[1], task->regions[1], FID_DATA, ctx, runtime,\n false\/*readOutput*\/);\n int batch_size = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;\n int in_dim = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;\n assert(acc_full_input.rect.hi[0]-acc_full_input.rect.lo[0]+1 == num_sparse_inputs * in_dim);\n int64_t* input_zc;\n checkCUDA(cudaHostAlloc(&input_zc, sizeof(int64_t) * acc_batch_input.rect.volume(),\n cudaHostAllocPortable | cudaHostAllocMapped));\n assert(batch_size == meta->num_samples);\n for (int i = 0; i < batch_size; i++) {\n int full_offset = meta->idxs[i] * num_sparse_inputs * in_dim + my_input_idx * in_dim;\n int batch_offset = i * in_dim;\n assert(full_offset + in_dim <= (int)acc_full_input.rect.volume());\n for (int j = 0; j < in_dim; j++) {\n input_zc[batch_offset+j] = acc_full_input.ptr[full_offset+j];\n }\n }\n checkCUDA(cudaMemcpy(acc_batch_input.ptr, input_zc,\n sizeof(int64_t) * acc_batch_input.rect.volume(),\n cudaMemcpyHostToDevice));\n checkCUDA(cudaFreeHost(input_zc));\n checkCUDA(cudaDeviceSynchronize());\n \/\/print_tensor<2, int>(acc_batch_input.ptr, acc_batch_input.rect, \"[DataLoader:load_sparse]\");\n}\n\nvoid DataLoader::load_dense_input(const Task *task,\n const std::vector ®ions,\n Context ctx,\n Runtime* runtime)\n{\n assert(regions.size() == 2);\n assert(task->regions.size() == 2);\n SampleIdxs* meta = (SampleIdxs*) task->local_args;\n TensorAccessorR acc_full_input(\n regions[0], task->regions[0], FID_DATA, ctx, runtime);\n TensorAccessorW acc_batch_input(\n regions[1], task->regions[1], FID_DATA, ctx, runtime,\n false\/*readOutput*\/);\n int batch_size = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;\n int num_feats = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;\n assert(acc_batch_input.rect.hi[0] == acc_full_input.rect.hi[0]);\n assert(acc_batch_input.rect.lo[0] == acc_full_input.rect.lo[0]);\n float* input_zc;\n checkCUDA(cudaHostAlloc(&input_zc, sizeof(float) * acc_batch_input.rect.volume(),\n cudaHostAllocPortable | cudaHostAllocMapped));\n assert(batch_size == meta->num_samples);\n for (int i = 0; i < batch_size; i++) {\n int base_offset = meta->idxs[i] * num_feats;\n for (int j = 0; j < num_feats; j++)\n input_zc[i*num_feats+j] = acc_full_input.ptr[base_offset+j];\n }\n checkCUDA(cudaMemcpy(acc_batch_input.ptr, input_zc,\n sizeof(float) * acc_batch_input.rect.volume(),\n cudaMemcpyHostToDevice));\n checkCUDA(cudaFreeHost(input_zc));\n}\n\nvoid DataLoader::load_label(const Task *task,\n const std::vector& regions,\n Context ctx,\n Runtime* runtime)\n{\n assert(regions.size() == 2);\n assert(task->regions.size() == 2);\n SampleIdxs* meta = (SampleIdxs*) task->local_args;\n TensorAccessorR acc_full_label(\n regions[0], task->regions[0], FID_DATA, ctx, runtime);\n TensorAccessorW acc_batch_label(\n regions[1], task->regions[1], FID_DATA, ctx, runtime,\n false\/*readOutput*\/);\n int batch_size = acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;\n int num_label = acc_batch_label.rect.hi[0] - acc_batch_label.rect.lo[0] + 1;\n assert(num_label == 1); \/\/ Kaggle dataset a has single label\n assert(acc_batch_label.rect.hi[0] == acc_full_label.rect.hi[0]);\n assert(acc_batch_label.rect.lo[0] == acc_full_label.rect.lo[0]);\n float* label_zc;\n checkCUDA(cudaHostAlloc(&label_zc, sizeof(float) * 
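// cudaHostAllocPortable pins the buffer for all CUDA contexts and
// cudaHostAllocMapped also maps it into the device address space; the
// mapping is unused here, the buffer only stages the explicit copy below.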
acc_batch_label.rect.volume(),\n cudaHostAllocPortable | cudaHostAllocMapped));\n assert(batch_size == meta->num_samples);\n for (int i = 0; i < batch_size; i++) {\n int base_offset = meta->idxs[i] * num_label;\n for (int j = 0; j < num_label; j++)\n label_zc[i*num_label+j] = acc_full_label.ptr[base_offset+j];\n \/\/printf(\"meta->idxs[%d]=%d label=%.2lf\\n\", i, meta->idxs[i], label_zc[i]);\n }\n checkCUDA(cudaMemcpy(acc_batch_label.ptr, label_zc,\n sizeof(float) * acc_batch_label.rect.volume(),\n cudaMemcpyHostToDevice));\n checkCUDA(cudaFreeHost(label_zc));\n}\n","avg_line_length":46.162601626,"max_line_length":96,"alphanum_fraction":0.648820007} {"size":2158,"ext":"cu","lang":"Cuda","max_stars_count":70.0,"content":"\/*\n * queryb.cu\n *\/\n\/* cf. Jason Sanders, Edward Kandrot. CUDA by Example: An Introduction to General-Purpose GPU Programming *\/\n\/* 3.3 Querying Devices \n** pp. 32 *\/\n#include \n#include \"common\/errors.h\"\n\nint main(void) {\n cudaDeviceProp prop;\n\n int count;\n HANDLE_ERROR(\n\t cudaGetDeviceCount( &count )\n\t );\n\n for (int i = 0 ; i < count ; i++ ) {\n HANDLE_ERROR(\n\t\t cudaGetDeviceProperties( &prop, i )\n\t\t );\n printf(\" --- General Information for device %d ---\\n\", i);\n printf( \"Name: %s\\n\", prop.name );\n printf( \"Compute capability: %d.%d\\n\", prop.major, prop.minor );\n printf( \"Clock rate: %d\\n\", prop.clockRate);\n printf( \"Device copy overlap: \");\n if (prop.deviceOverlap)\n printf( \"Enabled\\n\" );\n else\n printf( \"Disabled\\n\" );\n printf( \"Kernel execution timeout : \");\n if (prop.kernelExecTimeoutEnabled)\n printf( \"Enabled\\n\" );\n else\n printf( \"Disabled\\n\" );\n\n printf(\" --- Memory Information for device %d ---\\n\", i );\n printf(\"Total global mem: %ld\\n\", prop.totalGlobalMem );\n printf(\"Total constant Mem: %ld\\n\", prop.totalConstMem );\n printf(\"Max mem pitch: %ld\\n\", prop.memPitch );\n printf(\"Texture Alignment: %ld\\n\", prop.textureAlignment);\n printf(\" --- MP Information for device %d ---\\n\", i);\n printf(\"Multiprocessor count: %d\\n\",\n\t prop.multiProcessorCount);\n printf(\"Shared mem per mp: %ld\\n\", prop.sharedMemPerBlock);\n printf(\"Registers per mp: %d\\n\", prop.regsPerBlock);\n printf(\"Threads in warp: %d\\n\", prop.warpSize);\n printf(\"Max threads per block: %d\\n\",\n\t prop.maxThreadsPerBlock );\n printf(\"Max thread dimensions: (%d, %d, %d) \\n\",\n\t prop.maxThreadsDim[0], prop.maxThreadsDim[1],\n\t prop.maxThreadsDim[2]);\n printf(\"Max grid dimensions: (%d, %d, %d) \\n\",\n\t prop.maxGridSize[0], prop.maxGridSize[1],\n\t prop.maxGridSize[2] );\n printf(\"\\n\");\n \n printf(\" --- Other Information for device %d ---\\n\", i);\n printf(\"Max. 3D textures dimensions: (%d, %d, %d) \\n\",\n\t prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2] );\n \n \n\n }\n}\n","avg_line_length":32.696969697,"max_line_length":108,"alphanum_fraction":0.5982391103} {"size":6087,"ext":"cu","lang":"Cuda","max_stars_count":2.0,"content":"\/\/ Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n#include \"paddle\/phi\/kernels\/roi_pool_grad_kernel.h\"\n\n#include \"paddle\/phi\/backends\/gpu\/gpu_context.h\"\n#include \"paddle\/phi\/backends\/gpu\/gpu_launch_config.h\"\n#include \"paddle\/phi\/common\/place.h\"\n#include \"paddle\/phi\/core\/kernel_registry.h\"\n#include \"paddle\/phi\/kernels\/funcs\/math_function.h\"\n\n#include \"paddle\/fluid\/memory\/memory.h\"\n#include \"paddle\/fluid\/platform\/device\/gpu\/gpu_primitives.h\"\n\nnamespace phi {\n\nstatic constexpr int kNumCUDAThreads = 512;\nstatic constexpr int kNumMaxinumNumBlocks = 4096;\n\nstatic inline int NumBlocks(const int N) {\n return std::min((N + kNumCUDAThreads - 1) \/ kNumCUDAThreads,\n kNumMaxinumNumBlocks);\n}\n\ntemplate \n__global__ void GPURoiPoolBackward(const int nthreads,\n const T* input_rois,\n const T* output_grad,\n const int64_t* arg_max_data,\n const int num_rois,\n const float spatial_scale,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n int* box_batch_id_data,\n T* input_grad) {\n int index = blockIdx.x * blockDim.x + threadIdx.x;\n int offset = blockDim.x * gridDim.x;\n for (int i = index; i < nthreads; i += offset) {\n int pw = i % pooled_width;\n int ph = (i \/ pooled_width) % pooled_height;\n int c = (i \/ pooled_width \/ pooled_height) % channels;\n int n = i \/ pooled_width \/ pooled_height \/ channels;\n\n int roi_batch_ind = box_batch_id_data[n];\n int input_offset = (roi_batch_ind * channels + c) * height * width;\n int output_offset = (n * channels + c) * pooled_height * pooled_width;\n const T* offset_output_grad = output_grad + output_offset;\n T* offset_input_grad = input_grad + input_offset;\n const int64_t* offset_arg_max_data = arg_max_data + output_offset;\n\n int arg_max = offset_arg_max_data[ph * pooled_width + pw];\n if (arg_max != -1) {\n paddle::platform::CudaAtomicAdd(\n offset_input_grad + arg_max,\n static_cast(offset_output_grad[ph * pooled_width + pw]));\n }\n }\n}\n\ntemplate \nvoid RoiPoolGradKernel(const Context& dev_ctx,\n const DenseTensor& x,\n const DenseTensor& boxes,\n paddle::optional boxes_num,\n const DenseTensor& arg_max,\n const DenseTensor& out_grad,\n int pooled_height,\n int pooled_width,\n float spatial_scale,\n DenseTensor* dx) {\n auto x_dims = x.dims();\n int channels = x_dims[1];\n int height = x_dims[2];\n int width = x_dims[3];\n int rois_num = boxes.dims()[0];\n\n if (dx) {\n DenseTensor box_batch_id_list;\n box_batch_id_list.Resize({rois_num});\n int* box_batch_id_data =\n dev_ctx.template HostAlloc(&box_batch_id_list);\n\n auto gplace = dev_ctx.GetPlace();\n if (boxes_num) {\n int boxes_batch_size = boxes_num->numel();\n std::vector boxes_num_list(boxes_batch_size);\n paddle::memory::Copy(phi::CPUPlace(),\n boxes_num_list.data(),\n gplace,\n boxes_num->data(),\n sizeof(int) * boxes_batch_size,\n 0);\n int start = 0;\n for (int n = 0; n < boxes_batch_size; ++n) 
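      // expand per-batch box counts into one batch index per box, e.g.
      // boxes_num = [2, 3]  ->  box_batch_id_data = [0, 0, 1, 1, 1]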
{\n for (int i = start; i < start + boxes_num_list[n]; ++i) {\n box_batch_id_data[i] = n;\n }\n start += boxes_num_list[n];\n }\n } else {\n auto boxes_lod = boxes.lod().back();\n int boxes_batch_size = boxes_lod.size() - 1;\n for (int n = 0; n < boxes_batch_size; ++n) {\n for (size_t i = boxes_lod[n]; i < boxes_lod[n + 1]; ++i) {\n box_batch_id_data[i] = n;\n }\n }\n }\n int bytes = box_batch_id_list.numel() * sizeof(int);\n auto roi_ptr = paddle::memory::Alloc(dev_ctx, bytes);\n int* roi_id_data = reinterpret_cast(roi_ptr->ptr());\n paddle::memory::Copy(gplace,\n roi_id_data,\n phi::CPUPlace(),\n box_batch_id_data,\n bytes,\n dev_ctx.stream());\n\n dev_ctx.template Alloc(dx);\n phi::funcs::SetConstant set_zero;\n set_zero(dev_ctx, dx, static_cast(0));\n\n int output_grad_size = out_grad.numel();\n int blocks = NumBlocks(output_grad_size);\n int threads = kNumCUDAThreads;\n\n if (output_grad_size > 0) {\n GPURoiPoolBackward<<>>(\n output_grad_size,\n boxes.data(),\n out_grad.data(),\n arg_max.data(),\n rois_num,\n spatial_scale,\n channels,\n height,\n width,\n pooled_height,\n pooled_width,\n roi_id_data,\n dx->data());\n }\n }\n}\n\n} \/\/ namespace phi\n\nPD_REGISTER_KERNEL(\n roi_pool_grad, GPU, ALL_LAYOUT, phi::RoiPoolGradKernel, float, double) {\n kernel->InputAt(3).SetDataType(phi::DataType::INT64);\n}\n","avg_line_length":36.6686746988,"max_line_length":76,"alphanum_fraction":0.5787744373} {"size":18946,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"#include \"face\/cu_model_kernel.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\/* Includes, cuda *\/\n#include \n#include \n\n#include \"util\/cudautil.h\"\n#include \"util\/cu_quaternion.h\"\n#include \"align\/cu_loss.h\"\n#include \"util\/transform.h\"\n\n#define BLOCKSIZE 128\n\n__global__\nvoid _calculateVertexPosition(float *position_d, const C_Params params, const C_PcaDeformModel deformModel) {\n int start_index = threadIdx.x + blockIdx.x * blockDim.x;\n int stride = blockDim.x * gridDim.x; \/\/ total number of threads in the grid\n\n const int colDim = deformModel.dim;\n\n \/\/ grid-striding loop\n for (int i = start_index; i < deformModel.dim; i += stride) {\n\n position_d[i] = 0;\n for (int j = 0; j < deformModel.shapeRank; j++) {\n position_d[i] += params.fa1Params_d[j] * deformModel.shapeDeformBasis_d[i + colDim * j];\n }\n\n for (int j = 0; j < deformModel.expressionRank; j++) {\n position_d[i] += params.fa2Params_d[j] * deformModel.expressionDeformBasis_d[i + colDim * j];\n }\n\n position_d[i] +=\n deformModel.meanShapeDeformation_d[i]\n + deformModel.meanExpressionDeformation_d[i]\n + deformModel.ref_d[i];\n }\n}\n\nvoid calculateVertexPosition(float *position_d, const C_Params params, const C_PcaDeformModel deformModel) {\n int idim = deformModel.dim;\n dim3 dimBlock(BLOCKSIZE);\n dim3 dimGrid((idim + BLOCKSIZE - 1) \/ BLOCKSIZE);\n\n _calculateVertexPosition << < dimGrid, dimBlock >> > (position_d, params, deformModel);\n CHECK_ERROR_MSG(\"Kernel Error\");\n}\n\n__global__\nvoid _homogeneousPositions(float *h_position_d, const float *position_d, int nPoints) {\n\n int start_index = threadIdx.x + blockIdx.x * blockDim.x;\n int stride = blockDim.x * gridDim.x; \/\/ total number of threads in the grid\n\n \/\/ grid-striding loop\n for (int index = start_index; index < nPoints; index += stride) {\n \/\/ homogeneous coordinates (x,y,z,1);\n float pos[4] = {position_d[3 * index], position_d[3 * index + 1], position_d[3 * index + 2], 1};\n memcpy(&h_position_d[4 * index], 
&pos[0], 4 * sizeof(float));\n }\n}\n\n__global__\nvoid _hnormalizedPositions(float *position_d, const float *h_position_d, int nPoints) {\n\n int start_index = threadIdx.x + blockIdx.x * blockDim.x;\n int stride = blockDim.x * gridDim.x; \/\/ total number of threads in the grid\n\n \/\/ grid-striding loop\n for (int index = start_index; index < nPoints; index += stride) {\n\n \/\/ homogeneous coordinates (x,y,z,1);\n float hnorm = h_position_d[4 * index + 3];\n position_d[3 * index] = h_position_d[4 * index] \/ hnorm;\n position_d[3 * index + 1] = h_position_d[4 * index + 1] \/ hnorm;\n position_d[3 * index + 2] = h_position_d[4 * index + 2] \/ hnorm;\n }\n}\n\nvoid cudaMatMul(float *matC, cublasHandle_t cnpHandle,\n const float *matA, int aRows, int aCols,\n const float *matB, int bRows, int bCols) {\n\n \/\/ Don't know what this is (scalar?) but examples use this\n cublasStatus_t status;\n const float alf = 1;\n const float bet = 0;\n const float *alpha = &alf;\n const float *beta = &bet;\n\n \/* Perform operation using cublas, inputs\/outputs are col-major.\n * vector and array were originally Eigen which defaults to Col-major\n * m is rows for A and C\n * n is cols for B and C\n * k is cols for A and rows for B*\/\n \/\/ Matrix Mult C = \u03b1 op ( A ) op ( B ) + \u03b2 C\n status =\n cublasSgemm(cnpHandle,\n CUBLAS_OP_N, CUBLAS_OP_N, \/\/ Matrix op(A) and op(B): No-op, Transpose, Conjugate\n aRows, bCols, aCols, \/\/(m,n,k)\n alpha,\n matA, aRows\/*leading dim, ROWS?*\/, \/\/(4x4) or (mxk)\n matB, bRows\/*leading dim*\/, \/\/(4xN) or (kxn)\n beta,\n matC, bRows\/*leading dim*\/); \/\/(4xN) or (mxk)\n\n if (status != CUBLAS_STATUS_SUCCESS) {\n printf(\"SGEMM a b : (%d,%d), (%d,%d)\\n\", aRows, aCols, bRows, bCols);\n printf(\"SGEMM status: %d\\n\", status);\n throw std::runtime_error(\"MatMul Failed\\n\");\n }\n}\n\nvoid applyRigidAlignment(float *align_pos_d, cublasHandle_t cnpHandle,\n const float *position_d, const float *transMat, int N) {\n int size_homo = 4 * N;\n dim3 grid = ((N + BLOCKSIZE - 1) \/ BLOCKSIZE);\n dim3 block = BLOCKSIZE;\n\n float *matB, *matC;\n\n cudaMalloc((void **) &matB, size_homo * sizeof(float));\n cudaMalloc((void **) &matC, size_homo * sizeof(float));\n\n\n \/\/ Create homogenous matrix (x,y,z,1)\n _homogeneousPositions << < grid, block >> > (matB, position_d, N);\n CHECK_ERROR_MSG(\"Kernel Error\");\n\n \/* Perform operation using cublas, inputs\/outputs are col-major.\n * vector and array were originally Eigen which defaults to Col-major\n * m is rows for A and C\n * n is cols for B and C\n * k is cols for A and rows for B*\/\n \/\/ Matrix Mult C = \u03b1 op ( A ) op ( B ) + \u03b2 C\n cudaMatMul(matC, cnpHandle, transMat, 4, 4, matB, 4, N);\n\n \/\/ hnormalized point (x,y,z)\n _hnormalizedPositions << < grid, block >> > (align_pos_d, matC, N);\n CHECK_ERROR_MSG(\"Kernel Error\");\n\n cudaFree(matB);\n cudaFree(matC);\n}\n\n__global__\nstatic void calculateLandmarkIndices(int *mesh_inds, int *scan_inds, C_PcaDeformModel model, C_ScanPointCloud scan) {\n const int start = blockDim.x * blockIdx.x + threadIdx.x;\n const int size = scan.numLmks;\n const int step = blockDim.x * gridDim.x;\n\n for(int ind=start; ind(std::round(xyz[0] * fx \/ xyz[2] + cx));\n uv[1] = static_cast(std::round(xyz[1] * fy \/ xyz[2] + cy));\n}\n\n__global__\nvoid _find_mesh_to_scan_corr(int *meshCorr_d, int *scanCorr_d, float *distance_d, int *numCorr,\n const float *position_d, int num_points, C_ScanPointCloud scan, float radius, int maxPoints) {\n const int start = blockIdx.x * 
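  // the search below relies on the pinhole projection helper above,
  // u = round(x * fx / z + cx), v = round(y * fy / z + cy); for example a
  // point at (0.1, 0.0, 0.5) with fx = 500, cx = 320 lands at u = 420.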
blockDim.x + threadIdx.x;\n const int size = num_points;\n const int step = blockDim.x * gridDim.x;\n \/\/ Initialize numCorr to 0, will use atomicAdd to increment counter\n if(threadIdx.x == 0) numCorr[0] = 0;\n __syncthreads();\n\n for(int i=start; i=0 && scanIndy >=0 && scanIndz >=0) {\n \/\/ Check for NaN Points\n bool isNaN = std::isfinite(scan.scanPoints_d[scanIndx]) == 0\n || std::isfinite(scan.scanPoints_d[scanIndy]) == 0\n || std::isfinite(scan.scanPoints_d[scanIndz]) == 0;\n\n \/\/ Add correspondance if within search radius, if radius is 0, include all points\n if (!isNaN) {\n \/\/ Check z distance for within radius tolerance (Use xyz EuclidDist instead?)\n float dist = std::fabs(position_d[i * 3 + 2] - scan.scanPoints_d[scanIndx + 2]);\n\/\/ printf(\"Correspondance %.4f\\n\", dist);\n if (radius <= 0 || dist <= radius) {\n int idx = atomicAdd(&numCorr[0], 1);\n if (maxPoints <= 0 || idx < maxPoints) {\n\/\/ printf(\"Correspondance s:%d -> m:%d, d:%.4f\\n\", scanIndx \/ 3, i, dist);\n meshCorr_d[idx] = i;\n scanCorr_d[idx] = scanIndx \/ 3;\n distance_d[idx] = dist;\n }\n }\n }\n }\n }\n}\n\nvoid reduce_closest_corr(int *meshCorr_d, int *scanCorr_d, float *distance_d, int *numCorr_d, int maxPoints) {\n int numCorr;\n CUDA_CHECK(cudaMemcpy(&numCorr, numCorr_d, sizeof(int), cudaMemcpyDeviceToHost));\n\n if (numCorr > maxPoints){\n numCorr = maxPoints;\n }\n\n int *meshCorr_h = new int[numCorr];\n int *scanCorr_h = new int[numCorr];\n float *distance_h = new float[numCorr];\n\n CUDA_CHECK(cudaMemcpy(meshCorr_h, meshCorr_d, numCorr * sizeof(int), cudaMemcpyDeviceToHost));\n CUDA_CHECK(cudaMemcpy(scanCorr_h, scanCorr_d, numCorr * sizeof(int), cudaMemcpyDeviceToHost));\n CUDA_CHECK(cudaMemcpy(distance_h, distance_d, numCorr * sizeof(float), cudaMemcpyDeviceToHost));\n\n std::map scantomesh;\n std::map scandist;\n\n for (int idx = 0; idx < numCorr; idx++){\n auto meshIdx = meshCorr_h[idx];\n auto scanIdx = scanCorr_h[idx];\n auto dist = distance_h[idx];\n\n if(scantomesh.find(scanIdx) != scantomesh.end()) {\n if (dist < scandist[scanIdx]){\n scantomesh[scanIdx] = meshIdx;\n scandist[scanIdx] = dist;\n }\n } else {\n scantomesh.insert(std::make_pair(scanIdx, meshIdx));\n scandist.insert(std::make_pair(scanIdx, dist));\n }\n }\n\n std::vector finScan, finMesh;\n std::vector findist;\n for(std::map::iterator it = scantomesh.begin(); it != scantomesh.end(); ++it) {\n finScan.push_back(it->first);\n finMesh.push_back(it->second);\n findist.push_back(scandist[it->first]);\n }\n auto size = finMesh.size();\n CUDA_CHECK(cudaMemcpy(meshCorr_d, finMesh.data(), size* sizeof(int), cudaMemcpyHostToDevice));\n CUDA_CHECK(cudaMemcpy(scanCorr_d, finScan.data(), size* sizeof(int), cudaMemcpyHostToDevice));\n CUDA_CHECK(cudaMemcpy(distance_d, findist.data(), size* sizeof(float), cudaMemcpyHostToDevice));\n CUDA_CHECK(cudaMemcpy(numCorr_d, &size, sizeof(int), cudaMemcpyHostToDevice));\n}\n\nvoid find_mesh_to_scan_corr(int *meshCorr_d, int *scanCorr_d, float *distance_d, int *numCorr,\n const float *position_d, int num_points, C_ScanPointCloud scan, float radius, int maxPoints) {\n int idim = num_points\/3;\n dim3 dimBlock(BLOCKSIZE);\n dim3 dimGrid((idim + BLOCKSIZE - 1) \/ BLOCKSIZE);\n CUDA_ZERO(&numCorr, static_cast(1));\n\n _find_mesh_to_scan_corr << < dimGrid, dimBlock >> > (meshCorr_d, scanCorr_d, distance_d, numCorr,\n position_d, num_points, scan, radius, maxPoints);\n reduce_closest_corr(meshCorr_d, scanCorr_d, distance_d, numCorr, maxPoints);\n CHECK_ERROR_MSG(\"Kernel Error\");\n}\n\nvoid 
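// reduce_closest_corr above filters the raw candidates on the host so that
// every scan point keeps only its closest mesh vertex, i.e. the final set
// is one-to-one from scan to mesh. A minimal call sketch for the search
// (buffer names and max_pairs are placeholders):
#if 0
int *mesh_inds_d, *scan_inds_d, *num_corr_d;
float *dist_d;
CUDA_MALLOC(&mesh_inds_d, (size_t)max_pairs);
CUDA_MALLOC(&scan_inds_d, (size_t)max_pairs);
CUDA_MALLOC(&dist_d, (size_t)max_pairs);
CUDA_MALLOC(&num_corr_d, (size_t)1);
find_mesh_to_scan_corr(mesh_inds_d, scan_inds_d, dist_d, num_corr_d,
                       aligned_positions_d, deformModel.dim, scan,
                       /*radius=*/0.0f /* <= 0 accepts all */, max_pairs);
#endif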
calculateAlignedPositions(float *result_pos_d, float *align_pos_d, float *position_d,\n const C_Params params, const C_PcaDeformModel deformModel, const C_ScanPointCloud scanPointCloud,\n cublasHandle_t cnpHandle){\n \/\/ Calculate position_d\n calculateVertexPosition(position_d, params, deformModel);\n\n \/\/ Rigid alignment\n applyRigidAlignment(align_pos_d, cnpHandle, position_d, scanPointCloud.rigidTransform_d, deformModel.dim \/ 3);\n float r[9];\n float trans[16];\n float *trans_d;\n CUDA_CHECK(cudaMalloc((void **) &trans_d, 16*sizeof(float)));\n\n calc_r_from_u(r, params.fuParams_h);\n create_trans_from_tu(trans, params.ftParams_h, r);\n CUDA_CHECK(cudaMemcpy(trans_d, trans, 16* sizeof(float), cudaMemcpyHostToDevice));\n applyRigidAlignment(result_pos_d, cnpHandle, align_pos_d, trans_d, deformModel.dim \/ 3);\n}\n\nvoid calculatePointPairLoss(float *residual, float *fa1Jacobian, float *fa2Jacobian, float *ftJacobian, float *fuJacobian,\n PointPair point_pair, C_Params params, C_PcaDeformModel deformModel,\n C_Residuals c_residuals, C_Jacobians c_jacobians,\n const float weight, const bool isJacobianRequired) {\n\n if (point_pair.point_count > 0) {\n calc_residual_point_pair(c_residuals.residual_d, point_pair, weight);\n\n }\n\n \/*\n * Copy computed residual to Host\n *\/\n CUDA_CHECK(cudaMemcpy(residual, c_residuals.residual_d, c_residuals.numResuduals*sizeof(float), cudaMemcpyDeviceToHost));\n\n if (isJacobianRequired) {\n \/\/ Compute Jacobians for each parameter\n if (point_pair.point_count > 0) {\n calc_derivatives_point_pair(c_jacobians.ftJacobian_d, c_jacobians.fuJacobian_d,\n c_jacobians.fa1Jacobian_d, c_jacobians.fa2Jacobian_d,\n params.fuParams_d, deformModel, point_pair, weight);\n }\n\n \/*\n * Copy computed jacobian to Host\n *\/\n CUDA_CHECK(cudaMemcpy(fa1Jacobian, c_jacobians.fa1Jacobian_d, c_jacobians.numa1j * sizeof(float), cudaMemcpyDeviceToHost));\n CUDA_CHECK(cudaMemcpy(fa2Jacobian, c_jacobians.fa2Jacobian_d, c_jacobians.numa2j * sizeof(float), cudaMemcpyDeviceToHost));\n CUDA_CHECK(cudaMemcpy(ftJacobian, c_jacobians.ftJacobian_d, c_jacobians.numtj * sizeof(float), cudaMemcpyDeviceToHost));\n CUDA_CHECK(cudaMemcpy(fuJacobian, c_jacobians.fuJacobian_d, c_jacobians.numuj * sizeof(float), cudaMemcpyDeviceToHost));\n }\n\n}\n\nvoid calculateLandmarkLoss(float *residual, float *fa1Jacobian, float *fa2Jacobian, float *ftJacobian, float *fuJacobian,\n float *position_d, cublasHandle_t cnpHandle, C_Params params, C_PcaDeformModel deformModel,\n C_ScanPointCloud scanPointCloud, C_Residuals c_residuals, C_Jacobians c_jacobians,\n const float weight, const bool isJacobianRequired) {\n\n float *align_pos_d, *result_pos_d;\n\n\n \/\/ Allocate memory for Rigid aligned positions\n CUDA_CHECK(cudaMalloc((void **) &align_pos_d, deformModel.dim * sizeof(float)));\n CUDA_CHECK(cudaMalloc((void **) &result_pos_d, deformModel.dim * sizeof(float)));\n \/\/ CuUDA Kernels run synchronously by default, to run asynchronously must explicitly specify streams\n\n \/\/ Calculate aligned positions\n calculateAlignedPositions(result_pos_d, align_pos_d, position_d, params, deformModel, scanPointCloud, cnpHandle);\n\n \/*\n * Compute Point Pairs (Correspondances)\n *\/\n PointPair point_pair{\n .mesh_position_d=result_pos_d,\n .mesh_positoin_before_transform_d=align_pos_d,\n .ref_position_d=scanPointCloud.scanLandmark_d,\n .mesh_corr_inds_d=nullptr,\n .ref_corr_inds_d=nullptr,\n .point_count=scanPointCloud.numLmks\n };\n CUDA_MALLOC(&point_pair.mesh_corr_inds_d, 
static_cast(scanPointCloud.numLmks));\n CUDA_MALLOC(&point_pair.ref_corr_inds_d, static_cast(scanPointCloud.numLmks));\n\n calculateLandmarkIndices<<<1,scanPointCloud.numLmks>>>\n (point_pair.mesh_corr_inds_d, point_pair.ref_corr_inds_d, deformModel, scanPointCloud);\n\n \/\/ Calculate residual & jacobian for Landmarks\n calculatePointPairLoss(residual, fa1Jacobian, fa2Jacobian, ftJacobian, fuJacobian, point_pair,\n params, deformModel, c_residuals, c_jacobians, weight, isJacobianRequired);\n\n CUDA_CHECK(cudaFree(align_pos_d));\n CUDA_CHECK(cudaFree(result_pos_d));\n CUDA_FREE(point_pair.mesh_corr_inds_d);\n CUDA_FREE(point_pair.ref_corr_inds_d);\n}\n\nvoid calculateGeometricLoss(float *residual, float *fa1Jacobian, float *fa2Jacobian, float *ftJacobian, float *fuJacobian,\n float *position_d, cublasHandle_t cnpHandle, const C_Params params,\n const C_PcaDeformModel deformModel, const C_ScanPointCloud scanPointCloud,\n C_Residuals c_residuals, C_Jacobians c_jacobians, const float searchRadius, const float weight,\n const bool isJacobianRequired) {\n float *align_pos_d, *result_pos_d;\n const int num_residuals = c_residuals.numResuduals;\n\n \/\/ Allocate memory for Rigid aligned positions\n CUDA_CHECK(cudaMalloc((void **) &align_pos_d, deformModel.dim * sizeof(float)));\n CUDA_CHECK(cudaMalloc((void **) &result_pos_d, deformModel.dim * sizeof(float)));\n \/\/ CuUDA Kernels run synchronously by default, to run asynchronously must explicitly specify streams\n\n \/*\n * Compute Loss\n *\/\n \/\/ Calculate aligned positions\n calculateAlignedPositions(result_pos_d, align_pos_d, position_d, params, deformModel, scanPointCloud, cnpHandle);\n\n \/*\n * Compute Point Pairs (Correspondances)\n *\/\n PointPair point_pair{\n .mesh_position_d=result_pos_d,\n .mesh_positoin_before_transform_d=align_pos_d,\n .ref_position_d=scanPointCloud.scanPoints_d,\n .mesh_corr_inds_d=nullptr,\n .ref_corr_inds_d=nullptr,\n .point_count=0\n };\n\n float* distance_d;\n int* numCorr_d; \/\/ TODO: Move to find_mesh_to_scan_corr, reference point_pair.point_count instead\n float radius = searchRadius;\n\n CUDA_MALLOC(&point_pair.mesh_corr_inds_d, static_cast(num_residuals));\n CUDA_MALLOC(&point_pair.ref_corr_inds_d, static_cast(num_residuals));\n CUDA_MALLOC(&distance_d, static_cast(num_residuals));\n CUDA_MALLOC(&numCorr_d, static_cast(1));\n\n find_mesh_to_scan_corr(point_pair.mesh_corr_inds_d, point_pair.ref_corr_inds_d, distance_d, numCorr_d,\n result_pos_d, deformModel.dim, scanPointCloud, radius, num_residuals);\n\n CUDA_CHECK(cudaMemcpy(&point_pair.point_count, numCorr_d, sizeof(int), cudaMemcpyDeviceToHost));\n \/\/ TODO: Move to find_mesh_to_scan_corr and int* numCorr_d\n if (point_pair.point_count > num_residuals\/3){\n point_pair.point_count = num_residuals\/3;\n }\n\n \/*******************\n * Calculate residual & jacobian for PointPairs\n *******************\/\n calculatePointPairLoss(residual, fa1Jacobian, fa2Jacobian, ftJacobian, fuJacobian, point_pair,\n params, deformModel, c_residuals, c_jacobians, weight, isJacobianRequired);\n\n CUDA_CHECK(cudaFree(align_pos_d));\n CUDA_CHECK(cudaFree(result_pos_d));\n CUDA_CHECK(cudaFree(distance_d));\n CUDA_CHECK(cudaFree(numCorr_d));\n CUDA_FREE(point_pair.mesh_corr_inds_d);\n CUDA_FREE(point_pair.ref_corr_inds_d);\n}\n","avg_line_length":42.1959910913,"max_line_length":131,"alphanum_fraction":0.6505858756} {"size":3758,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"#include \"stencil\/copy.cuh\"\n#include \"stencil\/pack_kernel.cuh\"\n\n#if 0\n\/* 
replaced by dev_packer_pack_domain and dev_packer_unpack_domain, which compute the offsets into dst on the fly\n*\/\n\n__global__ void multi_pack(void *__restrict__ dst, \/\/ dst buffer\n const size_t *__restrict__ offsets, \/\/ offsets into dst\n void *__restrict__ *__restrict__ const srcs, \/\/ n src pointers\n const Dim3 srcSize, const Dim3 srcPos, const Dim3 srcExtent,\n const size_t *__restrict__ elemSizes, \/\/ n elem sizes\n const size_t n) {\n for (size_t i = 0; i < n; ++i) {\n void *dstp = &(static_cast(dst)[offsets[i]]);\n grid_pack(dstp, srcs[i], srcSize, srcPos, srcExtent, elemSizes[i]);\n }\n}\n\n__global__ void multi_unpack(void **__restrict__ dsts, const Dim3 dstSize, const Dim3 dstPos, const Dim3 dstExtent,\n const void *__restrict__ const src, const size_t *__restrict__ offsets,\n const size_t *__restrict__ elemSizes, const size_t n) {\n for (size_t i = 0; i < n; ++i) {\n const void *srcp = &(static_cast(src)[offsets[i]]);\n grid_unpack(dsts[i], dstSize, dstPos, dstExtent, srcp, elemSizes[i]);\n }\n}\n#endif\n\n\/*! \\brief grid-collaborative 3d translation\n\n non-overlapping src and dst\n *\/\ninline __device__ void translate_grid(cudaPitchedPtr dst, const Dim3 dstPos, const cudaPitchedPtr src,\n const Dim3 srcPos,\n const Dim3 extent, \/\/ the extent of the region to be copied\n const size_t elemSize) {\n\n char *__restrict__ cDst = reinterpret_cast(dst.ptr);\n const char *__restrict__ cSrc = reinterpret_cast(src.ptr);\n\n const size_t tz = blockDim.z * blockIdx.z + threadIdx.z;\n const size_t ty = blockDim.y * blockIdx.y + threadIdx.y;\n const size_t tx = blockDim.x * blockIdx.x + threadIdx.x;\n\n const Dim3 dstStop = dstPos + extent;\n\n for (size_t z = tz; z < extent.z; z += blockDim.z * gridDim.z) {\n for (size_t y = ty; y < extent.y; y += blockDim.y * gridDim.y) {\n for (size_t x = tx; x < extent.x; x += blockDim.x * gridDim.x) {\n \/\/ input coordinates\n unsigned int zi = z + srcPos.z;\n unsigned int yi = y + srcPos.y;\n unsigned int xi = x + srcPos.x;\n \/\/ output coordinates\n unsigned int zo = z + dstPos.z;\n unsigned int yo = y + dstPos.y;\n unsigned int xo = x + dstPos.x;\n \/\/ linearized byte offset\n size_t lo = zo * dst.ysize * dst.pitch + yo * dst.pitch + xo * elemSize;\n size_t li = zi * src.ysize * src.pitch + yi * src.pitch + xi * elemSize;\n \/\/ printf(\"%lu %lu %lu [%lu] -> %lu %lu %lu [%lu]\\n\", xi, yi, zi, ii,\n \/\/ xo,\n \/\/ yo, zo, oi);\n \/\/ TODO: specialize to elemSize?\n memcpy(cDst + lo, cSrc + li, elemSize);\n }\n }\n }\n}\n\n__global__ void translate(cudaPitchedPtr dst, const Dim3 dstPos, cudaPitchedPtr src, const Dim3 srcPos,\n const Dim3 extent, \/\/ the extent of the region to be copied\n const size_t elemSize) {\n\n translate_grid(dst, dstPos, src, srcPos, extent, elemSize);\n}\n\n__global__ void multi_translate(cudaPitchedPtr *dsts, const Dim3 dstPos, const cudaPitchedPtr *srcs, const Dim3 srcPos,\n const Dim3 extent, \/\/ the extent of the region to be copied\n const size_t *__restrict__ elemSizes, const size_t n) {\n for (size_t i = 0; i < n; ++i) {\n translate_grid(dsts[i], dstPos, srcs[i], srcPos, extent, elemSizes[i]);\n }\n}\n","avg_line_length":43.6976744186,"max_line_length":119,"alphanum_fraction":0.5883448643} {"size":5818,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"\/*\n -- MAGMA (version 1.6.1) --\n Univ. of Tennessee, Knoxville\n Univ. of California, Berkeley\n Univ. 
of Colorado, Denver\n @date January 2015\n\n @precisions normal z -> s d c\n @author Stan Tomov\n*\/\n#include \"common_magma.h\"\n#include \n\n#define NB 64\n\n\/* =====================================================================\n Matrix is m x n, and is divided into block rows, each NB x n.\n Each CUDA block has NB threads to handle one block row.\n Each thread handles one row, iterating across all columns.\n*\/\n__global__ void\nzcompact_kernel(\n int m, int n,\n magmaDoubleComplex *dA, \n int ldda,\n double *dnorms, \n double tol,\n magma_int_t *active, \n magma_int_t *cBlock)\n{\n \/\/ dA is processed across row i (by the current thread)\n int i = blockIdx.x*blockDim.x + threadIdx.x;\n int cBlockSize = 0;\n if ( i < m ) {\n dA += i;\n \n for(int j = 0; j tol && active[j]){\n dA[ldda*cBlockSize] = dA[ldda*j];\n cBlockSize++;\n }\n else if (i==0)\n active[j] = 0;\n }\n }\n\n if (i==0)\n *cBlock = cBlockSize;\n}\n\n__global__ void\nzcompactactive_kernel(\n int m, \n int n,\n magmaDoubleComplex *dA, \n int ldda,\n magma_int_t *active)\n{\n \/\/ dA is processed across row i (by the current thread)\n int i = blockIdx.x*blockDim.x + threadIdx.x;\n int cBlockSize = 0;\n if ( i < m ) {\n dA += i;\n\n for(int j = 0; j tol.\n The active mask array has 1 or 0, showing if a vector remained or not\n in the compacted resulting set of vectors.\n \n Arguments\n ---------\n @param[in]\n m INTEGER\n The number of rows of the matrix dA. M >= 0.\n \n @param[in]\n n INTEGER\n The number of columns of the matrix dA. N >= 0.\n \n @param[in][in,out]\n dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)\n The m by n matrix dA.\n \n @param[in]\n ldda INTEGER\n The leading dimension of the array dA. LDDA >= max(1,M).\n \n @param[in]\n dnorms DOUBLE PRECISION array, dimension N\n The norms of the N vectors in dA\n\n @param[in]\n tol DOUBLE PRECISON\n The tolerance value used in the criteria to compact or not.\n\n @param[in][out]\n active INTEGER array, dimension N\n A mask of 1s and 0s showing if a vector remains or has been removed\n \n @param[in][out]\n cBlock magmaInt_ptr\n The number of vectors that remain in dA (i.e., with norms > tol).\n @param[in]\n queue magma_queue_t\n Queue to execute in.\n\n @ingroup magmasparse_zgegpuk\n ********************************************************************\/\n\nextern \"C\" void\nmagma_zcompact(\n magma_int_t m, \n magma_int_t n,\n magmaDoubleComplex_ptr dA, \n magma_int_t ldda,\n magmaDouble_ptr dnorms, \n double tol, \n magmaInt_ptr active,\n magmaInt_ptr cBlock,\n magma_queue_t queue )\n{\n magma_int_t info = 0;\n if ( m < 0 )\n info = -1;\n else if ( n < 0 )\n info = -2;\n else if ( ldda < max(1,m))\n info = -4;\n \n if ( info != 0 ) {\n magma_xerbla( __func__, -(info) );\n return;\n }\n \n if ( m == 0 || n == 0 )\n return;\n \n dim3 threads( NB );\n dim3 grid( (m + NB - 1)\/NB );\n \n zcompact_kernel<<< grid, threads, 0, queue >>>(\n m, n, dA, ldda, dnorms, tol, active, active+n );\n\n magma_igetvector( 1, active+n, 1, cBlock, 1 );\n}\n\n\n\/* ===================================================================== *\/\n\/**\n Purpose\n -------\n ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an\n array of 1s and 0sindicating which vectors to compact (for 1s) and\n which to disregard (for 0s).\n\n Arguments\n ---------\n @param[in]\n m INTEGER\n The number of rows of the matrix dA. M >= 0.\n\n @param[in]\n n INTEGER\n The number of columns of the matrix dA. 
N >= 0.\n\n @param[in][in,out]\n dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)\n The m by n matrix dA.\n\n @param[in]\n ldda INTEGER\n The leading dimension of the array dA. LDDA >= max(1,M).\n\n @param[in]\n active INTEGER array, dimension N\n A mask of 1s and 0s showing if a vector remains or has been removed\n @param[in]\n queue magma_queue_t\n Queue to execute in.\n\n @ingroup magmasparse_z\n ********************************************************************\/\n\nextern \"C\" void\nmagma_zcompactActive(\n magma_int_t m, \n magma_int_t n,\n magmaDoubleComplex_ptr dA, \n magma_int_t ldda,\n magmaInt_ptr active,\n magma_queue_t queue )\n{\n magma_int_t info = 0;\n if ( m < 0 )\n info = -1;\n else if ( n < 0 )\n info = -2;\n else if ( ldda < max(1,m))\n info = -4;\n\n if ( info != 0 ) {\n magma_xerbla( __func__, -(info) );\n return;\n }\n\n if ( m == 0 || n == 0 )\n return;\n\n dim3 threads( NB );\n dim3 grid( (m + NB - 1)\/NB );\n\n zcompactactive_kernel<<< grid, threads, 0, queue >>>(\n m, n, dA, ldda, active);\n}\n\n\/* ===================================================================== *\/\n","avg_line_length":25.1861471861,"max_line_length":83,"alphanum_fraction":0.4965623926} {"size":18664,"ext":"cu","lang":"Cuda","max_stars_count":null,"content":"#include \n#include \"Global.h\"\n#include \n#include \n#include \/\/ For max()\n#include \n#include \n\n\n\n\n\/\/ __global__\n__device__\nvoid getGridForPiece(int piece, int &width, int &height, int pGrid[5][5])\n{\n \/\/ Clear the array\n memset(pGrid, 0, sizeof(pGrid[0][0]) * 5 * 5);\n\n if (piece == 0)\n {\n width = 1;\n height = 3;\n pGrid[0][0] = 1;\n pGrid[1][0] = 1;\n pGrid[2][0] = 1;\n }\n else if (piece == 1)\n {\n width = 3;\n height = 1;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n }\n else if (piece == 2)\n {\n width = 3;\n height = 3;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n pGrid[1][2] = 1;\n pGrid[2][2] = 1;\n }\n else if (piece == 3)\n {\n width = 3;\n height = 3;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n pGrid[1][0] = 1;\n pGrid[2][0] = 1;\n }\n else if (piece == 4)\n {\n width = 3;\n height = 3;\n pGrid[0][0] = 1;\n pGrid[1][0] = 1;\n pGrid[2][0] = 1;\n pGrid[2][1] = 1;\n pGrid[2][2] = 1;\n }\n else if (piece == 5)\n {\n width = 3;\n height = 3;\n pGrid[0][2] = 1;\n pGrid[1][2] = 1;\n pGrid[2][0] = 1;\n pGrid[2][1] = 1;\n pGrid[2][2] = 1;\n }\n else if (piece == 6)\n {\n width = 1;\n height = 2;\n pGrid[0][0] = 1;\n pGrid[1][0] = 1;\n }\n else if (piece == 7)\n {\n width = 2;\n height = 1;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n }\n else if (piece == 8)\n {\n width = 2;\n height = 2;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[1][0] = 1;\n }\n else if (piece == 9)\n {\n width = 2;\n height = 2;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[1][1] = 1;\n }\n else if (piece == 10)\n {\n width = 2;\n height = 2;\n pGrid[0][1] = 1;\n pGrid[1][1] = 1;\n pGrid[1][0] = 1;\n }\n else if (piece == 11)\n {\n width = 2;\n height = 2;\n pGrid[0][0] = 1;\n pGrid[1][1] = 1;\n pGrid[1][0] = 1;\n }\n else if (piece == 12)\n {\n width = 1;\n height = 5;\n pGrid[0][0] = 1;\n pGrid[1][0] = 1;\n pGrid[2][0] = 1;\n pGrid[3][0] = 1;\n pGrid[4][0] = 1;\n }\n else if (piece == 13)\n {\n width = 5;\n height = 1;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n pGrid[0][3] = 1;\n pGrid[0][4] = 1;\n }\n else if (piece == 14)\n {\n width = 1;\n height = 4;\n pGrid[0][0] = 1;\n pGrid[1][0] = 1;\n pGrid[2][0] = 1;\n pGrid[3][0] = 1;\n }\n else if (piece == 15)\n {\n width = 4;\n 
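        // piece shapes are encoded row-major as pGrid[row][col], 1 = occupied;
        // piece 15 here is the horizontal four-cell bar: 1 1 1 1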
height = 1;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n pGrid[0][3] = 1;\n }\n else if (piece == 16)\n {\n width = 1;\n height = 1;\n pGrid[0][0] = 1;\n }\n else if (piece == 17)\n {\n width = 2;\n height = 2;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[1][0] = 1;\n pGrid[1][1] = 1;\n }\n else if (piece == 18)\n {\n width = 3;\n height = 3;\n pGrid[0][0] = 1;\n pGrid[0][1] = 1;\n pGrid[0][2] = 1;\n pGrid[1][0] = 1;\n pGrid[1][1] = 1;\n pGrid[1][2] = 1;\n pGrid[2][0] = 1;\n pGrid[2][1] = 1;\n pGrid[2][2] = 1;\n }\n}\n\n__device__\nint processGrid(int grid[GAME_BOARD_GRID_SIZE][GAME_BOARD_GRID_SIZE], int x, int y, int pWidth, int pHeight)\n{\n int linesCleared = 0;\n\n bool rowsToClear[GAME_BOARD_GRID_SIZE];\n bool columnsToClear[GAME_BOARD_GRID_SIZE];\n\n for (int i = y; i < y + pHeight; i++)\n {\n bool didFindHole = false;\n\n for (int j = 0; j < GAME_BOARD_GRID_SIZE; j++)\n {\n if (grid[i][j] == 0)\n {\n didFindHole = true;\n break;\n }\n }\n\n rowsToClear[i] = !didFindHole;\n }\n\n for (int i = x; i < x + pWidth; i++)\n {\n bool didFindHole = false;\n\n for (int j = 0; j < GAME_BOARD_GRID_SIZE; j++)\n {\n if (grid[j][i] == 0)\n {\n didFindHole = true;\n break;\n }\n }\n\n columnsToClear[i] = !didFindHole;\n }\n\n for (int i = y; i < y + pHeight; i++)\n {\n if (!rowsToClear[i])\n {\n continue;\n }\n\n linesCleared++;\n\n for (int j = 0; j < GAME_BOARD_GRID_SIZE; j++)\n {\n grid[i][j] = 0;\n }\n }\n\n for (int i = x; i < x + pWidth; i++)\n {\n if (!columnsToClear[i])\n {\n continue;\n }\n linesCleared++;\n\n for (int j = 0; j < GAME_BOARD_GRID_SIZE; j++)\n {\n grid[j][i] = 0;\n }\n }\n\n return linesCleared;\n}\n\n__device__\nint isempty(int *stack, int &top) {\n\n if (top == -1)\n return 1;\n else\n return 0;\n}\n\n__device__\nint peek(int *stack, int &top) {\n return stack[top];\n}\n\n__device__\nint pop(int *stack, int &top) {\n int data;\n\n if (!isempty(stack, top)) {\n data = stack[top];\n top = top - 1;\n return data;\n } else {\n return -1;\n }\n}\n\n__device__\nint isfull(int *stack, int &top) {\n\n if (top == 100)\n return 1;\n else\n return 0;\n}\n\n__device__\nvoid push(int *stack, int &top, int data) {\n\n if (!isfull(stack, top)) {\n top = top + 1;\n stack[top] = data;\n }\n}\n\n\n\/\/ Finds the maximum area under the histogram represented\n\/\/ by histogram. See below article for details.\n\/\/ http:\/\/www.geeksforgeeks.org\/largest-rectangle-under-histogram\/\n__device__\nint maxHist(int row[GAME_BOARD_GRID_SIZE])\n{\n \/\/\/ Create an empty stack. The stack holds indexes of\n \/\/ hist[] array\/ The bars stored in stack are always\n \/\/ in increasing order of their heights.\n \/\/ std::stack result;\n int result[100];\n int top = 0;\n\n\n\n int top_val; \/\/ Top of stack\n\n int max_area = 0; \/\/ Initialize max area in current\n \/\/ row (or histogram)\n\n int area = 0; \/\/ Initialize area with current top\n\n \/\/ Run through all bars of given histogram (or row)\n int i = 0;\n while (i < GAME_BOARD_GRID_SIZE)\n {\n \/\/ If this bar is higher than the bar on top stack,\n \/\/ push it to stack\n if (isempty(result, top) || row[peek(result, top)] <= row[i])\n push(result, top, i++);\n\n else\n {\n \/\/ If this bar is lower than top of stack, then\n \/\/ calculate area of rectangle with stack top as\n \/\/ the smallest (or minimum height) bar. 
'i' is\n \/\/ 'right index' for the top and element before\n \/\/ top in stack is 'left index'\n top_val = row[peek(result, top)];\n pop(result, top);\n area = top_val * i;\n\n if (!isempty(result, top))\n area = top_val * (i - peek(result, top) - 1 );\n max_area = max(area, max_area);\n }\n }\n\n \/\/ Now pop the remaining bars from stack and calculate area\n \/\/ with every popped bar as the smallest bar\n while (!isempty(result, top))\n {\n top_val = row[peek(result, top)];\n pop(result, top);\n area = top_val * i;\n if (!isempty(result, top))\n area = top_val * (i - peek(result, top) - 1 );\n\n max_area = max(area, max_area);\n }\n return max_area;\n}\n\n\/\/ Returns area of the largest rectangle with all 1s in A[][]\n__device__\nint maxRectangle(int A[GAME_BOARD_GRID_SIZE][GAME_BOARD_GRID_SIZE])\n{\n \/\/ Calculate area for first row and initialize it as\n \/\/ result\n int result = maxHist(A[0]);\n\n \/\/ iterate over row to find maximum rectangular area\n \/\/ considering each row as histogram\n for (int i = 1; i < GAME_BOARD_GRID_SIZE; i++)\n {\n\n for (int j = 0; j < GAME_BOARD_GRID_SIZE; j++)\n\n \/\/ if A[i][j] is 1 then add A[i -1][j]\n if (A[i][j]) A[i][j] += A[i - 1][j];\n\n\n \/\/ Update result if area with current row (as last row)\n \/\/ of rectangle) is more\n result = max(result, maxHist(A[i]));\n }\n\n return result;\n}\n\n__device__\nfloat evaluateMove(int grid[GAME_BOARD_GRID_SIZE][GAME_BOARD_GRID_SIZE], float heuristicCoeff[NUMBER_OF_HEURISTICS], int numberOfLinesCleared)\n{\n int numberOfHoles = 0;\n int numberOfFreeLines = 0;\n int largestRectangleArea = 0;\n int totalNumberOfLineClusters = 0;\n\n bool freeRows[GAME_BOARD_GRID_SIZE];\n bool freeColumns[GAME_BOARD_GRID_SIZE];\n\n \/\/ Check Lines\n for (int x = 0; x < GAME_BOARD_GRID_SIZE; x++)\n {\n bool isFreeLine = true;\n for (int y = 0; y < GAME_BOARD_GRID_SIZE; y++)\n {\n if (grid[y][x] != 0)\n {\n isFreeLine = false;\n break;\n }\n }\n\n if (isFreeLine)\n {\n numberOfFreeLines++;\n freeColumns[x] = false;\n }\n else\n {\n freeColumns[x] = true;\n }\n }\n\n \/\/ Check Lines\n for (int y = 0; y < GAME_BOARD_GRID_SIZE; y++)\n {\n bool isFreeLine = true;\n for (int x = 0; x < GAME_BOARD_GRID_SIZE; x++)\n {\n if (grid[y][x] != 0)\n {\n isFreeLine = false;\n break;\n }\n }\n\n if (isFreeLine)\n {\n numberOfFreeLines++;\n freeRows[y] = true;\n }\n else\n {\n freeRows[y] = false;\n }\n }\n\n bool isOnCluster = false;\n for (int i = 0; i < GAME_BOARD_GRID_SIZE; i++)\n {\n if (freeRows[i] == false)\n {\n if (!isOnCluster)\n {\n isOnCluster = true;\n totalNumberOfLineClusters++;\n }\n }\n else\n {\n isOnCluster = false;\n }\n }\n\n isOnCluster = false;\n for (int i = 0; i < GAME_BOARD_GRID_SIZE; i++)\n {\n if (freeColumns[i] == false)\n {\n if (!isOnCluster)\n {\n isOnCluster = true;\n totalNumberOfLineClusters++;\n }\n }\n else\n {\n isOnCluster = false;\n }\n }\n\n \/\/ Number of Holes\n for (int x = 0; x < GAME_BOARD_GRID_SIZE; x++)\n {\n for (int y = 0; y < GAME_BOARD_GRID_SIZE; y++)\n {\n if (((x - 1) >= 0) && (grid[y][x - 1] == 0))\n {\n \/\/ Do nothing\n }\n else if (((y - 1) >= 0) && (grid[y - 1][x] == 0))\n {\n \/\/ Do nothing\n }\n else if (((y + 1) < GAME_BOARD_GRID_SIZE) && (grid[y + 1][x] == 0))\n {\n \/\/ Do nothing\n }\n else if (((x + 1) < GAME_BOARD_GRID_SIZE) && (grid[y][x + 1] == 0))\n {\n \/\/ Do nothing\n }\n else\n {\n \/\/ Non of the tests passed so its a hall\n numberOfHoles++;\n }\n\n }\n }\n\n \/\/ Get the largest rectangle\n largestRectangleArea = maxRectangle(grid);\n\n float moveScore = 0;\n 
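    // weighted linear combination of the board features gathered above; the
    // coefficients carry their own signs:
    //   heuristicCoeff[0] - lines cleared by this move
    //   heuristicCoeff[1] - completely empty rows and columns
    //   heuristicCoeff[2] - cells with no empty 4-neighbour (a hole proxy)
    //   heuristicCoeff[3] - area of the largest all-occupied rectangle
    //                       (note: maxRectangle mutates its grid argument)
    //   heuristicCoeff[4] - runs counted by the cluster loops (note: freeColumns
    //                       is filled with the inverse convention of freeRows,
    //                       so the column half walks free columns rather than
    //                       occupied ones - possibly unintended)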
moveScore = (heuristicCoeff[0] * (float)numberOfLinesCleared);\n moveScore += (heuristicCoeff[1] * (float)numberOfFreeLines);\n moveScore += (heuristicCoeff[2] * (float)numberOfHoles);\n moveScore += (heuristicCoeff[3] * (float)largestRectangleArea);\n moveScore += (heuristicCoeff[4] * (float)totalNumberOfLineClusters);\n\n return moveScore;\n}\n\n\n\n__global__\nvoid calculateMoves(int *grid,\n int piece[NUMBER_OF_PIECES_PER_ROUND],\n float heuristicCoeff[NUMBER_OF_HEURISTICS],\n int *x,\n int *y,\n float *score,\n int *linesCleared)\n{\n int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;\n\n if (threadIndex >= (GAME_BOARD_GRID_SIZE * GAME_BOARD_GRID_SIZE))\n {\n for (int t = 0; t < NUMBER_OF_PIECES_PER_ROUND; t++)\n {\n x[threadIndex * NUMBER_OF_PIECES_PER_ROUND + t] = -1;\n y[threadIndex * NUMBER_OF_PIECES_PER_ROUND + t] = -1;\n }\n return;\n }\n\n\n return;\n\n score[threadIndex] = 0;\n linesCleared[threadIndex] = 0;\n\n\n int localGrid[GAME_BOARD_GRID_SIZE][GAME_BOARD_GRID_SIZE];\n for (int gX = 0; gX < GAME_BOARD_GRID_SIZE; gX++)\n {\n for (int gY = 0; gY < GAME_BOARD_GRID_SIZE; gY++)\n {\n localGrid[gY][gX] = grid[gY * GAME_BOARD_GRID_SIZE + gX];\n }\n }\n\n\n int xPos[NUMBER_OF_PIECES_PER_ROUND];\n int yPos[NUMBER_OF_PIECES_PER_ROUND];\n\n int positionIndex = threadIndex;\n for (int i = 0; i < NUMBER_OF_PIECES_PER_ROUND; i++)\n {\n xPos[i] = positionIndex % GAME_BOARD_GRID_SIZE;\n positionIndex \/= GAME_BOARD_GRID_SIZE;\n yPos[i] = positionIndex % GAME_BOARD_GRID_SIZE;\n positionIndex \/= GAME_BOARD_GRID_SIZE;\n }\n\n \/\/ Get the piece data\n int pWidth = 0;\n int pHeight = 0;\n int pGrid[5][5];\n\n for (int i = 0; i < NUMBER_OF_PIECES_PER_ROUND; i++)\n {\n \/\/ Get the piece data\n getGridForPiece(piece[i], pWidth, pHeight, pGrid);\n\n bool isValidMove = true;\n\n \/\/ Check if it is a valid move\n for (int pX = 0; pX < pWidth; pX++)\n {\n for (int pY = 0; pY < pHeight; pY++)\n {\n if ((pGrid[pY][pX] == 1) && (localGrid[yPos[i] + pY][xPos[i] + pX] == 1))\n {\n isValidMove = false;\n break;\n }\n }\n\n if (!isValidMove)\n {\n break;\n }\n }\n\n\n if (!isValidMove)\n {\n for (int t = 0; t < NUMBER_OF_PIECES_PER_ROUND; t++)\n {\n x[threadIndex * NUMBER_OF_PIECES_PER_ROUND + t] = -1;\n y[threadIndex * NUMBER_OF_PIECES_PER_ROUND + t] = -1;\n }\n return;\n }\n\n x[threadIndex * NUMBER_OF_PIECES_PER_ROUND + i] = xPos[i];\n y[threadIndex * NUMBER_OF_PIECES_PER_ROUND + i] = yPos[i];\n\n \/\/ Place the piece since valid\n for (int pX = 0; pX < pWidth; pX++)\n {\n for (int pY = 0; pY < pHeight; pY++)\n {\n localGrid[yPos[i] + pY][xPos[i] + pX] += pGrid[pY][pX];\n }\n }\n\n linesCleared[threadIndex] += processGrid(localGrid, xPos[i] , yPos[i], pWidth, pHeight);\n }\n\n score[threadIndex] = evaluateMove(localGrid, heuristicCoeff, linesCleared[threadIndex]);\n}\n\nextern \"C\"\nint launchCuda(int grid[GAME_BOARD_GRID_SIZE][GAME_BOARD_GRID_SIZE],\n int piece[NUMBER_OF_PIECES_PER_ROUND],\n float heuristicCoeff[NUMBER_OF_HEURISTICS],\n int moveX[NUMBER_OF_PIECES_PER_ROUND],\n int moveY[NUMBER_OF_PIECES_PER_ROUND]\n )\n{\n int numberOfMoves = 1;\n for (int i = 0; i < NUMBER_OF_PIECES_PER_ROUND; i++)\n {\n numberOfMoves *= GAME_BOARD_GRID_SIZE * GAME_BOARD_GRID_SIZE;\n }\n\n\n int *x;\n int *y;\n float *score;\n int *linesCleared;\n\n x = (int*)malloc(numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int));\n y = (int*)malloc(numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int));\n score = (float*)malloc(numberOfMoves * sizeof(float));\n linesCleared = (int*)malloc(numberOfMoves * 
sizeof(int));\n\n int *d_x;\n int *d_y;\n float *d_score;\n int *d_linesCleared;\n\n cudaMalloc(&d_x, numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int));\n cudaMalloc(&d_y, numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int));\n cudaMalloc(&d_score, numberOfMoves * sizeof(float));\n cudaMalloc(&d_linesCleared, numberOfMoves * sizeof(int));\n\n\n int *d_grid;\n int *d_piece;\n float *d_heuristicCoeff;\n\n cudaMalloc(&d_grid, GAME_BOARD_GRID_SIZE * GAME_BOARD_GRID_SIZE * sizeof(int));\n cudaMalloc(&d_piece, NUMBER_OF_PIECES_PER_ROUND * sizeof(int));\n cudaMalloc(&d_heuristicCoeff, NUMBER_OF_HEURISTICS * sizeof(float));\n\n \/\/ cudaMemcpy(d_grid, grid, GAME_BOARD_GRID_SIZE * GAME_BOARD_GRID_SIZE * sizeof(int), cudaMemcpyHostToDevice);\n \/\/ cudaMemcpy(d_piece, piece, NUMBER_OF_PIECES_PER_ROUND * sizeof(int), cudaMemcpyHostToDevice);\n \/\/ cudaMemcpy(d_heuristicCoeff, heuristicCoeff, NUMBER_OF_HEURISTICS * sizeof(float), cudaMemcpyHostToDevice);\n\n\n \/\/ int threadsPerBlock = 128;\n\n \/\/ calculateMoves <<<(numberOfMoves \/ threadsPerBlock) + 1, threadsPerBlock>>>(d_grid,\n \/\/ d_piece,\n \/\/ d_heuristicCoeff,\n \/\/ d_x,\n \/\/ d_y,\n \/\/ d_score,\n \/\/ d_linesCleared);\n\n\n \/\/ cudaMemcpy(x, d_x, numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int), cudaMemcpyDeviceToHost);\n \/\/ cudaMemcpy(y, d_y, numberOfMoves * NUMBER_OF_PIECES_PER_ROUND * sizeof(int), cudaMemcpyDeviceToHost);\n \/\/ cudaMemcpy(score, d_score, numberOfMoves * sizeof(float), cudaMemcpyDeviceToHost);\n \/\/ cudaMemcpy(linesCleared, d_linesCleared, numberOfMoves * sizeof(int), cudaMemcpyDeviceToHost);\n\n\n \/\/ int bestMove = -1;\n \/\/ float bestMoveScore = -10000000000;\n \/\/ for (int i = 0; i < numberOfMoves; i++)\n \/\/ {\n \/\/ if (x[i * NUMBER_OF_PIECES_PER_ROUND] == -1)\n \/\/ {\n \/\/ continue;\n \/\/ }\n\n \/\/ if (score[i] > bestMoveScore)\n \/\/ {\n \/\/ bestMoveScore = score[i];\n \/\/ bestMove = i;\n \/\/ }\n \/\/ }\n\n \/\/ if (bestMove == -1)\n \/\/ {\n \/\/ cudaFree(d_grid);\n \/\/ cudaFree(d_piece);\n \/\/ cudaFree(d_heuristicCoeff);\n \/\/ cudaFree(d_x);\n \/\/ cudaFree(d_y);\n \/\/ cudaFree(d_score);\n \/\/ cudaFree(d_linesCleared);\n\n \/\/ free(x);\n \/\/ free(y);\n \/\/ free(score);\n \/\/ free(linesCleared);\n\n \/\/ return 0;\n \/\/ }\n\n\n \/\/ for (int i = 0 ; i < NUMBER_OF_PIECES_PER_ROUND; i++)\n \/\/ {\n \/\/ moveX[i] = x[bestMove * NUMBER_OF_PIECES_PER_ROUND + i];\n \/\/ moveY[i] = y[bestMove * NUMBER_OF_PIECES_PER_ROUND + i];\n \/\/ }\n\n\n\n cudaFree(d_grid);\n cudaFree(d_piece);\n cudaFree(d_heuristicCoeff);\n cudaFree(d_x);\n cudaFree(d_y);\n cudaFree(d_score);\n cudaFree(d_linesCleared);\n\n free(x);\n free(y);\n free(score);\n free(linesCleared);\n\n return 1;\n}","avg_line_length":24.3020833333,"max_line_length":142,"alphanum_fraction":0.4943206172} {"size":9385,"ext":"cu","lang":"Cuda","max_stars_count":17085.0,"content":"\/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License. 
*\/\n\n#include \"paddle\/fluid\/operators\/temporal_shift_op.h\"\n#include \"paddle\/fluid\/platform\/cuda_primitives.h\"\n#include \"paddle\/fluid\/platform\/gpu_launch_config.h\"\n\nnamespace paddle {\nnamespace operators {\n\nusing framework::Tensor;\n\ntemplate <typename T>\n__global__ void KeTemporalShiftFwNCHW(const T* input, T* output,\n                                      const int ntchw, const int tchw,\n                                      const int chw, const int hw, const int t,\n                                      const int c1, const int c2) {\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  int stride = blockDim.x * gridDim.x;\n  int src_it = 0;\n\n  for (; tid < ntchw; tid += stride) {\n    int it = (tid % tchw) \/ chw;\n    int ic = (tid % chw) \/ hw;\n\n    if (ic < c1) {\n      src_it = it - 1;\n    } else if (ic < c2) {\n      src_it = it + 1;\n    } else {\n      src_it = it;\n    }\n\n    if (src_it < 0 || src_it >= t) {\n      output[tid] = 0;\n    } else {\n      output[tid] = input[tid + (src_it - it) * chw];\n    }\n  }\n}\n\ntemplate <typename T>\n__global__ void KeTemporalShiftFwNHWC(const T* input, T* output,\n                                      const int nthwc, const int thwc,\n                                      const int hwc, const int t, const int c,\n                                      const int c1, const int c2) {\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  int stride = blockDim.x * gridDim.x;\n  int src_it = 0;\n\n  for (; tid < nthwc; tid += stride) {\n    int it = (tid % thwc) \/ hwc;\n    int ic = tid % c;\n\n    if (ic < c1) {\n      src_it = it - 1;\n    } else if (ic < c2) {\n      src_it = it + 1;\n    } else {\n      src_it = it;\n    }\n\n    if (src_it < 0 || src_it >= t) {\n      output[tid] = 0;\n    } else {\n      output[tid] = input[tid + (src_it - it) * hwc];\n    }\n  }\n}\n\ntemplate <typename T>\n__global__ void KeTemporalShiftBwNCHW(const T* output_grad, T* input_grad,\n                                      const int ntchw, const int tchw,\n                                      const int chw, const int hw, const int t,\n                                      const int c1, const int c2) {\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  int stride = blockDim.x * gridDim.x;\n  int src_it = 0;\n\n  for (; tid < ntchw; tid += stride) {\n    int it = (tid % tchw) \/ chw;\n    int ic = (tid % chw) \/ hw;\n\n    if (ic < c1) {\n      src_it = it + 1;\n    } else if (ic < c2) {\n      src_it = it - 1;\n    } else {\n      src_it = it;\n    }\n\n    if (src_it >= 0 && src_it < t) {\n      input_grad[tid] = output_grad[tid + (src_it - it) * chw];\n    } else {\n      input_grad[tid] = 0;\n    }\n  }\n}\n\ntemplate <typename T>\n__global__ void KeTemporalShiftBwNHWC(const T* output_grad, T* input_grad,\n                                      const int nthwc, const int thwc,\n                                      const int hwc, const int t, const int c,\n                                      const int c1, const int c2) {\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  int stride = blockDim.x * gridDim.x;\n  int src_it = 0;\n\n  for (; tid < nthwc; tid += stride) {\n    int it = (tid % thwc) \/ hwc;\n    int ic = tid % c;\n\n    if (ic < c1) {\n      src_it = it + 1;\n    } else if (ic < c2) {\n      src_it = it - 1;\n    } else {\n      src_it = it;\n    }\n\n    if (src_it >= 0 && src_it < t) {\n      input_grad[tid] = output_grad[tid + (src_it - it) * hwc];\n    } else {\n      input_grad[tid] = 0;\n    }\n  }\n}\n\ntemplate <typename T>\nclass TemporalShiftOpCUDAKernel : public framework::OpKernel<T> {\n public:\n  void Compute(const framework::ExecutionContext& ctx) const override {\n    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,\n                      platform::errors::InvalidArgument(\n                          \"This kernel only runs on GPU device.\"));\n    auto* input = ctx.Input<Tensor>(\"X\");\n    auto* output = ctx.Output<Tensor>(\"Out\");\n    int t = ctx.Attr<int>(\"seg_num\");\n    float shift_ratio = ctx.Attr<float>(\"shift_ratio\");\n    const std::string data_format_str = ctx.Attr<std::string>(\"data_format\");\n    const DataLayout data_layout =\n        framework::StringToDataLayout(data_format_str);\n\n    const int nt = input->dims()[0];\n    const int c = (data_layout == DataLayout::kNCHW ? input->dims()[1]\n                                                    : input->dims()[3]);\n    const int h = (data_layout == DataLayout::kNCHW ? input->dims()[2]\n                                                    : input->dims()[1]);\n    const int w = (data_layout == DataLayout::kNCHW ? input->dims()[3]\n                                                    : input->dims()[2]);\n\n    const int hw = h * w;\n    const int chw = c * hw;\n    const int tchw = t * chw;\n    const int ntchw = nt * chw;\n\n    const int c1 = static_cast<int>(c * shift_ratio);\n    const int c2 = static_cast<int>(c * 2 * shift_ratio);\n\n    framework::DDim out_dims = (data_layout == DataLayout::kNCHW\n                                    ? framework::make_ddim({nt, c, h, w})\n                                    : framework::make_ddim({nt, h, w, c}));\n    const T* input_data = input->data<T>();\n    T* output_data = output->mutable_data<T>(out_dims, ctx.GetPlace());\n\n    int pixelNum = nt * chw;\n    int threads = 1024;\n    int grid = (pixelNum + threads - 1) \/ threads;\n    const auto& dev_ctx = ctx.cuda_device_context();\n    int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() \/ threads;\n    grid = std::min(dev_ctx.GetSMCount() * blocks_per_sm, grid);\n\n    if (data_layout == DataLayout::kNCHW) {\n      KeTemporalShiftFwNCHW<\n          T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(\n          input_data, output_data, ntchw, tchw, chw, hw, t, c1, c2);\n    } else {\n      KeTemporalShiftFwNHWC<\n          T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(\n          input_data, output_data, ntchw, tchw, chw, t, c, c1, c2);\n    }\n  }\n};\n\ntemplate <typename T>\nclass TemporalShiftGradOpCUDAKernel : public framework::OpKernel<T> {\n public:\n  void Compute(const framework::ExecutionContext& ctx) const override {\n    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName(\"X\"));\n    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName(\"Out\"));\n    int t = ctx.Attr<int>(\"seg_num\");\n    float shift_ratio = ctx.Attr<float>(\"shift_ratio\");\n    const std::string data_format_str = ctx.Attr<std::string>(\"data_format\");\n    const DataLayout data_layout =\n        framework::StringToDataLayout(data_format_str);\n\n    const int nt = output_grad->dims()[0];\n    const int c = (data_layout == DataLayout::kNCHW ? output_grad->dims()[1]\n                                                    : output_grad->dims()[3]);\n    const int h = (data_layout == DataLayout::kNCHW ? output_grad->dims()[2]\n                                                    : output_grad->dims()[1]);\n    const int w = (data_layout == DataLayout::kNCHW ? output_grad->dims()[3]\n                                                    : output_grad->dims()[2]);\n\n    const int hw = h * w;\n    const int chw = c * hw;\n    const int tchw = t * chw;\n    const int ntchw = nt * chw;\n\n    const int c1 = static_cast<int>(c * shift_ratio);\n    const int c2 = static_cast<int>(c * 2 * shift_ratio);\n\n    framework::DDim in_grad_dims = (data_layout == DataLayout::kNCHW\n                                        ? framework::make_ddim({nt, c, h, w})\n                                        : framework::make_ddim({nt, h, w, c}));\n    const T* output_grad_data = output_grad->data<T>();\n    T* input_grad_data =\n        input_grad->mutable_data<T>(in_grad_dims, ctx.GetPlace());\n\n    int pixelNum = nt * chw;\n    int threads = 1024;\n    int grid = (pixelNum + threads - 1) \/ threads;\n    const auto& dev_ctx = ctx.cuda_device_context();\n    int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() \/ threads;\n    grid = std::min(dev_ctx.GetSMCount() * blocks_per_sm, grid);\n\n    if (data_layout == DataLayout::kNCHW) {\n      KeTemporalShiftBwNCHW<\n          T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(\n          output_grad_data, input_grad_data, ntchw, tchw, chw, hw, t, c1, c2);\n    } else {\n      KeTemporalShiftBwNHWC<\n          T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(\n          output_grad_data, input_grad_data, ntchw, tchw, chw, t, c, c1, c2);\n    }\n  }\n};\n\n}  \/\/ namespace operators\n}  \/\/ namespace paddle\n\nnamespace ops = paddle::operators;\nREGISTER_OP_CUDA_KERNEL(\n    temporal_shift, ops::TemporalShiftOpCUDAKernel<float>,\n    ops::TemporalShiftOpCUDAKernel<double>,\n    ops::TemporalShiftOpCUDAKernel<paddle::platform::float16>);\nREGISTER_OP_CUDA_KERNEL(\n    temporal_shift_grad, ops::TemporalShiftGradOpCUDAKernel<float>,\n    ops::TemporalShiftGradOpCUDAKernel<double>,\n    ops::TemporalShiftGradOpCUDAKernel<paddle::platform::float16>);\n","avg_line_length":36.2355212355,"max_line_length":79,"alphanum_fraction":0.5721896644} {"size":83582,"ext":"cu","lang":"Cuda","max_stars_count":537.0,"content":"\n#include \"simple_yolo.hpp\"\n#include <NvInfer.h>\n#include <NvOnnxParser.h>\n#include <cuda_runtime.h>\n\n#include <algorithm>\n#include <fstream>\n#include <memory>\n#include <string>\n#include <future>\n#include <condition_variable>\n#include <mutex>\n#include <thread>\n#include <queue>\n\n#if defined(_WIN32)\n#\tinclude <Windows.h>\n#   include <wingdi.h>\n#\tinclude <Shlwapi.h>\n#\tpragma comment(lib, \"shlwapi.lib\")\n#\tundef min\n#\tundef max\n#else\n#\tinclude <dirent.h>\n#\tinclude <sys\/types.h>\n#\tinclude <sys\/stat.h>\n#\tinclude <unistd.h>\n#   include <stdarg.h>\n#endif\n\nnamespace SimpleYolo{\n\n    using namespace nvinfer1;\n    using namespace std;\n    using namespace cv;\n\n    #define CURRENT_DEVICE_ID -1\n    #define GPU_BLOCK_THREADS  512\n    #define KernelPositionBlock\t\t\t\t\t\t\t\t\t\t\t\\\n        int position = (blockDim.x * blockIdx.x + threadIdx.x);\t\t    \\\n        if (position >= (edge)) return;\n\n    #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__)\n    static bool check_runtime(cudaError_t e, const char* call, int line, const char *file);\n\n    #define checkCudaKernel(...) 
\\\n        __VA_ARGS__;                                                   \\\n        do{cudaError_t cudaStatus = cudaPeekAtLastError();             \\\n        if (cudaStatus != cudaSuccess){                                \\\n            INFOE(\"launch failed: %s\", cudaGetErrorString(cudaStatus));  \\\n        }} while(0);\n\n    #define Assert(op)\t\t\t\t\t \\\n        do{                              \\\n            bool cond = !(!(op));        \\\n            if(!cond){                   \\\n                INFOF(\"Assert failed, \" #op);  \\\n            }                            \\\n        }while(false)\n\n    \/* Change this level to adjust how verbose the log output is *\/\n    #define CURRENT_LOG_LEVEL       LogLevel::Info\n    #define INFOD(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__)\n    #define INFOV(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__)\n    #define INFO(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__)\n    #define INFOW(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__)\n    #define INFOE(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__)\n    #define INFOF(...)\t\t\t__log_func(__FILE__, __LINE__, LogLevel::Fatal, __VA_ARGS__)\n\n    enum class NormType : int{\n        None      = 0,\n        MeanStd   = 1,\n        AlphaBeta = 2\n    };\n\n    enum class ChannelType : int{\n        None   = 0,\n        SwapRB = 1\n    };\n\n    \/* Normalization settings; supports mean\/std, alpha-beta scaling, and swapping the R and B channels *\/\n    struct Norm{\n        float mean[3];\n        float std[3];\n        float alpha, beta;\n        NormType type = NormType::None;\n        ChannelType channel_type = ChannelType::None;\n\n        \/\/ out = (x * alpha - mean) \/ std\n        static Norm mean_std(const float mean[3], const float std[3], float alpha = 1\/255.0f, ChannelType channel_type=ChannelType::None);\n\n        \/\/ out = x * alpha + beta\n        static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None);\n\n        \/\/ None\n        static Norm None();\n    };\n\n    Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){\n\n        Norm out;\n        out.type = NormType::MeanStd;\n        out.alpha = alpha;\n        out.channel_type = channel_type;\n        memcpy(out.mean, mean, sizeof(out.mean));\n        memcpy(out.std, std, sizeof(out.std));\n        return out;\n    }\n\n    Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){\n\n        Norm out;\n        out.type = NormType::AlphaBeta;\n        out.alpha = alpha;\n        out.beta = beta;\n        out.channel_type = channel_type;\n        return out;\n    }\n\n    Norm Norm::None(){\n        return Norm();\n    }\n\n    \/* Sets the given gpu id on construction and restores the previous gpu id on destruction *\/\n    class AutoDevice{\n    public:\n        AutoDevice(int device_id = 0){\n            cudaGetDevice(&old_);\n            checkCudaRuntime(cudaSetDevice(device_id));\n        }\n\n        virtual ~AutoDevice(){\n            checkCudaRuntime(cudaSetDevice(old_));\n        }\n    \n    private:\n        int old_ = -1;\n    };\n    \n    enum class LogLevel : int{\n        Debug   = 5,\n        Verbose = 4,\n        Info    = 3,\n        Warning = 2,\n        Error   = 1,\n        Fatal   = 0\n    };\n\n    static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...);\n    inline int upbound(int n, int align = 32){return (n + align - 1) \/ align * align;}\n\n    static bool check_runtime(cudaError_t e, const char* call, int line, const char *file){\n        if (e != cudaSuccess) {\n            INFOE(\"CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d\", call, cudaGetErrorString(e), cudaGetErrorName(e), e, file, line);\n            return false;\n        }\n        return true;\n    }\n\n    #define TRT_STR(v)  #v\n    #define TRT_VERSION_STRING(major, minor, patch, build)   TRT_STR(major) \".\" TRT_STR(minor) \".\" TRT_STR(patch) \".\" TRT_STR(build)\n    const char* trt_version(){\n        return 
TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD);\n }\n\n static bool check_device_id(int device_id){\n int device_count = -1;\n checkCudaRuntime(cudaGetDeviceCount(&device_count));\n if(device_id < 0 || device_id >= device_count){\n INFOE(\"Invalid device id: %d, count = %d\", device_id, device_count);\n return false;\n }\n return true;\n }\n\n static bool exists(const string& path){\n\n #ifdef _WIN32\n return ::PathFileExistsA(path.c_str());\n #else\n return access(path.c_str(), R_OK) == 0;\n #endif\n }\n\n static const char* level_string(LogLevel level){\n switch (level){\n case LogLevel::Debug: return \"debug\";\n case LogLevel::Verbose: return \"verbo\";\n case LogLevel::Info: return \"info\";\n case LogLevel::Warning: return \"warn\";\n case LogLevel::Error: return \"error\";\n case LogLevel::Fatal: return \"fatal\";\n default: return \"unknow\";\n }\n }\n\n template\n static string join_dims(const vector<_T>& dims){\n stringstream output;\n char buf[64];\n const char* fmts[] = {\"%d\", \" x %d\"};\n for(int i = 0; i < dims.size(); ++i){\n snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]);\n output << buf;\n }\n return output.str();\n }\n\n static bool save_file(const string& file, const void* data, size_t length){\n\n FILE* f = fopen(file.c_str(), \"wb\");\n if (!f) return false;\n\n if (data && length > 0){\n if (fwrite(data, 1, length, f) != length){\n fclose(f);\n return false;\n }\n }\n fclose(f);\n return true;\n }\n\n static bool save_file(const string& file, const vector& data){\n return save_file(file, data.data(), data.size());\n }\n\n static string file_name(const string& path, bool include_suffix){\n\n if (path.empty()) return \"\";\n\n int p = path.rfind('\/');\n\n#ifdef U_OS_WINDOWS\n int e = path.rfind('\\\\');\n p = std::max(p, e);\n#endif\n p += 1;\n\n \/\/include suffix\n if (include_suffix)\n return path.substr(p);\n\n int u = path.rfind('.');\n if (u == -1)\n return path.substr(p);\n\n if (u <= p) u = path.size();\n return path.substr(p, u - p);\n }\n\n vector glob_image_files(const string& directory){\n\n \/* \u68c0\u7d22\u76ee\u5f55\u4e0b\u7684\u6240\u6709\u56fe\u50cf\uff1a\"*.jpg;*.png;*.bmp;*.jpeg;*.tiff\" *\/\n vector files, output;\n set pattern_set{\"jpg\", \"png\", \"bmp\", \"jpeg\", \"tiff\"};\n\n if(directory.empty()){\n INFOE(\"Glob images from folder failed, folder is empty\");\n return output;\n }\n\n try{\n\t\t\tvector files_;\n\t\t\tfiles_.reserve(10000);\n cv::glob(directory + \"\/*\", files_, true);\n\t\t\tfiles.insert(files.end(), files_.begin(), files_.end());\n }catch(...){\n INFOE(\"Glob %s failed\", directory.c_str());\n return output;\n }\n\n for(int i = 0; i < files.size(); ++i){\n auto& file = files[i];\n int p = file.rfind(\".\");\n if(p == -1) continue;\n\n auto suffix = file.substr(p+1);\n std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){\n if(c >= 'A' && c <= 'Z')\n c -= 'A' + 'a';\n return c;\n });\n if(pattern_set.find(suffix) != pattern_set.end())\n output.push_back(file);\n }\n return output;\n }\n\n static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){\n\n if(level > CURRENT_LOG_LEVEL)\n return;\n\n va_list vl;\n va_start(vl, fmt);\n \n char buffer[2048];\n string filename = file_name(file, true);\n int n = snprintf(buffer, sizeof(buffer), \"[%s][%s:%d]:\", level_string(level), filename.c_str(), line);\n vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl);\n\n fprintf(stdout, \"%s\\n\", buffer);\n if (level == LogLevel::Fatal) 
{\n fflush(stdout);\n abort();\n }\n }\n\n static dim3 grid_dims(int numJobs) {\n int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;\n return dim3(((numJobs + numBlockThreads - 1) \/ (float)numBlockThreads));\n }\n\n static dim3 block_dims(int numJobs) {\n return numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;\n }\n\n static int get_device(int device_id){\n if(device_id != CURRENT_DEVICE_ID){\n check_device_id(device_id);\n return device_id;\n }\n\n checkCudaRuntime(cudaGetDevice(&device_id));\n return device_id;\n }\n\n void set_device(int device_id) {\n if (device_id == -1)\n return;\n\n checkCudaRuntime(cudaSetDevice(device_id));\n }\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/CUDA kernels\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n const int NUM_BOX_ELEMENT = 7; \/\/ left, top, right, bottom, confidence, class, keepflag\n static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){\n *ox = matrix[0] * x + matrix[1] * y + matrix[2];\n *oy = matrix[3] * x + matrix[4] * y + matrix[5];\n }\n\n static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){ \n\n int position = blockDim.x * blockIdx.x + threadIdx.x;\n if (position >= num_bboxes) return;\n\n float* pitem = predict + (5 + num_classes) * position;\n float objectness = pitem[4];\n if(objectness < confidence_threshold)\n return;\n\n float* class_confidence = pitem + 5;\n float confidence = *class_confidence++;\n int label = 0;\n for(int i = 1; i < num_classes; ++i, ++class_confidence){\n if(*class_confidence > confidence){\n confidence = *class_confidence;\n label = i;\n }\n }\n\n confidence *= objectness;\n if(confidence < confidence_threshold)\n return;\n\n int index = atomicAdd(parray, 1);\n if(index >= max_objects)\n return;\n\n float cx = *pitem++;\n float cy = *pitem++;\n float width = *pitem++;\n float height = *pitem++;\n float left = cx - width * 0.5f;\n float top = cy - height * 0.5f;\n float right = cx + width * 0.5f;\n float bottom = cy + height * 0.5f;\n affine_project(invert_affine_matrix, left, top, &left, &top);\n affine_project(invert_affine_matrix, right, bottom, &right, &bottom);\n\n float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT;\n *pout_item++ = left;\n *pout_item++ = top;\n *pout_item++ = right;\n *pout_item++ = bottom;\n *pout_item++ = confidence;\n *pout_item++ = label;\n *pout_item++ = 1; \/\/ 1 = keep, 0 = ignore\n }\n\n static __device__ float box_iou(\n float aleft, float atop, float aright, float abottom, \n float bleft, float btop, float bright, float bbottom\n ){\n\n float cleft \t= max(aleft, bleft);\n float ctop \t\t= max(atop, btop);\n float cright \t= min(aright, bright);\n float cbottom \t= min(abottom, bbottom);\n \n float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f);\n if(c_area == 0.0f)\n return 0.0f;\n \n float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop);\n float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop);\n return c_area \/ (a_area + b_area - c_area);\n }\n \n static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){\n\n int position = (blockDim.x * blockIdx.x + threadIdx.x);\n int count = min((int)*bboxes, max_objects);\n if (position >= count) \n return;\n \n \/\/ left, top, right, bottom, confidence, class, keepflag\n float* pcurrent = 
bboxes + 1 + position * NUM_BOX_ELEMENT;\n for(int i = 0; i < count; ++i){\n float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT;\n if(i == position || pcurrent[5] != pitem[5]) continue;\n\n if(pitem[4] >= pcurrent[4]){\n if(pitem[4] == pcurrent[4] && i < position)\n continue;\n\n float iou = box_iou(\n pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3],\n pitem[0], pitem[1], pitem[2], pitem[3]\n );\n\n if(iou > threshold){\n pcurrent[6] = 0; \/\/ 1=keep, 0=ignore\n return;\n }\n }\n }\n } \n\n static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, cudaStream_t stream){\n \n auto grid = grid_dims(num_bboxes);\n auto block = block_dims(num_bboxes);\n\n \/* \u5982\u679c\u6838\u51fd\u6570\u6709\u6ce2\u6d6a\u7ebf\uff0c\u6ca1\u5173\u7cfb\uff0c\u4ed6\u662f\u6b63\u5e38\u7684\uff0c\u4f60\u53ea\u662f\u770b\u4e0d\u987a\u773c\u7f62\u4e86 *\/\n checkCudaKernel(decode_kernel<<>>(predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects));\n\n grid = grid_dims(max_objects);\n block = block_dims(max_objects);\n checkCudaKernel(fast_nms_kernel<<>>(parray, max_objects, nms_threshold));\n }\n\n static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, \n uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){\n\n int position = blockDim.x * blockIdx.x + threadIdx.x;\n if (position >= edge) return;\n\n float m_x1 = warp_affine_matrix_2_3[0];\n float m_y1 = warp_affine_matrix_2_3[1];\n float m_z1 = warp_affine_matrix_2_3[2];\n float m_x2 = warp_affine_matrix_2_3[3];\n float m_y2 = warp_affine_matrix_2_3[4];\n float m_z2 = warp_affine_matrix_2_3[5];\n\n int dx = position % dst_width;\n int dy = position \/ dst_width;\n float src_x = m_x1 * dx + m_y1 * dy + m_z1;\n float src_y = m_x2 * dx + m_y2 * dy + m_z2;\n float c0, c1, c2;\n\n if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){\n \/\/ out of range\n c0 = const_value_st;\n c1 = const_value_st;\n c2 = const_value_st;\n }else{\n int y_low = floorf(src_y);\n int x_low = floorf(src_x);\n int y_high = y_low + 1;\n int x_high = x_low + 1;\n\n uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};\n float ly = src_y - y_low;\n float lx = src_x - x_low;\n float hy = 1 - ly;\n float hx = 1 - lx;\n float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;\n uint8_t* v1 = const_value;\n uint8_t* v2 = const_value;\n uint8_t* v3 = const_value;\n uint8_t* v4 = const_value;\n if(y_low >= 0){\n if (x_low >= 0)\n v1 = src + y_low * src_line_size + x_low * 3;\n\n if (x_high < src_width)\n v2 = src + y_low * src_line_size + x_high * 3;\n }\n \n if(y_high < src_height){\n if (x_low >= 0)\n v3 = src + y_high * src_line_size + x_low * 3;\n\n if (x_high < src_width)\n v4 = src + y_high * src_line_size + x_high * 3;\n }\n\n \/\/ same to opencv\n c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f);\n c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f);\n c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f);\n }\n\n if(norm.channel_type == ChannelType::SwapRB){\n float t = c2;\n c2 = c0; c0 = t;\n }\n\n if(norm.type == NormType::MeanStd){\n c0 = (c0 * norm.alpha - norm.mean[0]) \/ norm.std[0];\n c1 = (c1 * norm.alpha - norm.mean[1]) \/ norm.std[1];\n c2 = (c2 * norm.alpha - 
norm.mean[2]) \/ norm.std[2];\n }else if(norm.type == NormType::AlphaBeta){\n c0 = c0 * norm.alpha + norm.beta;\n c1 = c1 * norm.alpha + norm.beta;\n c2 = c2 * norm.alpha + norm.beta;\n }\n\n int area = dst_width * dst_height;\n float* pdst_c0 = dst + dy * dst_width + dx;\n float* pdst_c1 = pdst_c0 + area;\n float* pdst_c2 = pdst_c1 + area;\n *pdst_c0 = c0;\n *pdst_c1 = c1;\n *pdst_c2 = c2;\n }\n\n static void warp_affine_bilinear_and_normalize_plane(\n uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height,\n float* matrix_2_3, uint8_t const_value, const Norm& norm,\n cudaStream_t stream) {\n \n int jobs = dst_width * dst_height;\n auto grid = grid_dims(jobs);\n auto block = block_dims(jobs);\n \n checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << > > (\n src, src_line_size,\n src_width, src_height, dst,\n dst_width, dst_height, const_value, matrix_2_3, norm, jobs\n ));\n }\n\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class MixMemory\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n \/* gpu\/cpu\u5185\u5b58\u7ba1\u7406\n \u81ea\u52a8\u5bf9gpu\u548ccpu\u5185\u5b58\u8fdb\u884c\u5206\u914d\u548c\u91ca\u653e\n \u8fd9\u91cc\u7684cpu\u4f7f\u7528\u7684\u662fpinned memory\uff0c\u5f53\u5bf9gpu\u505a\u5185\u5b58\u590d\u5236\u65f6\uff0c\u6027\u80fd\u6bd4\u8f83\u597d\n \u56e0\u4e3a\u662fcudaMallocHost\u5206\u914d\u7684\uff0c\u56e0\u6b64\u4ed6\u4e0ecuda context\u6709\u5173\u8054\n *\/\n class MixMemory {\n public:\n MixMemory(int device_id = CURRENT_DEVICE_ID);\n MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);\n virtual ~MixMemory();\n void* gpu(size_t size);\n void* cpu(size_t size);\n void release_gpu();\n void release_cpu();\n void release_all();\n\n inline bool owner_gpu() const{return owner_gpu_;}\n inline bool owner_cpu() const{return owner_cpu_;}\n\n inline size_t cpu_size() const{return cpu_size_;}\n inline size_t gpu_size() const{return gpu_size_;}\n inline int device_id() const{return device_id_;}\n\n inline void* gpu() const { return gpu_; }\n\n \/\/ Pinned Memory\n inline void* cpu() const { return cpu_; }\n\n void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);\n\n private:\n void* cpu_ = nullptr;\n size_t cpu_size_ = 0;\n bool owner_cpu_ = true;\n int device_id_ = 0;\n\n void* gpu_ = nullptr;\n size_t gpu_size_ = 0;\n bool owner_gpu_ = true;\n };\n\n MixMemory::MixMemory(int device_id){\n device_id_ = get_device(device_id);\n }\n\n MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){\n reference_data(cpu, cpu_size, gpu, gpu_size);\t\t\n }\n\n void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){\n release_all();\n \n if(cpu == nullptr || cpu_size == 0){\n cpu = nullptr;\n cpu_size = 0;\n }\n\n if(gpu == nullptr || gpu_size == 0){\n gpu = nullptr;\n gpu_size = 0;\n }\n\n this->cpu_ = cpu;\n this->cpu_size_ = cpu_size;\n this->gpu_ = gpu;\n this->gpu_size_ = gpu_size;\n\n this->owner_cpu_ = !(cpu && cpu_size > 0);\n this->owner_gpu_ = !(gpu && gpu_size > 0);\n checkCudaRuntime(cudaGetDevice(&device_id_));\n }\n\n MixMemory::~MixMemory() {\n release_all();\n }\n\n void* MixMemory::gpu(size_t size) {\n\n if (gpu_size_ < size) {\n release_gpu();\n\n gpu_size_ = size;\n AutoDevice auto_device_exchange(device_id_);\n checkCudaRuntime(cudaMalloc(&gpu_, size));\n checkCudaRuntime(cudaMemset(gpu_, 0, size));\n }\n return gpu_;\n }\n\n void* MixMemory::cpu(size_t 
size) {\n\n if (cpu_size_ < size) {\n release_cpu();\n\n cpu_size_ = size;\n AutoDevice auto_device_exchange(device_id_);\n checkCudaRuntime(cudaMallocHost(&cpu_, size));\n Assert(cpu_ != nullptr);\n memset(cpu_, 0, size);\n }\n return cpu_;\n }\n\n void MixMemory::release_cpu() {\n if (cpu_) {\n if(owner_cpu_){\n AutoDevice auto_device_exchange(device_id_);\n checkCudaRuntime(cudaFreeHost(cpu_));\n }\n cpu_ = nullptr;\n }\n cpu_size_ = 0;\n }\n\n void MixMemory::release_gpu() {\n if (gpu_) {\n if(owner_gpu_){\n AutoDevice auto_device_exchange(device_id_);\n checkCudaRuntime(cudaFree(gpu_));\n }\n gpu_ = nullptr;\n }\n gpu_size_ = 0;\n }\n\n void MixMemory::release_all() {\n release_cpu();\n release_gpu();\n }\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class Tensor\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n \/* Tensor\u7c7b\uff0c\u5b9e\u73b0\u5f20\u91cf\u7684\u7ba1\u7406\n \u7531\u4e8eNN\u591a\u7528\u5f20\u91cf\uff0c\u5fc5\u987b\u6709\u4e2a\u7c7b\u8fdb\u884c\u7ba1\u7406\u624d\u65b9\u4fbf\uff0c\u5b9e\u73b0\u5185\u5b58\u81ea\u52a8\u5206\u914d\uff0c\u8ba1\u7b97\u7d22\u5f15\u7b49\u7b49\n \u5982\u679c\u8981\u8c03\u8bd5\uff0c\u53ef\u4ee5\u6267\u884csave_to_file\uff0c\u50a8\u5b58\u4e3a\u6587\u4ef6\u540e\uff0c\u5728python\u4e2d\u52a0\u8f7d\u5e76\u67e5\u770b\n *\/\n enum class DataHead : int{\n Init = 0,\n Device = 1,\n Host = 2\n };\n\n class Tensor {\n public:\n Tensor(const Tensor& other) = delete;\n Tensor& operator = (const Tensor& other) = delete;\n\n explicit Tensor(std::shared_ptr data = nullptr, int device_id = CURRENT_DEVICE_ID);\n explicit Tensor(int n, int c, int h, int w, std::shared_ptr data = nullptr, int device_id = CURRENT_DEVICE_ID);\n explicit Tensor(int ndims, const int* dims, std::shared_ptr data = nullptr, int device_id = CURRENT_DEVICE_ID);\n explicit Tensor(const std::vector& dims, std::shared_ptr data = nullptr, int device_id = CURRENT_DEVICE_ID);\n virtual ~Tensor();\n\n int numel() const;\n inline int ndims() const{return shape_.size();}\n inline int size(int index) const{return shape_[index];}\n inline int shape(int index) const{return shape_[index];}\n\n inline int batch() const{return shape_[0];}\n inline int channel() const{return shape_[1];}\n inline int height() const{return shape_[2];}\n inline int width() const{return shape_[3];}\n\n inline const std::vector& dims() const { return shape_; }\n inline int bytes() const { return bytes_; }\n inline int bytes(int start_axis) const { return count(start_axis) * element_size(); }\n inline int element_size() const { return sizeof(float); }\n inline DataHead head() const { return head_; }\n\n std::shared_ptr clone() const;\n Tensor& release();\n Tensor& set_to(float value);\n bool empty() const;\n\n template\n int offset(int index, _Args ... index_args) const{\n const int index_array[] = {index, index_args...};\n return offset_array(sizeof...(index_args) + 1, index_array);\n }\n\n int offset_array(const std::vector& index) const;\n int offset_array(size_t size, const int* index_array) const;\n\n template\n Tensor& resize(int dim_size, _Args ... 
dim_size_args){\n const int dim_size_array[] = {dim_size, dim_size_args...};\n return resize(sizeof...(dim_size_args) + 1, dim_size_array);\n }\n\n Tensor& resize(int ndims, const int* dims);\n Tensor& resize(const std::vector& dims);\n Tensor& resize_single_dim(int idim, int size);\n int count(int start_axis = 0) const;\n int device() const{return device_id_;}\n\n Tensor& to_gpu(bool copy=true);\n Tensor& to_cpu(bool copy=true);\n\n inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); }\n inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); }\n \n template inline const DType* cpu() const { return (DType*)cpu(); }\n template inline DType* cpu() { return (DType*)cpu(); }\n\n template \n inline DType* cpu(int i, _Args&& ... args) { return cpu() + offset(i, args...); }\n\n\n template inline const DType* gpu() const { return (DType*)gpu(); }\n template inline DType* gpu() { return (DType*)gpu(); }\n\n template \n inline DType* gpu(int i, _Args&& ... args) { return gpu() + offset(i, args...); }\n\n template \n inline DType& at(int i, _Args&& ... args) { return *(cpu() + offset(i, args...)); }\n \n std::shared_ptr get_data() const {return data_;}\n std::shared_ptr get_workspace() const {return workspace_;}\n Tensor& set_workspace(std::shared_ptr workspace) {workspace_ = workspace; return *this;}\n\n cudaStream_t get_stream() const{return stream_;}\n Tensor& set_stream(cudaStream_t stream){stream_ = stream; return *this;}\n\n Tensor& set_mat (int n, const cv::Mat& image);\n Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]);\n cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu(n, c)); }\n\n Tensor& synchronize();\n const char* shape_string() const{return shape_string_;}\n const char* descriptor() const;\n\n Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID);\n\n \/**\n \n # \u4ee5\u4e0b\u4ee3\u7801\u662fpython\u4e2d\u52a0\u8f7dTensor\n import numpy as np\n\n def load_tensor(file):\n \n with open(file, \"rb\") as f:\n binary_data = f.read()\n\n magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0)\n assert magic_number == 0xFCCFE2E2, f\"{file} not a tensor file.\"\n \n dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4)\n\n if dtype == 0:\n np_dtype = np.float32\n elif dtype == 1:\n np_dtype = np.float16\n else:\n assert False, f\"Unsupport dtype = {dtype}, can not convert to numpy dtype\"\n \n return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims)\n\n **\/\n bool save_to_file(const std::string& file) const;\n\n private:\n Tensor& compute_shape_string();\n Tensor& adajust_memory_by_update_dims_or_type();\n void setup_data(std::shared_ptr data);\n\n private:\n std::vector shape_;\n size_t bytes_ = 0;\n DataHead head_ = DataHead::Init;\n cudaStream_t stream_ = nullptr;\n int device_id_ = 0;\n char shape_string_[100];\n char descriptor_string_[100];\n std::shared_ptr data_;\n std::shared_ptr workspace_;\n };\n\n Tensor::Tensor(int n, int c, int h, int w, shared_ptr data, int device_id) {\n this->device_id_ = get_device(device_id);\n descriptor_string_[0] = 0;\n setup_data(data);\n resize(n, c, h, w);\n }\n\n Tensor::Tensor(const std::vector& dims, shared_ptr data, int device_id){\n this->device_id_ = get_device(device_id);\n descriptor_string_[0] = 0;\n setup_data(data);\n resize(dims);\n }\n\n Tensor::Tensor(int ndims, const int* dims, shared_ptr data, int 
device_id) {\n this->device_id_ = get_device(device_id);\n descriptor_string_[0] = 0;\n setup_data(data);\n resize(ndims, dims);\n }\n\n Tensor::Tensor(shared_ptr data, int device_id){\n shape_string_[0] = 0;\n descriptor_string_[0] = 0;\n this->device_id_ = get_device(device_id);\n setup_data(data);\n }\n\n Tensor::~Tensor() {\n release();\n }\n\n const char* Tensor::descriptor() const{\n \n char* descriptor_ptr = (char*)descriptor_string_;\n int device_id = device();\n snprintf(descriptor_ptr, sizeof(descriptor_string_), \n \"Tensor:%p, %s, CUDA:%d\", \n data_.get(),\n shape_string_, \n device_id\n );\n return descriptor_ptr;\n }\n\n Tensor& Tensor::compute_shape_string(){\n\n \/\/ clean string\n shape_string_[0] = 0;\n\n char* buffer = shape_string_;\n size_t buffer_size = sizeof(shape_string_);\n for(int i = 0; i < shape_.size(); ++i){\n\n int size = 0;\n if(i < shape_.size() - 1)\n size = snprintf(buffer, buffer_size, \"%d x \", shape_[i]);\n else\n size = snprintf(buffer, buffer_size, \"%d\", shape_[i]);\n\n buffer += size;\n buffer_size -= size;\n }\n return *this;\n }\n\n void Tensor::setup_data(shared_ptr data){\n \n data_ = data;\n if(data_ == nullptr){\n data_ = make_shared(device_id_);\n }else{\n device_id_ = data_->device_id();\n }\n\n head_ = DataHead::Init;\n if(data_->cpu()){\n head_ = DataHead::Host;\n }\n\n if(data_->gpu()){\n head_ = DataHead::Device;\n }\n }\n\n Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){\n\n if(head_ == DataHead::Init)\n to_gpu(false);\n\n size_t offset_location = offset * element_size();\n if(offset_location >= bytes_){\n INFOE(\"Offset location[%lld] >= bytes_[%lld], out of range\", offset_location, bytes_);\n return *this;\n }\n\n size_t copyed_bytes = num_element * element_size();\n size_t remain_bytes = bytes_ - offset_location;\n if(copyed_bytes > remain_bytes){\n INFOE(\"Copyed bytes[%lld] > remain bytes[%lld], out of range\", copyed_bytes, remain_bytes);\n return *this;\n }\n \n if(head_ == DataHead::Device){\n int current_device_id = get_device(device_id);\n int gpu_device_id = device();\n if(current_device_id != gpu_device_id){\n checkCudaRuntime(cudaMemcpyPeerAsync(gpu() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_));\n \/\/checkCudaRuntime(cudaMemcpyAsync(gpu() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));\n }\n else{\n checkCudaRuntime(cudaMemcpyAsync(gpu() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));\n }\n }else if(head_ == DataHead::Host){\n AutoDevice auto_device_exchange(this->device());\n checkCudaRuntime(cudaMemcpyAsync(cpu() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToHost, stream_));\n }else{\n INFOE(\"Unsupport head type %d\", head_);\n }\n return *this;\n }\n\n Tensor& Tensor::release() {\n data_->release_all();\n shape_.clear();\n bytes_ = 0;\n head_ = DataHead::Init;\n return *this;\n }\n\n bool Tensor::empty() const{\n return data_->cpu() == nullptr && data_->gpu() == nullptr;\n }\n\n int Tensor::count(int start_axis) const {\n\n if(start_axis >= 0 && start_axis < shape_.size()){\n int size = 1;\n for (int i = start_axis; i < shape_.size(); ++i) \n size *= shape_[i];\n return size;\n }else{\n return 0;\n }\n }\n\n Tensor& Tensor::resize(const std::vector& dims) {\n return resize(dims.size(), dims.data());\n }\n\n int Tensor::numel() const{\n int value = shape_.empty() ? 
0 : 1;\n for(int i = 0; i < shape_.size(); ++i){\n value *= shape_[i];\n }\n return value;\n }\n\n Tensor& Tensor::resize_single_dim(int idim, int size){\n\n Assert(idim >= 0 && idim < shape_.size());\n\n auto new_shape = shape_;\n new_shape[idim] = size;\n return resize(new_shape);\n }\n\n Tensor& Tensor::resize(int ndims, const int* dims) {\n\n vector setup_dims(ndims);\n for(int i = 0; i < ndims; ++i){\n int dim = dims[i];\n if(dim == -1){\n Assert(ndims == shape_.size());\n dim = shape_[i];\n }\n setup_dims[i] = dim;\n }\n this->shape_ = setup_dims;\n this->adajust_memory_by_update_dims_or_type();\n this->compute_shape_string();\n return *this;\n }\n\n Tensor& Tensor::adajust_memory_by_update_dims_or_type(){\n \n int needed_size = this->numel() * element_size();\n if(needed_size > this->bytes_){\n head_ = DataHead::Init;\n }\n this->bytes_ = needed_size;\n return *this;\n }\n\n Tensor& Tensor::synchronize(){ \n AutoDevice auto_device_exchange(this->device());\n checkCudaRuntime(cudaStreamSynchronize(stream_));\n return *this;\n }\n\n Tensor& Tensor::to_gpu(bool copy) {\n\n if (head_ == DataHead::Device)\n return *this;\n\n head_ = DataHead::Device;\n data_->gpu(bytes_);\n\n if (copy && data_->cpu() != nullptr) {\n AutoDevice auto_device_exchange(this->device());\n checkCudaRuntime(cudaMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, cudaMemcpyHostToDevice, stream_));\n }\n return *this;\n }\n \n Tensor& Tensor::to_cpu(bool copy) {\n\n if (head_ == DataHead::Host)\n return *this;\n\n head_ = DataHead::Host;\n data_->cpu(bytes_);\n\n if (copy && data_->gpu() != nullptr) {\n AutoDevice auto_device_exchange(this->device());\n checkCudaRuntime(cudaMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, cudaMemcpyDeviceToHost, stream_));\n checkCudaRuntime(cudaStreamSynchronize(stream_));\n }\n return *this;\n }\n\n int Tensor::offset_array(size_t size, const int* index_array) const{\n\n Assert(size <= shape_.size());\n int value = 0;\n for(int i = 0; i < shape_.size(); ++i){\n\n if(i < size)\n value += index_array[i];\n\n if(i + 1 < shape_.size())\n value *= shape_[i+1];\n }\n return value;\n }\n\n int Tensor::offset_array(const std::vector& index_array) const{\n return offset_array(index_array.size(), index_array.data());\n }\n\n bool Tensor::save_to_file(const std::string& file) const{\n\n if(empty()) return false;\n\n FILE* f = fopen(file.c_str(), \"wb\");\n if(f == nullptr) return false;\n\n int ndims = this->ndims();\n int dtype_ = 0;\n unsigned int head[3] = {0xFCCFE2E2, ndims, static_cast(dtype_)};\n fwrite(head, 1, sizeof(head), f);\n fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f);\n fwrite(cpu(), 1, bytes_, f);\n fclose(f);\n return true;\n }\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class TRTInferImpl\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n class Logger : public ILogger {\n public:\n virtual void log(Severity severity, const char* msg) noexcept override {\n\n if (severity == Severity::kINTERNAL_ERROR) {\n INFOE(\"NVInfer INTERNAL_ERROR: %s\", msg);\n abort();\n }else if (severity == Severity::kERROR) {\n INFOE(\"NVInfer: %s\", msg);\n }\n else if (severity == Severity::kWARNING) {\n INFOW(\"NVInfer: %s\", msg);\n }\n else if (severity == Severity::kINFO) {\n INFOD(\"NVInfer: %s\", msg);\n }\n else {\n INFOD(\"%s\", msg);\n }\n }\n };\n static Logger gLogger;\n\n template\n static void destroy_nvidia_pointer(_T* ptr) {\n if (ptr) ptr->destroy();\n }\n\n class EngineContext {\n public:\n 
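\/\/ RAII owner of the TensorRT runtime \/ engine \/ execution-context triple plus the CUDA\n        \/\/ stream. build_model() deserializes a serialized engine blob and creates one execution\n        \/\/ context on an owned stream; everything is released in destroy(). A minimal usage\n        \/\/ sketch (engine_data \/ engine_size are assumed to hold a serialized TensorRT engine):\n        \/\/     EngineContext ectx;\n        \/\/     if (ectx.build_model(engine_data, engine_size)){\n        \/\/         \/* ectx.context_ can now enqueue inference on ectx.stream_ *\/\n        \/\/     }\n        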
virtual ~EngineContext() { destroy(); }\n\n void set_stream(cudaStream_t stream){\n\n if(owner_stream_){\n if (stream_) {cudaStreamDestroy(stream_);}\n owner_stream_ = false;\n }\n stream_ = stream;\n }\n\n bool build_model(const void* pdata, size_t size) {\n destroy();\n\n if(pdata == nullptr || size == 0)\n return false;\n\n owner_stream_ = true;\n checkCudaRuntime(cudaStreamCreate(&stream_));\n if(stream_ == nullptr)\n return false;\n\n runtime_ = shared_ptr(createInferRuntime(gLogger), destroy_nvidia_pointer);\n if (runtime_ == nullptr)\n return false;\n\n engine_ = shared_ptr(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer);\n if (engine_ == nullptr)\n return false;\n\n \/\/runtime_->setDLACore(0);\n context_ = shared_ptr(engine_->createExecutionContext(), destroy_nvidia_pointer);\n return context_ != nullptr;\n }\n\n private:\n void destroy() {\n context_.reset();\n engine_.reset();\n runtime_.reset();\n\n if(owner_stream_){\n if (stream_) {cudaStreamDestroy(stream_);}\n }\n stream_ = nullptr;\n }\n\n public:\n cudaStream_t stream_ = nullptr;\n bool owner_stream_ = false;\n shared_ptr context_;\n shared_ptr engine_;\n shared_ptr runtime_ = nullptr;\n };\n\n class TRTInferImpl{\n public:\n virtual ~TRTInferImpl();\n bool load(const std::string& file);\n bool load_from_memory(const void* pdata, size_t size);\n void destroy();\n void forward(bool sync);\n int get_max_batch_size();\n cudaStream_t get_stream();\n void set_stream(cudaStream_t stream);\n void synchronize();\n size_t get_device_memory_size();\n std::shared_ptr get_workspace();\n std::shared_ptr input(int index = 0);\n std::string get_input_name(int index = 0);\n std::shared_ptr output(int index = 0);\n std::string get_output_name(int index = 0);\n std::shared_ptr tensor(const std::string& name);\n bool is_output_name(const std::string& name);\n bool is_input_name(const std::string& name);\n void set_input (int index, std::shared_ptr tensor);\n void set_output(int index, std::shared_ptr tensor);\n std::shared_ptr> serial_engine();\n\n void print();\n\n int num_output();\n int num_input();\n int device();\n\n private:\n void build_engine_input_and_outputs_mapper();\n\n private:\n std::vector> inputs_;\n std::vector> outputs_;\n std::vector inputs_map_to_ordered_index_;\n std::vector outputs_map_to_ordered_index_;\n std::vector inputs_name_;\n std::vector outputs_name_;\n std::vector> orderdBlobs_;\n std::map blobsNameMapper_;\n std::shared_ptr context_;\n std::vector bindingsPtr_;\n std::shared_ptr workspace_;\n int device_ = 0;\n };\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n TRTInferImpl::~TRTInferImpl(){\n destroy();\n }\n\n void TRTInferImpl::destroy() {\n\n int old_device = 0;\n checkCudaRuntime(cudaGetDevice(&old_device));\n checkCudaRuntime(cudaSetDevice(device_));\n this->context_.reset();\n this->blobsNameMapper_.clear();\n this->outputs_.clear();\n this->inputs_.clear();\n this->inputs_name_.clear();\n this->outputs_name_.clear();\n checkCudaRuntime(cudaSetDevice(old_device));\n }\n\n void TRTInferImpl::print(){\n if(!context_){\n INFOW(\"Infer print, nullptr.\");\n return;\n }\n\n INFO(\"Infer %p detail\", this);\n INFO(\"\\tMax Batch Size: %d\", this->get_max_batch_size());\n INFO(\"\\tInputs: %d\", inputs_.size());\n for(int i = 0; i < inputs_.size(); ++i){\n auto& tensor = inputs_[i];\n auto& name = inputs_name_[i];\n INFO(\"\\t\\t%d.%s : shape {%s}\", i, 
name.c_str(), tensor->shape_string());\n }\n\n INFO(\"\\tOutputs: %d\", outputs_.size());\n for(int i = 0; i < outputs_.size(); ++i){\n auto& tensor = outputs_[i];\n auto& name = outputs_name_[i];\n INFO(\"\\t\\t%d.%s : shape {%s}\", i, name.c_str(), tensor->shape_string());\n }\n }\n\n std::shared_ptr> TRTInferImpl::serial_engine() {\n auto memory = this->context_->engine_->serialize();\n auto output = make_shared>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size());\n memory->destroy();\n return output;\n }\n\n bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) {\n\n if (pdata == nullptr || size == 0)\n return false;\n\n context_.reset(new EngineContext());\n\n \/\/build model\n if (!context_->build_model(pdata, size)) {\n context_.reset();\n return false;\n }\n\n workspace_.reset(new MixMemory());\n cudaGetDevice(&device_);\n build_engine_input_and_outputs_mapper();\n return true;\n }\n\n static std::vector load_file(const string& file){\n\n ifstream in(file, ios::in | ios::binary);\n if (!in.is_open())\n return {};\n\n in.seekg(0, ios::end);\n size_t length = in.tellg();\n\n std::vector data;\n if (length > 0){\n in.seekg(0, ios::beg);\n data.resize(length);\n\n in.read((char*)&data[0], length);\n }\n in.close();\n return data;\n }\n\n bool TRTInferImpl::load(const std::string& file) {\n\n auto data = load_file(file);\n if (data.empty())\n return false;\n\n context_.reset(new EngineContext());\n\n \/\/build model\n if (!context_->build_model(data.data(), data.size())) {\n context_.reset();\n return false;\n }\n\n workspace_.reset(new MixMemory());\n cudaGetDevice(&device_);\n build_engine_input_and_outputs_mapper();\n return true;\n }\n\n size_t TRTInferImpl::get_device_memory_size() {\n EngineContext* context = (EngineContext*)this->context_.get();\n return context->context_->getEngine().getDeviceMemorySize();\n }\n\n void TRTInferImpl::build_engine_input_and_outputs_mapper() {\n \n EngineContext* context = (EngineContext*)this->context_.get();\n int nbBindings = context->engine_->getNbBindings();\n int max_batchsize = context->engine_->getMaxBatchSize();\n\n inputs_.clear();\n inputs_name_.clear();\n outputs_.clear();\n outputs_name_.clear();\n orderdBlobs_.clear();\n bindingsPtr_.clear();\n blobsNameMapper_.clear();\n for (int i = 0; i < nbBindings; ++i) {\n\n auto dims = context->engine_->getBindingDimensions(i);\n auto type = context->engine_->getBindingDataType(i);\n const char* bindingName = context->engine_->getBindingName(i);\n dims.d[0] = max_batchsize;\n auto newTensor = make_shared(dims.nbDims, dims.d);\n newTensor->set_stream(this->context_->stream_);\n newTensor->set_workspace(this->workspace_);\n if (context->engine_->bindingIsInput(i)) {\n \/\/if is input\n inputs_.push_back(newTensor);\n inputs_name_.push_back(bindingName);\n inputs_map_to_ordered_index_.push_back(orderdBlobs_.size());\n }\n else {\n \/\/if is output\n outputs_.push_back(newTensor);\n outputs_name_.push_back(bindingName);\n outputs_map_to_ordered_index_.push_back(orderdBlobs_.size());\n }\n blobsNameMapper_[bindingName] = i;\n orderdBlobs_.push_back(newTensor);\n }\n bindingsPtr_.resize(orderdBlobs_.size());\n }\n\n void TRTInferImpl::set_stream(cudaStream_t stream){\n this->context_->set_stream(stream);\n\n for(auto& t : orderdBlobs_)\n t->set_stream(stream);\n }\n\n cudaStream_t TRTInferImpl::get_stream() {\n return this->context_->stream_;\n }\n\n int TRTInferImpl::device() {\n return device_;\n }\n\n void TRTInferImpl::synchronize() {\n 
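\/\/ Blocks the calling host thread until all work queued on this engine's stream\n        \/\/ (kernels and async copies) has completed; forward(true) relies on this to make\n        \/\/ inference synchronous.\n        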
checkCudaRuntime(cudaStreamSynchronize(context_->stream_));\n }\n\n bool TRTInferImpl::is_output_name(const std::string& name){\n return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end();\n }\n\n bool TRTInferImpl::is_input_name(const std::string& name){\n return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end();\n }\n\n void TRTInferImpl::forward(bool sync) {\n\n EngineContext* context = (EngineContext*)context_.get();\n int inputBatchSize = inputs_[0]->size(0);\n for(int i = 0; i < context->engine_->getNbBindings(); ++i){\n auto dims = context->engine_->getBindingDimensions(i);\n auto type = context->engine_->getBindingDataType(i);\n dims.d[0] = inputBatchSize;\n if(context->engine_->bindingIsInput(i)){\n context->context_->setBindingDimensions(i, dims);\n }\n }\n\n for (int i = 0; i < outputs_.size(); ++i) {\n outputs_[i]->resize_single_dim(0, inputBatchSize);\n outputs_[i]->to_gpu(false);\n }\n\n for (int i = 0; i < orderdBlobs_.size(); ++i)\n bindingsPtr_[i] = orderdBlobs_[i]->gpu();\n\n void** bindingsptr = bindingsPtr_.data();\n \/\/bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr);\n bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr);\n if(!execute_result){\n auto code = cudaGetLastError();\n INFOF(\"execute fail, code %d[%s], message %s\", code, cudaGetErrorName(code), cudaGetErrorString(code));\n }\n\n if (sync) {\n synchronize();\n }\n }\n\n std::shared_ptr TRTInferImpl::get_workspace() {\n return workspace_;\n }\n\n int TRTInferImpl::num_input() {\n return this->inputs_.size();\n }\n\n int TRTInferImpl::num_output() {\n return this->outputs_.size();\n }\n\n void TRTInferImpl::set_input (int index, std::shared_ptr tensor){\n Assert(index >= 0 && index < inputs_.size());\n this->inputs_[index] = tensor;\n\n int order_index = inputs_map_to_ordered_index_[index];\n this->orderdBlobs_[order_index] = tensor;\n }\n\n void TRTInferImpl::set_output(int index, std::shared_ptr tensor){\n Assert(index >= 0 && index < outputs_.size());\n this->outputs_[index] = tensor;\n\n int order_index = outputs_map_to_ordered_index_[index];\n this->orderdBlobs_[order_index] = tensor;\n }\n\n std::shared_ptr TRTInferImpl::input(int index) {\n Assert(index >= 0 && index < inputs_name_.size());\n return this->inputs_[index];\n }\n\n std::string TRTInferImpl::get_input_name(int index){\n Assert(index >= 0 && index < inputs_name_.size());\n return inputs_name_[index];\n }\n\n std::shared_ptr TRTInferImpl::output(int index) {\n Assert(index >= 0 && index < outputs_.size());\n return outputs_[index];\n }\n\n std::string TRTInferImpl::get_output_name(int index){\n Assert(index >= 0 && index < outputs_name_.size());\n return outputs_name_[index];\n }\n\n int TRTInferImpl::get_max_batch_size() {\n Assert(this->context_ != nullptr);\n return this->context_->engine_->getMaxBatchSize();\n }\n\n std::shared_ptr TRTInferImpl::tensor(const std::string& name) {\n Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end());\n return orderdBlobs_[blobsNameMapper_[name]];\n }\n\n std::shared_ptr load_infer(const string& file) {\n \n std::shared_ptr infer(new TRTInferImpl());\n if (!infer->load(file))\n infer.reset();\n return infer;\n }\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class MonopolyAllocator\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n \/* \u72ec\u5360\u5206\u914d\u5668\n 
\u901a\u8fc7\u5bf9tensor\u505a\u72ec\u5360\u7ba1\u7406\uff0c\u5177\u6709max_batch * 2\u4e2atensor\uff0c\u901a\u8fc7query\u83b7\u53d6\u4e00\u4e2a\n \u5f53\u63a8\u7406\u7ed3\u675f\u540e\uff0c\u8be5tensor\u91ca\u653e\u4f7f\u7528\u6743\uff0c\u5373\u53ef\u4ea4\u7ed9\u4e0b\u4e00\u4e2a\u56fe\u50cf\u4f7f\u7528\uff0c\u5185\u5b58\u5b9e\u73b0\u590d\u7528\n *\/\n template\n class MonopolyAllocator{\n public:\n class MonopolyData{\n public:\n std::shared_ptr<_ItemType>& data(){ return data_; }\n void release(){manager_->release_one(this);}\n\n private:\n MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;}\n\n private:\n friend class MonopolyAllocator;\n MonopolyAllocator* manager_ = nullptr;\n std::shared_ptr<_ItemType> data_;\n bool available_ = true;\n };\n typedef std::shared_ptr MonopolyDataPointer;\n\n MonopolyAllocator(int size){\n capacity_ = size;\n num_available_ = size;\n datas_.resize(size);\n\n for(int i = 0; i < size; ++i)\n datas_[i] = std::shared_ptr(new MonopolyData(this));\n }\n\n virtual ~MonopolyAllocator(){\n run_ = false;\n cv_.notify_all();\n \n std::unique_lock l(lock_);\n cv_exit_.wait(l, [&](){\n return num_wait_thread_ == 0;\n });\n }\n\n MonopolyDataPointer query(int timeout = 10000){\n\n std::unique_lock l(lock_);\n if(!run_) return nullptr;\n \n if(num_available_ == 0){\n num_wait_thread_++;\n\n auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){\n return num_available_ > 0 || !run_;\n });\n\n num_wait_thread_--;\n cv_exit_.notify_one();\n\n \/\/ timeout, no available, exit program\n if(!state || num_available_ == 0 || !run_)\n return nullptr;\n }\n\n auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;});\n if(item == datas_.end())\n return nullptr;\n \n (*item)->available_ = false;\n num_available_--;\n return *item;\n }\n\n int num_available(){\n return num_available_;\n }\n\n int capacity(){\n return capacity_;\n }\n\n private:\n void release_one(MonopolyData* prq){\n std::unique_lock l(lock_);\n if(!prq->available_){\n prq->available_ = true;\n num_available_++;\n cv_.notify_one();\n }\n }\n\n private:\n std::mutex lock_;\n std::condition_variable cv_;\n std::condition_variable cv_exit_;\n std::vector datas_;\n int capacity_ = 0;\n volatile int num_available_ = 0;\n volatile int num_wait_thread_ = 0;\n volatile bool run_ = true;\n };\n\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class ThreadSafedAsyncInfer\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n \/* \u5f02\u6b65\u7ebf\u7a0b\u5b89\u5168\u7684\u63a8\u7406\u5668\n \u901a\u8fc7\u5f02\u6b65\u7ebf\u7a0b\u542f\u52a8\uff0c\u4f7f\u5f97\u8c03\u7528\u65b9\u5141\u8bb8\u4efb\u610f\u7ebf\u7a0b\u8c03\u7528\u628a\u56fe\u50cf\u505a\u8f93\u5165\uff0c\u5e76\u901a\u8fc7future\u6765\u83b7\u53d6\u5f02\u6b65\u7ed3\u679c\n *\/\n template, class JobAdditional=int>\n class ThreadSafedAsyncInfer{\n public:\n struct Job{\n Input input;\n Output output;\n JobAdditional additional;\n MonopolyAllocator::MonopolyDataPointer mono_tensor;\n std::shared_ptr> pro;\n };\n\n virtual ~ThreadSafedAsyncInfer(){\n stop();\n }\n\n void stop(){\n run_ = false;\n cond_.notify_all();\n\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ cleanup jobs\n {\n std::unique_lock l(jobs_lock_);\n while(!jobs_.empty()){\n auto& item = jobs_.front();\n if(item.pro)\n item.pro->set_value(Output());\n jobs_.pop();\n }\n };\n\n if(worker_){\n worker_->join();\n 
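\/\/ join() must finish before the thread object is destroyed; run_ is already false\n            \/\/ and the job queue has been drained above, so the worker loop is guaranteed to exit.\n            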
worker_.reset();\n            }\n        }\n\n        bool startup(const StartParam& param){\n            run_ = true;\n\n            std::promise<bool> pro;\n            start_param_ = param;\n            worker_ = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro));\n            return pro.get_future().get();\n        }\n\n        virtual std::shared_future<Output> commit(const Input& input){\n\n            Job job;\n            job.pro = std::make_shared<std::promise<Output>>();\n            if(!preprocess(job, input)){\n                job.pro->set_value(Output());\n                return job.pro->get_future();\n            }\n            \n            \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n            {\n                std::unique_lock<std::mutex> l(jobs_lock_);\n                jobs_.push(job);\n            };\n            cond_.notify_one();\n            return job.pro->get_future();\n        }\n\n        virtual std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){\n\n            int batch_size = std::min((int)inputs.size(), this->tensor_allocator_->capacity());\n            std::vector<Job> jobs(inputs.size());\n            std::vector<std::shared_future<Output>> results(inputs.size());\n\n            int nepoch = (inputs.size() + batch_size - 1) \/ batch_size;\n            for(int epoch = 0; epoch < nepoch; ++epoch){\n                int begin = epoch * batch_size;\n                int end = std::min((int)inputs.size(), begin + batch_size);\n\n                for(int i = begin; i < end; ++i){\n                    Job& job = jobs[i];\n                    job.pro = std::make_shared<std::promise<Output>>();\n                    if(!preprocess(job, inputs[i])){\n                        job.pro->set_value(Output());\n                    }\n                    results[i] = job.pro->get_future();\n                }\n\n                \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n                {\n                    std::unique_lock<std::mutex> l(jobs_lock_);\n                    for(int i = begin; i < end; ++i){\n                        jobs_.emplace(std::move(jobs[i]));\n                    };\n                }\n                cond_.notify_one();\n            }\n            return results;\n        }\n\n    protected:\n        virtual void worker(std::promise<bool>& result) = 0;\n        virtual bool preprocess(Job& job, const Input& input) = 0;\n        \n        virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){\n\n            std::unique_lock<std::mutex> l(jobs_lock_);\n            cond_.wait(l, [&](){\n                return !run_ || !jobs_.empty();\n            });\n\n            if(!run_) return false;\n            \n            fetch_jobs.clear();\n            for(int i = 0; i < max_size && !jobs_.empty(); ++i){\n                fetch_jobs.emplace_back(std::move(jobs_.front()));\n                jobs_.pop();\n            }\n            return true;\n        }\n\n        virtual bool get_job_and_wait(Job& fetch_job){\n\n            std::unique_lock<std::mutex> l(jobs_lock_);\n            cond_.wait(l, [&](){\n                return !run_ || !jobs_.empty();\n            });\n\n            if(!run_) return false;\n            \n            fetch_job = std::move(jobs_.front());\n            jobs_.pop();\n            return true;\n        }\n\n    protected:\n        StartParam start_param_;\n        std::atomic<bool> run_;\n        std::mutex jobs_lock_;\n        std::queue<Job> jobs_;\n        std::shared_ptr<std::thread> worker_;\n        std::condition_variable cond_;\n        std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_;\n    };\n\n\n    \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/class YoloTRTInferImpl\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n    \/* Concrete YOLO implementation\n       Built on the classes above: preprocessing overlaps with inference, calls may come asynchronously\n       from any thread, and multiple images are stitched into one batch before inference. This maximizes\n       GPU utilization for high-performance, highly available YOLO inference.\n    *\/\n    const char* type_name(Type type){\n        switch(type){\n        case Type::V5: return \"YoloV5\";\n        case Type::X: return \"YoloX\";\n        default: return \"Unknown\";\n        }\n    }\n\n    
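\/\/ Worked example (sketch) for the letterbox transform that AffineMatrix::compute below produces,\n    \/\/ assuming a 1280x720 source image and a 640x640 network input:\n    \/\/     scale = min(640\/1280.0, 640\/720.0) = 0.5\n    \/\/     i2d   = [0.5, 0, -0.25, 0, 0.5, 139.75]\n    \/\/ i.e. the image is scaled by 0.5 and shifted down by about 140 pixels so it sits centered\n    \/\/ vertically; the +-0.25 offsets come from the half-pixel center alignment terms.\n\n    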
struct AffineMatrix{\n        float i2d[6];   \/\/ image to dst(network), 2x3 matrix\n        float d2i[6];   \/\/ dst to image, 2x3 matrix\n\n        void compute(const cv::Size& from, const cv::Size& to){\n            float scale_x = to.width \/ (float)from.width;\n            float scale_y = to.height \/ (float)from.height;\n            float scale = std::min(scale_x, scale_y);\n            i2d[0] = scale; i2d[1] = 0; i2d[2] = -scale * from.width * 0.5 + to.width * 0.5 + scale * 0.5 - 0.5;\n            i2d[3] = 0; i2d[4] = scale; i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5;\n\n            cv::Mat m2x3_i2d(2, 3, CV_32F, i2d);\n            cv::Mat m2x3_d2i(2, 3, CV_32F, d2i);\n            cv::invertAffineTransform(m2x3_i2d, m2x3_d2i);\n        }\n\n        cv::Mat i2d_mat(){\n            return cv::Mat(2, 3, CV_32F, i2d);\n        }\n    };\n\n    using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer\n    <\n        cv::Mat,             \/\/ input\n        BoxArray,            \/\/ output\n        tuple<string, int>,  \/\/ start param\n        AffineMatrix         \/\/ additional\n    >;\n    class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{\n    public:\n\n        \/** stop must be executed here in the implementation, not in the base class **\/\n        virtual ~YoloTRTInferImpl(){\n            stop();\n        }\n\n        virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){\n\n            if(type == Type::V5){\n                normalize_ = Norm::alpha_beta(1 \/ 255.0f, 0.0f, ChannelType::SwapRB);\n            }else if(type == Type::X){\n                \/\/float mean[] = {0.485, 0.456, 0.406};\n                \/\/float std[] = {0.229, 0.224, 0.225};\n                \/\/normalize_ = Norm::mean_std(mean, std, 1\/255.0f, ChannelType::Invert);\n                normalize_ = Norm::None();\n            }else{\n                INFOE(\"Unsupported type %d\", type);\n            }\n            \n            confidence_threshold_ = confidence_threshold;\n            nms_threshold_ = nms_threshold;\n            return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid));\n        }\n\n        virtual void worker(promise<bool>& result) override{\n\n            string file = get<0>(start_param_);\n            int gpuid = get<1>(start_param_);\n\n            set_device(gpuid);\n            auto engine = load_infer(file);\n            if(engine == nullptr){\n                INFOE(\"Engine %s load failed\", file.c_str());\n                result.set_value(false);\n                return;\n            }\n\n            engine->print();\n\n            const int MAX_IMAGE_BBOX = 1024;\n            const int NUM_BOX_ELEMENT = 7;  \/\/ left, top, right, bottom, confidence, class, keepflag\n            Tensor affin_matrix_device;\n            Tensor output_array_device;\n            int max_batch_size = engine->get_max_batch_size();\n            auto input = engine->tensor(\"images\");\n            auto output = engine->tensor(\"output\");\n            int num_classes = output->size(2) - 5;\n\n            input_width_ = input->size(3);\n            input_height_ = input->size(2);\n            tensor_allocator_ = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2);\n            stream_ = engine->get_stream();\n            gpu_ = gpuid;\n            result.set_value(true);\n\n            input->resize_single_dim(0, max_batch_size).to_gpu();\n            affin_matrix_device.set_stream(stream_);\n\n            \/\/ 8 values per row so that 8 * sizeof(float) % 32 == 0, keeping each row 32-byte aligned\n            affin_matrix_device.resize(max_batch_size, 8).to_gpu();\n\n            \/\/ the 1 + MAX_IMAGE_BBOX layout is: counter + bboxes ...\n            output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu();\n\n            vector<Job> fetch_jobs;\n            while(get_jobs_and_wait(fetch_jobs, max_batch_size)){\n\n                int infer_batch_size = fetch_jobs.size();\n                input->resize_single_dim(0, infer_batch_size);\n\n                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){\n                    auto& job = fetch_jobs[ibatch];\n                    auto& mono = job.mono_tensor->data();\n                    affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6);\n                    input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count());\n                    job.mono_tensor->release();\n                }\n\n                engine->forward(false);\n                
output_array_device.to_gpu(false);\n                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){\n                    \n                    auto& job = fetch_jobs[ibatch];\n                    float* image_based_output = output->gpu<float>(ibatch);\n                    float* output_array_ptr = output_array_device.gpu<float>(ibatch);\n                    auto affine_matrix = affin_matrix_device.gpu<float>(ibatch);\n                    checkCudaRuntime(cudaMemsetAsync(output_array_ptr, 0, sizeof(int), stream_));\n                    decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_);\n                }\n\n                output_array_device.to_cpu();\n                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){\n                    float* parray = output_array_device.cpu<float>(ibatch);\n                    int count = min(MAX_IMAGE_BBOX, (int)*parray);\n                    auto& job = fetch_jobs[ibatch];\n                    auto& image_based_boxes = job.output;\n                    for(int i = 0; i < count; ++i){\n                        float* pbox = parray + 1 + i * NUM_BOX_ELEMENT;\n                        int label = pbox[5];\n                        int keepflag = pbox[6];\n                        if(keepflag == 1){\n                            image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label);\n                        }\n                    }\n                    job.pro->set_value(image_based_boxes);\n                }\n                fetch_jobs.clear();\n            }\n            stream_ = nullptr;\n            tensor_allocator_.reset();\n            INFO(\"Engine destroyed.\");\n        }\n\n        virtual bool preprocess(Job& job, const Mat& image) override{\n\n            if(tensor_allocator_ == nullptr){\n                INFOE(\"tensor_allocator_ is nullptr\");\n                return false;\n            }\n\n            job.mono_tensor = tensor_allocator_->query();\n            if(job.mono_tensor == nullptr){\n                INFOE(\"Tensor allocator query failed.\");\n                return false;\n            }\n\n            AutoDevice auto_device(gpu_);\n            auto& tensor = job.mono_tensor->data();\n            if(tensor == nullptr){\n                \/\/ not initialized yet\n                tensor = make_shared<Tensor>();\n                tensor->set_workspace(make_shared<MixMemory>());\n            }\n\n            Size input_size(input_width_, input_height_);\n            job.additional.compute(image.size(), input_size);\n            \n            tensor->set_stream(stream_);\n            tensor->resize(1, 3, input_height_, input_width_);\n\n            size_t size_image = image.cols * image.rows * 3;\n            size_t size_matrix = upbound(sizeof(job.additional.d2i), 32);\n            auto workspace = tensor->get_workspace();\n            uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image);\n            float* affine_matrix_device = (float*)gpu_workspace;\n            uint8_t* image_device = size_matrix + gpu_workspace;\n\n            uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image);\n            float* affine_matrix_host = (float*)cpu_workspace;\n            uint8_t* image_host = size_matrix + cpu_workspace;\n\n            \/\/checkCudaRuntime(cudaMemcpyAsync(image_host, image.data, size_image, cudaMemcpyHostToHost, stream_));\n            \/\/ a plain memcpy is faster here than a host-to-host cudaMemcpyAsync\n            memcpy(image_host, image.data, size_image);\n            memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i));\n            checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream_));\n            checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), cudaMemcpyHostToDevice, stream_));\n\n            warp_affine_bilinear_and_normalize_plane(\n                image_device, image.cols * 3, image.cols, image.rows, \n                tensor->gpu<float>(), input_width_, input_height_, \n                affine_matrix_device, 114, \n                normalize_, stream_\n            );\n            return true;\n        }\n\n        virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{\n            return ThreadSafedAsyncInferImpl::commits(images);\n        }\n\n        virtual std::shared_future<BoxArray> commit(const Mat& image) override{\n            return ThreadSafedAsyncInferImpl::commit(image);\n        }\n\n    private:\n        int input_width_ = 0;\n        int input_height_ = 0;\n        int gpu_ = 0;\n        float confidence_threshold_ = 0;\n        float nms_threshold_ = 0;\n        cudaStream_t stream_ = nullptr;\n        Norm normalize_;\n    };\n\n    
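\/\/ Minimal end-to-end sketch of the asynchronous interface (paths are hypothetical;\n    \/\/ create_infer is defined just below):\n    \/\/\n    \/\/     auto yolo = create_infer(\"yolov5s.trtmodel\", Type::V5, 0, 0.25f, 0.45f);\n    \/\/     auto image = cv::imread(\"dog.jpg\");\n    \/\/     auto boxes = yolo->commit(image).get();  \/\/ the future resolves once its batch is inferred\n\n    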
void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){\n\n        Norm normalize;\n        if(type == Type::V5){\n            normalize = Norm::alpha_beta(1 \/ 255.0f, 0.0f, ChannelType::SwapRB);\n        }else if(type == Type::X){\n            \/\/float mean[] = {0.485, 0.456, 0.406};\n            \/\/float std[] = {0.229, 0.224, 0.225};\n            \/\/normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1\/255.0f, CUDAKernel::ChannelType::Invert);\n            normalize = Norm::None();\n        }else{\n            INFOE(\"Unsupported type %d\", type);\n        }\n        \n        Size input_size(tensor->size(3), tensor->size(2));\n        AffineMatrix affine;\n        affine.compute(image.size(), input_size);\n\n        size_t size_image = image.cols * image.rows * 3;\n        size_t size_matrix = upbound(sizeof(affine.d2i), 32);\n        auto workspace = tensor->get_workspace();\n        uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image);\n        float* affine_matrix_device = (float*)gpu_workspace;\n        uint8_t* image_device = size_matrix + gpu_workspace;\n\n        uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image);\n        float* affine_matrix_host = (float*)cpu_workspace;\n        uint8_t* image_host = size_matrix + cpu_workspace;\n        auto stream = tensor->get_stream();\n\n        memcpy(image_host, image.data, size_image);\n        memcpy(affine_matrix_host, affine.d2i, sizeof(affine.d2i));\n        checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream));\n        checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), cudaMemcpyHostToDevice, stream));\n\n        warp_affine_bilinear_and_normalize_plane(\n            image_device, image.cols * 3, image.cols, image.rows, \n            tensor->gpu<float>(ibatch), input_size.width, input_size.height, \n            affine_matrix_device, 114, \n            normalize, stream\n        );\n    }\n\n    shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, float nms_threshold){\n        shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl());\n        if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){\n            instance.reset();\n        }\n        return instance;\n    }\n\n    \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/Compile Model\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n    const char* mode_string(Mode type) {\n        switch (type) {\n        case Mode::FP32:\n            return \"FP32\";\n        case Mode::FP16:\n            return \"FP16\";\n        case Mode::INT8:\n            return \"INT8\";\n        default:\n            return \"UnknownCompileMode\";\n        }\n    }\n\n    typedef std::function<void(int current, int count, const vector<string>& files, std::shared_ptr<Tensor>& tensor)> Int8Process;\n\n    class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{\n    public:\n        Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) {\n\n            Assert(preprocess != nullptr);\n            this->dims_ = dims;\n            this->allimgs_ = imagefiles;\n            this->preprocess_ = preprocess;\n            this->fromCalibratorData_ = false;\n            files_.resize(dims.d[0]);\n            checkCudaRuntime(cudaStreamCreate(&stream_));\n        }\n\n        Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) {\n            Assert(preprocess != nullptr);\n\n            this->dims_ = dims;\n            this->entropyCalibratorData_ = entropyCalibratorData;\n            this->preprocess_ = preprocess;\n            this->fromCalibratorData_ = true;\n            files_.resize(dims.d[0]);\n            checkCudaRuntime(cudaStreamCreate(&stream_));\n        }\n\n        virtual ~Int8EntropyCalibrator(){\n            checkCudaRuntime(cudaStreamDestroy(stream_));\n        }\n\n        
int getBatchSize() const noexcept {\n            return dims_.d[0];\n        }\n\n        bool next() {\n            int batch_size = dims_.d[0];\n            if (cursor_ + batch_size > allimgs_.size())\n                return false;\n\n            int old_cursor = cursor_;\n            for(int i = 0; i < batch_size; ++i)\n                files_[i] = allimgs_[cursor_++];\n\n            if (!tensor_){\n                tensor_.reset(new Tensor(dims_.nbDims, dims_.d));\n                tensor_->set_stream(stream_);\n                tensor_->set_workspace(make_shared<MixMemory>());\n            }\n\n            preprocess_(old_cursor, allimgs_.size(), files_, tensor_);\n            return true;\n        }\n\n        bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept {\n            if (!next()) return false;\n            bindings[0] = tensor_->gpu();\n            return true;\n        }\n\n        const vector<uint8_t>& getEntropyCalibratorData() {\n            return entropyCalibratorData_;\n        }\n\n        const void* readCalibrationCache(size_t& length) noexcept {\n            if (fromCalibratorData_) {\n                length = this->entropyCalibratorData_.size();\n                return this->entropyCalibratorData_.data();\n            }\n\n            length = 0;\n            return nullptr;\n        }\n\n        virtual void writeCalibrationCache(const void* cache, size_t length) noexcept {\n            entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length);\n        }\n\n    private:\n        Int8Process preprocess_;\n        vector<string> allimgs_;\n        size_t batchCudaSize_ = 0;\n        int cursor_ = 0;\n        nvinfer1::Dims dims_;\n        vector<string> files_;\n        shared_ptr<Tensor> tensor_;\n        vector<uint8_t> entropyCalibratorData_;\n        bool fromCalibratorData_ = false;\n        cudaStream_t stream_ = nullptr;\n    };\n\n    bool compile(\n        Mode mode, Type type,\n        unsigned int max_batch_size,\n        const string& source_onnx,\n        const string& saveto,\n        size_t max_workspace_size,\n        const std::string& int8_images_folder,\n        const std::string& int8_entropy_calibrator_cache_file) {\n\n        bool hasEntropyCalibrator = false;\n        vector<uint8_t> entropyCalibratorData;\n        vector<string> entropyCalibratorFiles;\n\n        auto int8process = [=](int current, int count, const vector<string>& files, shared_ptr<Tensor>& tensor){\n\n            for(int i = 0; i < files.size(); ++i){\n\n                auto& file = files[i];\n                INFO(\"Int8 load %d \/ %d, %s\", current + i + 1, count, file.c_str());\n\n                auto image = cv::imread(file);\n                if(image.empty()){\n                    INFOE(\"Load image failed, %s\", file.c_str());\n                    continue;\n                }\n                image_to_tensor(image, tensor, type, i);\n            }\n            tensor->synchronize();\n        };\n\n        if (mode == Mode::INT8) {\n            if (!int8_entropy_calibrator_cache_file.empty()) {\n                if (exists(int8_entropy_calibrator_cache_file)) {\n                    entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file);\n                    if (entropyCalibratorData.empty()) {\n                        INFOE(\"entropyCalibratorFile is set as: %s, but reading it returned empty data.\", int8_entropy_calibrator_cache_file.c_str());\n                        return false;\n                    }\n                    hasEntropyCalibrator = true;\n                }\n            }\n            \n            if (hasEntropyCalibrator) {\n                if (!int8_images_folder.empty()) {\n                    INFOW(\"int8_images_folder is ignored when int8_entropy_calibrator_cache_file is set\");\n                }\n            }\n            else {\n                entropyCalibratorFiles = glob_image_files(int8_images_folder);\n                if (entropyCalibratorFiles.empty()) {\n                    INFOE(\"Cannot find any images(jpg\/png\/bmp\/jpeg\/tiff) from directory: %s\", int8_images_folder.c_str());\n                    return false;\n                }\n\n                if(entropyCalibratorFiles.size() < max_batch_size){\n                    INFOW(\"Too few images provided, %d[provided] < %d[max batch size], image copy will be performed\", entropyCalibratorFiles.size(), max_batch_size);\n\n                    int old_size = entropyCalibratorFiles.size();\n                    for(int i = old_size; i < max_batch_size; ++i)\n                        entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % old_size]);\n                }\n            }\n        }\n        else {\n            if (hasEntropyCalibrator) {\n                INFOW(\"int8_entropy_calibrator_cache_file is ignored when Mode is 
'%s'\", mode_string(mode));\n }\n }\n\n INFO(\"Compile %s %s.\", mode_string(mode), source_onnx.c_str());\n shared_ptr builder(createInferBuilder(gLogger), destroy_nvidia_pointer);\n if (builder == nullptr) {\n INFOE(\"Can not create builder.\");\n return false;\n }\n\n shared_ptr config(builder->createBuilderConfig(), destroy_nvidia_pointer);\n if (mode == Mode::FP16) {\n if (!builder->platformHasFastFp16()) {\n INFOW(\"Platform not have fast fp16 support\");\n }\n config->setFlag(BuilderFlag::kFP16);\n }\n else if (mode == Mode::INT8) {\n if (!builder->platformHasFastInt8()) {\n INFOW(\"Platform not have fast int8 support\");\n }\n config->setFlag(BuilderFlag::kINT8);\n }\n\n shared_ptr network;\n shared_ptr onnxParser;\n const auto explicitBatch = 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);\n network = shared_ptr(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer);\n\n \/\/from onnx is not markOutput\n onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer);\n if (onnxParser == nullptr) {\n INFOE(\"Can not create parser.\");\n return false;\n }\n\n if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) {\n INFOE(\"Can not parse OnnX file: %s\", source_onnx.c_str());\n return false;\n }\n \n auto inputTensor = network->getInput(0);\n auto inputDims = inputTensor->getDimensions();\n\n shared_ptr int8Calibrator;\n if (mode == Mode::INT8) {\n auto calibratorDims = inputDims;\n calibratorDims.d[0] = max_batch_size;\n\n if (hasEntropyCalibrator) {\n INFO(\"Using exist entropy calibrator data[%d bytes]: %s\", entropyCalibratorData.size(), int8_entropy_calibrator_cache_file.c_str());\n int8Calibrator.reset(new Int8EntropyCalibrator(\n entropyCalibratorData, calibratorDims, int8process\n ));\n }\n else {\n INFO(\"Using image list[%d files]: %s\", entropyCalibratorFiles.size(), int8_images_folder.c_str());\n int8Calibrator.reset(new Int8EntropyCalibrator(\n entropyCalibratorFiles, calibratorDims, int8process\n ));\n }\n config->setInt8Calibrator(int8Calibrator.get());\n }\n\n INFO(\"Input shape is %s\", join_dims(vector(inputDims.d, inputDims.d + inputDims.nbDims)).c_str());\n INFO(\"Set max batch size = %d\", max_batch_size);\n INFO(\"Set max workspace size = %.2f MB\", max_workspace_size \/ 1024.0f \/ 1024.0f);\n\n int net_num_input = network->getNbInputs();\n INFO(\"Network has %d inputs:\", net_num_input);\n vector input_names(net_num_input);\n for(int i = 0; i < net_num_input; ++i){\n auto tensor = network->getInput(i);\n auto dims = tensor->getDimensions();\n auto dims_str = join_dims(vector(dims.d, dims.d+dims.nbDims));\n INFO(\" %d.[%s] shape is %s\", i, tensor->getName(), dims_str.c_str());\n\n input_names[i] = tensor->getName();\n }\n\n int net_num_output = network->getNbOutputs();\n INFO(\"Network has %d outputs:\", net_num_output);\n for(int i = 0; i < net_num_output; ++i){\n auto tensor = network->getOutput(i);\n auto dims = tensor->getDimensions();\n auto dims_str = join_dims(vector(dims.d, dims.d+dims.nbDims));\n INFO(\" %d.[%s] shape is %s\", i, tensor->getName(), dims_str.c_str());\n }\n\n int net_num_layers = network->getNbLayers();\n INFO(\"Network has %d layers\", net_num_layers);\t\t\n builder->setMaxBatchSize(max_batch_size);\n config->setMaxWorkspaceSize(max_workspace_size);\n\n auto profile = builder->createOptimizationProfile();\n for(int i = 0; i < net_num_input; ++i){\n auto input = network->getInput(i);\n auto input_dims = input->getDimensions();\n input_dims.d[0] = 1;\n 
profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);\n            profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);\n            input_dims.d[0] = max_batch_size;\n            profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);\n        }\n        config->addOptimizationProfile(profile);\n\n        INFO(\"Building engine...\");\n        auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();\n        shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>);\n        if (engine == nullptr) {\n            INFOE(\"engine is nullptr\");\n            return false;\n        }\n\n        if (mode == Mode::INT8) {\n            if (!hasEntropyCalibrator) {\n                if (!int8_entropy_calibrator_cache_file.empty()) {\n                    INFO(\"Save calibrator to: %s\", int8_entropy_calibrator_cache_file.c_str());\n                    save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData());\n                }\n                else {\n                    INFO(\"entropyCalibratorFile is not set; calibrator data will not be saved.\");\n                }\n            }\n        }\n\n        auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();\n        INFO(\"Build done %lld ms !\", time_end - time_start);\n        \n        \/\/ serialize the engine, then close everything down\n        shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>);\n        return save_file(saveto, seridata->data(), seridata->size());\n    }\n};","avg_line_length":35.5971039182,"max_line_length":218,"alphanum_fraction":0.5511354119} {"size":5078,"ext":"cu","lang":"Cuda","max_stars_count":16.0,"content":"\/*\n * MIT License\n *\n * Copyright (c) 2021 CSCS, ETH Zurich\n *               2021 University of Basel\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n *\/\n\n\/*! 
@file\n * @brief Density i-loop GPU driver\n *\n * @author Sebastian Keller \n *\/\n\n#include \"sph\/sph.cuh\"\n#include \"sph\/particles_data.hpp\"\n#include \"sph\/util\/cuda_utils.cuh\"\n#include \"sph\/hydro_ve\/ve_def_gradh_kern.hpp\"\n\n#include \"cstone\/cuda\/findneighbors.cuh\"\n\nnamespace sph\n{\nnamespace cuda\n{\n\ntemplate\n__global__ void veDefGradhGpu(T sincIndex, T K, int ngmax, const cstone::Box box, size_t first, size_t last,\n size_t numParticles, const KeyType* particleKeys, const T* x, const T* y, const T* z,\n const T* h, const T* m, const T* wh, const T* whd, const T* xm, T* kx, T* gradh)\n{\n unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;\n unsigned i = tid + first;\n\n if (i >= last) return;\n\n \/\/ need to hard-code ngmax stack allocation for now\n assert(ngmax <= NGMAX && \"ngmax too big, please increase NGMAX to desired size\");\n int neighbors[NGMAX];\n int neighborsCount;\n\n \/\/ starting from CUDA 11.3, dynamic stack allocation is available with the following command\n \/\/ int* neighbors = (int*)alloca(ngmax * sizeof(int));\n\n cstone::findNeighbors(\n i, x, y, z, h, box, cstone::sfcKindPointer(particleKeys), neighbors, &neighborsCount, numParticles, ngmax);\n\n auto [kxi, gradhi] = veDefGradhJLoop(i, sincIndex, K, box, neighbors, neighborsCount, x, y, z, h, m, wh, whd, xm);\n\n kx[i] = kxi;\n gradh[i] = gradhi;\n}\n\ntemplate\nvoid computeVeDefGradh(size_t startIndex, size_t endIndex, size_t ngmax, Dataset& d,\n const cstone::Box& box)\n{\n using T = typename Dataset::RealType;\n\n \/\/ number of locally present particles, including halos\n size_t sizeWithHalos = d.x.size();\n size_t numParticlesCompute = endIndex - startIndex;\n\n unsigned numThreads = 128;\n unsigned numBlocks = (numParticlesCompute + numThreads - 1) \/ numThreads;\n\n veDefGradhGpu<<>>(d.sincIndex,\n d.K,\n ngmax,\n box,\n startIndex,\n endIndex,\n sizeWithHalos,\n rawPtr(d.devData.codes),\n rawPtr(d.devData.x),\n rawPtr(d.devData.y),\n rawPtr(d.devData.z),\n rawPtr(d.devData.h),\n rawPtr(d.devData.m),\n rawPtr(d.devData.wh),\n rawPtr(d.devData.whd),\n rawPtr(d.devData.xm),\n rawPtr(d.devData.kx),\n rawPtr(d.devData.gradh));\n\n CHECK_CUDA_ERR(cudaGetLastError());\n}\n\ntemplate void computeVeDefGradh(size_t, size_t, size_t, sphexa::ParticlesData& d,\n const cstone::Box&);\ntemplate void computeVeDefGradh(size_t, size_t, size_t, sphexa::ParticlesData& d,\n const cstone::Box&);\ntemplate void computeVeDefGradh(size_t, size_t, size_t, sphexa::ParticlesData& d,\n const cstone::Box&);\ntemplate void computeVeDefGradh(size_t, size_t, size_t, sphexa::ParticlesData& d,\n const cstone::Box&);\n\n} \/\/ namespace cuda\n} \/\/ namespace sph\n","avg_line_length":43.4017094017,"max_line_length":118,"alphanum_fraction":0.5850728633} {"size":816,"ext":"cu","lang":"Cuda","max_stars_count":3.0,"content":"\/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nYou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License. 
*\/\n\n#define EIGEN_USE_GPU\n#include \"paddle\/operators\/proximal_adagrad_op.h\"\n\nnamespace ops = paddle::operators;\nREGISTER_OP_GPU_KERNEL(\n proximal_adagrad,\n ops::ProximalAdagradOpKernel);\n","avg_line_length":38.8571428571,"max_line_length":79,"alphanum_fraction":0.7892156863} {"size":25178,"ext":"cu","lang":"Cuda","max_stars_count":7.0,"content":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at {\nnamespace native {\nnamespace {\n\n__device__ inline int min(int a, int b) {\n return a <= b ? a : b;\n}\n\n#define CUDA_MAX_THREADS 1024 \/\/ this is safe, in reality 256 is our limit\n\n#define BLOCK_STRIDE 2 \/\/ increasing block_stride to lower # of blocks launched\n\nstatic __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) {\n return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) \/ stride + 1;\n}\n\nstatic __device__ inline int p_end(int size, int pad, int pooled_size, int stride) {\n return min((size + pad) \/ stride + 1, pooled_size);\n}\n\n\/\/ kernels borrowed from Caffe\ntemplate \n__global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data,\n const int num, const int channels, const int height,\n const int width, const int pooled_height, const int pooled_width,\n const int kernel_h, const int kernel_w, const int stride_h,\n const int stride_w, const int pad_h, const int pad_w,\n const int dilation_h, const int dilation_w, scalar_t* top_data,\n int64_t* top_mask) {\n CUDA_KERNEL_LOOP(index, nthreads) {\n int pw = index % pooled_width;\n int ph = (index \/ pooled_width) % pooled_height;\n int c = (index \/ pooled_width \/ pooled_height) % channels;\n int n = index \/ pooled_width \/ pooled_height \/ channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);\n int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);\n while(hstart < 0)\n hstart += dilation_h;\n while(wstart < 0)\n wstart += dilation_w;\n accscalar_t maxval = at::numeric_limits::lower_bound(); \/\/ -Infinity\n int maxidx = hstart * width + wstart;\n const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += dilation_h) {\n for (int w = wstart; w < wend; w += dilation_w) {\n scalar_t val = btm_data[h * width + w];\n if ((ScalarConvert::to(val) > maxval) || THCNumerics::isnan(val)) {\n maxidx = h * width + w;\n maxval = ScalarConvert::to(val);\n }\n }\n }\n top_data[index] = ScalarConvert::to(maxval);\n top_mask[index] = maxidx;\n }\n}\n\ntemplate \nC10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)\n__global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch,\n const int channels, const int height,\n const int width, const int pooled_height, const int pooled_width,\n const int kernel_h, const int kernel_w, const int stride_h,\n const int stride_w, const int pad_h, const int pad_w,\n const int dilation_h, const int dilation_w,\n const int in_stride_n, const int in_stride_c,\n const int in_stride_h, const int in_stride_w,\n const int kernel_stride_C, const int kernel_size_C,\n scalar_t* top_data, int64_t* top_mask) {\n extern __shared__ int smem[];\n int *out_mask_cached = smem;\n scalar_t *out_cached = reinterpret_cast(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]);\n\n \/\/ flattening cta for 
pre-computation & smem initialization;\n int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n int block_size = blockDim.x * blockDim.y * blockDim.z;\n\n \/\/ use shared memory to store temporary output value. This is simply to\n \/\/ reduce register usage.\n for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {\n out_cached[i] = at::numeric_limits::lower_bound();\n out_mask_cached[i] = 0;\n }\n\n __syncthreads();\n\n int batch_id = blockIdx.x % nbatch;\n int channel_id = blockIdx.x \/ nbatch;\n int channel_offset = threadIdx.x + channel_id * blockDim.x;\n\n top_data = top_data + batch_id * pooled_height * pooled_width * channels;\n top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;\n bottom_data = bottom_data + batch_id * in_stride_n;\n\n out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];\n out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];\n\n int oH = (pooled_height + gridDim.z-1) \/ gridDim.z;\n int oW = (pooled_width + gridDim.y-1) \/ gridDim.y;\n int ostartH = threadIdx.z + blockIdx.z*oH;\n int oendH = ::min(ostartH+oH, pooled_height);\n int ostartW = threadIdx.y + blockIdx.y*oW;\n int oendW = ::min(ostartW+oW, pooled_width);\n\n for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {\n int hstart = oh * stride_h - pad_h;\n int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);\n for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {\n int wstart = ow * stride_w - pad_w;\n int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);\n while(hstart < 0)\n hstart += dilation_h;\n while(wstart < 0)\n wstart += dilation_w;\n for (int ih = hstart; ih < hend; ih++) {\n for (int iw = wstart; iw < wend; iw++) {\n int cached_index = threadIdx.x;\n const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w;\n for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {\n scalar_t val = ptr_input[c*in_stride_c];\n if ((scalar_cast(val) > out_cached[cached_index]) || THCNumerics::isnan(val)) {\n out_cached[cached_index] = scalar_cast(val);\n out_mask_cached[cached_index] = ih * width + iw;\n }\n cached_index += blockDim.x;\n }\n }\n }\n scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels;\n int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels;\n\n int cached_index = threadIdx.x;\n for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {\n ptr_output_data[c] = out_cached[cached_index];\n ptr_output_mask[c] = out_mask_cached[cached_index];\n out_cached[cached_index] = at::numeric_limits::lower_bound();\n out_mask_cached[cached_index] = 0;\n cached_index += blockDim.x;\n }\n }\n }\n}\n\n\nstatic const int BLOCK_THREADS = 256;\n\ntemplate \n#if defined (__HIP_PLATFORM_HCC__)\nC10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4)\n#else\nC10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8)\n#endif\n__global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff,\n const int64_t* top_mask, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, const int pad_h, const int pad_w,\n const int dilation_h, const int dilation_w,\n scalar_t* bottom_diff) {\n CUDA_KERNEL_LOOP(index, height*width) {\n int h = index \/ width;\n int w = index - h * width;\n int phstart = p_start(h, 
pad_h, kernel_h, dilation_h, stride_h);\n int phend = p_end(h, pad_h, pooled_height, stride_h);\n int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w);\n int pwend = p_end(w, pad_w, pooled_width, stride_w);\n for (int n = blockIdx.y; n < num; n += gridDim.y) {\n for (int c = blockIdx.z; c < channels; c+= gridDim.z) {\n accscalar_t gradient = accscalar_t(0);\n int offset = (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask[ph * pooled_width + pw + offset] == h * width + w) {\n gradient += ScalarConvert::to(top_diff[ph * pooled_width + pw + offset]);\n }\n }\n }\n bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert::to(gradient);\n }\n }\n }\n}\n\ntemplate \nC10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)\n__global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff,\n const int64_t* top_mask, const int nbatch, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, const int pad_h, const int pad_w,\n const int dilation_h, const int dilation_w,\n const int out_stride_c, const int out_stride_h, const int out_stride_w,\n const int in_stride_n, const int in_stride_c,\n const int in_stride_h, const int in_stride_w,\n const int kernel_stride_C, const int kernel_size_C,\n scalar_t* bottom_diff) {\n extern __shared__ int smem[];\n accscalar_t *out_cached = reinterpret_cast(smem);\n\n int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n int block_size = blockDim.x * blockDim.y * blockDim.z;\n\n int batch_id = blockIdx.x % nbatch;\n int channel_id = blockIdx.x \/ nbatch;\n int channel_offset = threadIdx.x + channel_id * blockDim.x;\n\n for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {\n out_cached[i] = accscalar_t(0.0);\n }\n\n __syncthreads();\n\n out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];\n\n bottom_diff = bottom_diff + batch_id * height * width * channels;\n top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;\n top_diff = top_diff + batch_id * pooled_height * pooled_width * channels;\n\n int iH = (height + gridDim.z-1) \/ gridDim.z;\n int iW = (width + gridDim.y-1) \/ gridDim.y;\n int istartH = threadIdx.z + blockIdx.z*iH;\n int iendH = ::min(istartH+iH, height);\n int istartW = threadIdx.y + blockIdx.y*iW;\n int iendW = ::min(istartW+iW, width);\n\n for (int ih = istartH; ih < iendH; ih+=blockDim.z) {\n int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h);\n int phend = p_end(ih, pad_h, pooled_height, stride_h);\n for (int iw = istartW; iw < iendW; iw+=blockDim.y) {\n int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w);\n int pwend = p_end(iw, pad_w, pooled_width, stride_w);\n int index_shift = ih * width + iw;\n if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {\n for(int oh = phstart; oh < phend; ++oh) {\n for(int ow = pwstart; ow < pwend; ++ow) {\n int cached_index = threadIdx.x;\n const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w;\n for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {\n if (ptr_top_mask[c*out_stride_c] == index_shift) {\n out_cached[cached_index] +=\n scalar_cast(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]);\n }\n cached_index += blockDim.x;\n }\n }\n }\n 
scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;\n int cached_index = threadIdx.x;\n for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {\n ptr_bottom_diff[c] = scalar_cast(out_cached[cached_index]);\n out_cached[cached_index] = accscalar_t(0.0);\n cached_index += blockDim.x;\n }\n } else {\n const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w;\n scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;\n int cached_index = threadIdx.x;\n for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {\n if (ptr_top_mask[c*out_stride_c] == index_shift) {\n ptr_bottom_diff[c] =\n scalar_cast(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]);\n }\n cached_index += blockDim.x;\n }\n }\n }\n }\n}\n\n} \/\/ namespace\n\nTORCH_IMPL_FUNC(max_pool2d_with_indices_out_cuda)\n(const Tensor& input_,\nIntArrayRef kernel_size,\nIntArrayRef stride,\nIntArrayRef padding,\nIntArrayRef dilation,\nbool ceil_mode,\nconst Tensor& output,\nconst Tensor& indices) {\n NoNamesGuard guard;\n\n TensorArg output_arg{ output, \"output\", 1 };\n TensorArg indices_arg{ indices, \"indices\", 2 };\n TensorArg input_arg{ input_, \"input_\", 3 };\n\n checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg});\n\n const int kH = safe_downcast(kernel_size[0]);\n const int kW = kernel_size.size() == 1 ? kH : safe_downcast(kernel_size[1]);\n\n const int dH = stride.empty() ? kH : safe_downcast(stride[0]);\n const int dW = stride.empty() ? kW :\n stride.size() == 1 ? dH : safe_downcast(stride[1]);\n\n const int padH = safe_downcast(padding[0]);\n const int padW = padding.size() == 1 ? padH : safe_downcast(padding[1]);\n\n const int dilationH = safe_downcast(dilation[0]);\n const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast(dilation[1]);\n\n const auto memory_format = input_.suggest_memory_format();\n\n const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;\n const int64_t nInputPlane = input_.size(-3);\n const int64_t inputHeight = input_.size(-2);\n const int64_t inputWidth = input_.size(-1);\n\n const int64_t outputHeight = output.size(-2);\n const int64_t outputWidth = output.size(-1);\n\n Tensor input = input_.contiguous(memory_format);\n\n const int64_t in_stride_n = input_.ndimension() == 4 ? 
input.stride(-4) : 0;\n const int64_t in_stride_c = input.stride(-3);\n const int64_t in_stride_h = input.stride(-2);\n const int64_t in_stride_w = input.stride(-1);\n\n const int count = safe_downcast(output.numel());\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),\n \"max_pool2d_with_indices_out_cuda_frame\",\n [&] {\n using accscalar_t = acc_type;\n\n scalar_t *output_data = output.data_ptr();\n scalar_t *input_data = input.data_ptr();\n int64_t *indices_data = indices.data_ptr();\n\n switch (memory_format) {\n case MemoryFormat::ChannelsLast: {\n const int max_threads = std::min(\n at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);\n int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;\n int block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(nInputPlane), at::cuda::warp_size()));\n int block_y = std::min(\n maxThreadsDim[1], std::min(lastPow2(outputWidth), max_threads \/ block_x));\n int block_z = std::min(\n maxThreadsDim[2], std::min(lastPow2(outputHeight), max_threads \/ block_x \/ block_y));\n block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(nInputPlane), max_threads \/ block_y \/ block_z));\n const dim3 block(block_x, block_y, block_z);\n\n int kernel_stride_C = cuda::ATenCeilDiv(\n safe_downcast(nInputPlane), block_x * 4);\n int kernel_size_C = cuda::ATenCeilDiv(\n safe_downcast(nInputPlane), block_x * kernel_stride_C);\n\n int grid_x = nbatch*kernel_stride_C;\n int grid_y = std::min(\n at::cuda::getCurrentDeviceProperties()->maxGridSize[1],\n cuda::ATenCeilDiv(safe_downcast(outputWidth), block_y*BLOCK_STRIDE));\n int grid_z = std::min(\n at::cuda::getCurrentDeviceProperties()->maxGridSize[2],\n cuda::ATenCeilDiv(safe_downcast(outputHeight), block_z*BLOCK_STRIDE));\n const dim3 grid(grid_x, grid_y, grid_z);\n\n size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t));\n AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);\n\n max_pool_forward_nhwc\n <<>>(\n input_data, nbatch,\n nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,\n kH, kW, dH, dW, padH, padW, dilationH, dilationW,\n in_stride_n, in_stride_c,\n in_stride_h, in_stride_w,\n kernel_stride_C, kernel_size_C,\n output_data, indices_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n break;\n }\n case MemoryFormat::Contiguous: {\n const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,\n BLOCK_THREADS);\n max_pool_forward_nchw\n <<>>(\n count, input_data,\n nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,\n kH, kW, dH, dW, padH, padW, dilationH, dilationW,\n output_data, indices_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n break;\n }\n default: TORCH_CHECK(false, \"Unsupported memory format. 
Supports only ChannelsLast, Contiguous\");\n }\n }\n );\n}\n\nTORCH_IMPL_FUNC(max_pool2d_with_indices_backward_out_cuda)\n(const Tensor& gradOutput_,\nconst Tensor& input_,\nIntArrayRef kernel_size,\nIntArrayRef stride,\nIntArrayRef padding,\nIntArrayRef dilation,\nbool ceil_mode,\nconst Tensor& indices,\nconst Tensor& gradInput) {\n NoNamesGuard guard;\n\n TensorArg gradInput_arg{ gradInput, \"gradInput\", 1 };\n TensorArg gradOutput_arg{ gradOutput_, \"gradOutput_\", 2 };\n TensorArg input_arg{ input_, \"input_\", 3 };\n TensorArg indices_arg{ indices, \"indices\", 4 };\n\n checkAllSameGPU(__func__,\n {gradInput_arg, gradOutput_arg, input_arg, indices_arg});\n\n const int kH = safe_downcast(kernel_size[0]);\n const int kW = kernel_size.size() == 1 ? kH : safe_downcast(kernel_size[1]);\n\n const int dH = stride.empty() ? kH : safe_downcast(stride[0]);\n const int dW = stride.empty() ? kW :\n stride.size() == 1 ? dH : safe_downcast(stride[1]);\n\n const int padH = safe_downcast(padding[0]);\n const int padW = padding.size() == 1 ? padH : safe_downcast(padding[1]);\n\n const int dilationH = safe_downcast(dilation[0]);\n const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast(dilation[1]);\n\n const auto memory_format = input_.suggest_memory_format();\n\n const Tensor input = input_.contiguous(memory_format);\n\n const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;\n const int64_t nInputPlane = input.size(-3);\n const int64_t inputHeight = input.size(-2);\n const int64_t inputWidth = input.size(-1);\n\n const int64_t in_stride_n = input.ndimension() == 4 ? input.stride(-4) : 0;\n const int64_t in_stride_c = input.stride(-3);\n const int64_t in_stride_h = input.stride(-2);\n const int64_t in_stride_w = input.stride(-1);\n\n const Tensor gradOutput = gradOutput_.contiguous(memory_format);\n\n const int64_t outputHeight = gradOutput.size(-2);\n const int64_t outputWidth = gradOutput.size(-1);\n\n const int64_t out_stride_c = gradOutput.stride(-3);\n const int64_t out_stride_h = gradOutput.stride(-2);\n const int64_t out_stride_w = gradOutput.stride(-1);\n\n gradInput.zero_();\n\n int64_t count = input.numel();\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),\n \"max_pool2d_with_indices_out_cuda_frame\",\n [&] {\n using accscalar_t = acc_type;\n\n scalar_t *gradOutput_data = gradOutput.data_ptr();\n scalar_t *gradInput_data = gradInput.data_ptr();\n int64_t *indices_data = indices.data_ptr();\n\n switch (memory_format) {\n case MemoryFormat::ChannelsLast: {\n const int max_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);\n int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;\n int block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(nInputPlane), at::cuda::warp_size()));\n int block_y = std::min(\n maxThreadsDim[1], std::min(lastPow2(inputWidth), max_threads \/ block_x));\n int block_z = std::min(\n maxThreadsDim[2], std::min(lastPow2(inputHeight), max_threads \/ block_x \/ block_y));\n block_x = std::min(\n maxThreadsDim[0], std::min(lastPow2(nInputPlane), max_threads \/ block_y \/ block_z));\n const dim3 block(block_x, block_y, block_z);\n\n int kernel_stride_C = cuda::ATenCeilDiv(\n safe_downcast(nInputPlane), block_x * 4);\n int kernel_size_C = cuda::ATenCeilDiv(\n safe_downcast(nInputPlane), block_x * kernel_stride_C);\n\n int grid_x = nbatch*kernel_stride_C;\n int grid_y = std::min(\n at::cuda::getCurrentDeviceProperties()->maxGridSize[1],\n 
cuda::ATenCeilDiv(safe_downcast(inputWidth), block_y*BLOCK_STRIDE));\n int grid_z = std::min(\n at::cuda::getCurrentDeviceProperties()->maxGridSize[2],\n cuda::ATenCeilDiv(safe_downcast(inputHeight), block_z*BLOCK_STRIDE));\n const dim3 grid(grid_x, grid_y, grid_z);\n\n size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t);\n AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);\n\n \/\/ The backward kernel is launched on input instead output.\n \/\/ If it is launched on output layer, atomic_add would not provide much benefit on FP16.\n \/\/ Please check comments at https:\/\/github.com\/pytorch\/pytorch\/pull\/34519.\n max_pool_backward_nhwc\n <<>>(\n count,\n gradOutput_data,\n indices_data,\n nbatch,\n nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,\n kH, kW, dH, dW, padH, padW, dilationH, dilationW,\n out_stride_c, out_stride_h, out_stride_w,\n in_stride_n, in_stride_c,\n in_stride_h, in_stride_w,\n kernel_stride_C, kernel_size_C,\n gradInput_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n break;\n }\n case MemoryFormat::Contiguous: {\n int imgcount = inputWidth * inputHeight;\n dim3 grid;\n const int blocks = (imgcount + BLOCK_THREADS - 1) \/ BLOCK_THREADS;\n grid.x = blocks;\n grid.y = nbatch;\n uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];\n if (maxGridY < grid.y) grid.y = maxGridY;\n grid.z = nInputPlane;\n uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];\n if (maxGridZ < grid.z) grid.z = maxGridZ;\n\n max_pool_backward_nchw\n <<>>(\n count,\n gradOutput_data,\n indices_data,\n nbatch,\n nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,\n kH, kW, dH, dW, padH, padW, dilationH, dilationW,\n gradInput_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n break;\n }\n default: TORCH_CHECK(false, \"Unsupported memory format. Supports only ChannelsLast, Contiguous\");\n }\n }\n );\n}\n\n} \/\/ at::native\n} \/\/ at\n","avg_line_length":45.0411449016,"max_line_length":126,"alphanum_fraction":0.642545079}
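A minimal consumer-side sketch of the pooling op implemented in the record above, assuming a libtorch build with CUDA available (tensor shapes and pooling parameters are illustrative only):

#include <torch/torch.h>
#include <iostream>

int main() {
    // channels-last input exercises the NHWC kernels above; a contiguous NCHW tensor hits the nchw path
    auto x = torch::randn({2, 16, 32, 32}, torch::kCUDA)
                 .contiguous(torch::MemoryFormat::ChannelsLast);
    // kernel 3x3, stride 2, padding 1, dilation 1, ceil_mode = false
    auto [out, indices] = torch::max_pool2d_with_indices(x, {3, 3}, {2, 2}, {1, 1}, {1, 1}, false);
    std::cout << out.sizes() << std::endl;  // expected: [2, 16, 16, 16]
    return 0;
}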